From f826880f7502e8bbf1ad96eca53d4882c9cdd355 Mon Sep 17 00:00:00 2001
From: wjm
Date: Tue, 8 Jun 2021 04:28:41 +0800
Subject: [PATCH 001/226] fix sc

---
 ge/graph/preprocess/graph_preprocess.cc       | 119 ++++++++++--------
 ge/graph/preprocess/graph_preprocess.h        |   3 +-
 ge/ir_build/ge_ir_build.cc                    |   2 +-
 .../preprocess/graph_preprocess_unittest.cc   |  15 +++
 4 files changed, 84 insertions(+), 55 deletions(-)

diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc
index 0c4adeea..a73c6a96 100644
--- a/ge/graph/preprocess/graph_preprocess.cc
+++ b/ge/graph/preprocess/graph_preprocess.cc
@@ -1420,9 +1420,10 @@ Status GraphPrepare::AdjustDataOpOutput(const NodePtr &node) {
   return SUCCESS;
 }
 
-Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc, bool tune_flag) {
+Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc) {
   auto format = desc.GetFormat();
   auto origin_format = desc.GetOriginFormat();
+  auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
   bool need_check_internal_format = (!IsTansDataOpData(input_node)) && (!options_.is_single_op) && (!tune_flag);
   if (need_check_internal_format) {
     bool is_internal = TypeUtils::IsInternalFormat(format) || TypeUtils::IsInternalFormat(origin_format);
@@ -1439,6 +1440,63 @@ Status GraphPrepare::CheckInternalFormat(const NodePtr &input_node, const GeTens
   return SUCCESS;
 }
 
+Status GraphPrepare::UpdateDataInputOutputDesc(GeAttrValue::INT index, OpDescPtr &op, GeTensorDesc &desc) {
+  auto data_type = desc.GetDataType();
+  uint32_t length = 1;
+  bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
+  if (!type_ret) {
+    std::string reason = "Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "] of index:" +
+                         std::to_string(index) + " input tensor is not support";
+    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
+    GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
+           TypeUtils::DataTypeToSerialString(data_type).c_str());
+    return FAILED;
+  }
+  int64_t desc_shape = desc.GetShape().GetShapeSize();
+  FMK_INT64_UINT32_MULCHECK(desc_shape, length);
+  int64_t shape_size = desc_shape * length;
+  GE_IF_BOOL_EXEC(shape_size == 0 && desc.GetShape().GetDimNum() == 0, shape_size = static_cast<int64_t>(length));
+  int64_t size = 0;
+  GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(desc, size) != GRAPH_SUCCESS,
+                  REPORT_CALL_ERROR("E19999", "Get size of user input tensor failed, index:%ld", index);
+                  GELOGE(INTERNAL_ERROR, "[Get][Size] of user input tensor failed, index:%ld", index); return FAILED);
+  bool size_check = (size != 0 && shape_size != size);
+  if (size_check) {
+    std::string reason = "input tensor[index:" + std::to_string(index) + "]'s data size[" + std::to_string(size) +
+                         "] != shape_size[" + std::to_string(shape_size) + "], check invalid";
+    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
+    GELOGE(PARAM_INVALID, "[Check][Param] input data size = %ld, shape_size = %ld.", size, shape_size);
+    return FAILED;
+  }
+  ge::TensorUtils::SetSize(desc, shape_size);
+
+  auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
+  if (!tune_flag) {
+    graphStatus graph_ret = op->UpdateInputDesc(0, desc);
+    if (graph_ret != GRAPH_SUCCESS) {
+      REPORT_CALL_ERROR("E19999", "Update input desc of op:%s(%s) failed, index:0",
+                        op->GetName().c_str(), op->GetType().c_str());
+      GELOGE(graph_ret, "[Update][InputDesc] of op:%s(%s) failed, index:0",
+             op->GetName().c_str(), op->GetType().c_str());
+      return graph_ret;
+    }
+    // Size will be recalculated in the build stage
+    ge::TensorUtils::SetSize(desc, 0);
+    graph_ret = op->UpdateOutputDesc(0, desc);
+    if (graph_ret != GRAPH_SUCCESS) {
+      REPORT_CALL_ERROR("E19999", "Update output desc of op:%s(%s) failed, index:0",
+                        op->GetName().c_str(), op->GetType().c_str());
+      GELOGE(graph_ret, "[Update][OutputDesc] of op:%s(%s) failed, index:0",
+             op->GetName().c_str(), op->GetType().c_str());
+      return graph_ret;
+    }
+  } else {
+    GELOGI("data %s skip update info in tune mode", op->GetName().c_str());
+  }
+
+  return SUCCESS;
+}
+
 Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
                                  const std::map<std::string, std::string> &graph_option) {
   // Get shape range of input in dynamic_execute mode
@@ -1471,63 +1529,18 @@ Status GraphPrepare::UpdateInput(const std::vector<GeTensor> &user_input,
   }
   GeTensorDesc desc(user_input[index].GetTensorDesc());
   // data maybe internal format [FRACTAL_NZ] at singleop process such as GEMM.
-  auto tune_flag = (options_.build_mode == BUILD_MODE_TUNING) && (options_.build_step == BUILD_STEP_AFTER_BUILDER);
-  ret = CheckInternalFormat(input_node, desc, tune_flag);
+  ret = CheckInternalFormat(input_node, desc);
   if (ret != SUCCESS) {
     GELOGE(INTERNAL_ERROR, "[Check][InternalFormat] on %s failed", op->GetName().c_str());
     return ret;
   }
-  auto data_type = desc.GetDataType();
-  uint32_t length = 1;
-  bool type_ret = TypeUtils::GetDataTypeLength(data_type, length);
-  if (!type_ret) {
-    std::string reason = "Input datatype[" + TypeUtils::DataTypeToSerialString(data_type) + "] of index:" +
-                         std::to_string(index) + " input tensor is not support";
-    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
-    GELOGE(PARAM_INVALID, "[Check][Param] Input datatype %s is not support.",
-           TypeUtils::DataTypeToSerialString(data_type).c_str());
-    return FAILED;
-  }
-  int64_t desc_shape = desc.GetShape().GetShapeSize();
-  FMK_INT64_UINT32_MULCHECK(desc_shape, length);
-  int64_t shape_size = desc_shape * length;
-  GE_IF_BOOL_EXEC(shape_size == 0 && desc.GetShape().GetDimNum() == 0, shape_size = static_cast<int64_t>(length));
-  int64_t size = 0;
-  GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(desc, size) != GRAPH_SUCCESS,
-                  REPORT_CALL_ERROR("E19999", "Get size of user input tensor failed, index:%ld", index);
-                  GELOGE(INTERNAL_ERROR, "[Get][Size] of user input tensor failed, index:%ld", index);
-                  return FAILED);
-  bool size_check = (size != 0 && shape_size != size);
-  if (size_check) {
-    std::string reason = "input tensor[index:" + std::to_string(index) + "]'s data size[" + std::to_string(size) +
-                         "] != shape_size[" + std::to_string(size) + "], check invalid";
-    REPORT_INPUT_ERROR("E19025", std::vector<std::string>({"reason"}), std::vector<std::string>({reason}));
-    GELOGE(PARAM_INVALID, "[Check][Param] input data size = %ld, shape_size = %ld.", size, shape_size);
-    return FAILED;
-  }
-  ge::TensorUtils::SetSize(desc, shape_size);
-  if (!tune_flag) {
-    graphStatus graph_ret = op->UpdateInputDesc(0, desc);
-    if (graph_ret != GRAPH_SUCCESS) {
-      REPORT_CALL_ERROR("E19999", "Update input desc of op:%s(%s) failed, index:0",
-                        op->GetName().c_str(), op->GetType().c_str());
-      GELOGE(graph_ret, "[Update][InputDesc] of op:%s(%s) failed, index:0",
-             op->GetName().c_str(), op->GetType().c_str());
-      return graph_ret;
-    }
-    // Size will be recalculated in the build stage
-    ge::TensorUtils::SetSize(desc, 0);
-    graph_ret = op->UpdateOutputDesc(0, desc);
-    if (graph_ret != GRAPH_SUCCESS) {
-      REPORT_CALL_ERROR("E19999", "Update output desc of op:%s(%s) failed, index:0",
-                        op->GetName().c_str(), op->GetType().c_str());
-      GELOGE(graph_ret, "[Update][OutputDesc] of op:%s(%s) failed, index:0",
-             op->GetName().c_str(), op->GetType().c_str());
-      return graph_ret;
-    }
-  } else {
-    GELOGI("data %s skip update info in tune mode", op->GetName().c_str());
+
+  ret = UpdateDataInputOutputDesc(index, op, desc);
+  if (ret != SUCCESS) {
+    GELOGE(FAILED, "[Update][DataInputOutputDesc] on %s failed", op->GetName().c_str());
+    return ret;
   }
+
   if (!dynamic_shape_range_vec.empty()) {
     ret = UpdateDynamicInputShapeRange(index, dynamic_shape_range_vec, op, desc);
     GE_CHK_STATUS_RET(ret, "[Update][DynamicInputShapeRange] on %s failed.", op->GetName().c_str());
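The graph_preprocess.cc hunks above move the per-input byte-size validation into the new UpdateDataInputOutputDesc helper. Reduced to its core, with GE's macros replaced by plain C++ (a simplified sketch under assumed types, not GE code), the check computes the expected byte size and rejects a non-zero user-supplied size that disagrees with it:

#include <cstdint>
#include <limits>

// Expected byte size of a tensor: element count times element width.
// A rank-0 scalar reports GetShapeSize() == 0 yet still holds one element,
// hence the special case mirroring the GE_IF_BOOL_EXEC line in the patch.
// Returns -1 when the multiplication would overflow int64_t, the role
// FMK_INT64_UINT32_MULCHECK plays above.
inline int64_t ExpectedByteSize(int64_t num_elements, uint32_t dtype_bytes, bool is_scalar) {
  if (dtype_bytes != 0 &&
      num_elements > std::numeric_limits<int64_t>::max() / static_cast<int64_t>(dtype_bytes)) {
    return -1;
  }
  int64_t shape_size = num_elements * static_cast<int64_t>(dtype_bytes);
  if (shape_size == 0 && is_scalar) {
    shape_size = static_cast<int64_t>(dtype_bytes);
  }
  return shape_size;
}

// The caller then fails the input exactly when a user-supplied size is set
// and differs: (size != 0 && shape_size != size).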
diff --git a/ge/graph/preprocess/graph_preprocess.h b/ge/graph/preprocess/graph_preprocess.h
index 584f4d16..22bc566c 100755
--- a/ge/graph/preprocess/graph_preprocess.h
+++ b/ge/graph/preprocess/graph_preprocess.h
@@ -63,7 +63,8 @@ class GraphPrepare {
   Status CheckRefOp();
   Status SetRtContext(rtContext_t rt_context, rtCtxMode_t mode);
   Status AdjustDataOpOutput(const NodePtr &node);
-  Status CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc, bool tune_flag);
+  Status CheckInternalFormat(const NodePtr &input_node, const GeTensorDesc &desc);
+  Status UpdateDataInputOutputDesc(GeAttrValue::INT index, OpDescPtr &op, GeTensorDesc &desc);
   Status UpdateInput(const std::vector<GeTensor> &user_input, const std::map<std::string, std::string> &graph_option);
   Status CheckAndUpdateInput(const std::vector<GeTensor> &user_input, const std::map<std::string, std::string> &graph_option);
   Status CheckConstOp();
diff --git a/ge/ir_build/ge_ir_build.cc b/ge/ir_build/ge_ir_build.cc
index 21db83aa..befffa93 100644
--- a/ge/ir_build/ge_ir_build.cc
+++ b/ge/ir_build/ge_ir_build.cc
@@ -559,8 +559,8 @@ graphStatus Impl::Init(const Graph &graph, const std::map
+  std::vector<GeTensor> user_input = {input1};
+  std::map<std::string, std::string> graph_option;
+  auto ret = graph_prepare.UpdateInput(user_input, graph_option);
+  EXPECT_EQ(ret, ge::FAILED);
+}
+
 TEST_F(UtestGraphPreproces, test_check_user_input) {
   ge::GraphPrepare graph_prepare;
   graph_prepare.compute_graph_ = BuildGraph1();
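CheckInternalFormat now derives tune_flag from the build options itself instead of receiving it as a parameter. In isolation the gating condition reads roughly as follows; the option field names come from the hunks above, while the struct and the constant string values here are illustrative assumptions, not GE definitions:

#include <string>

struct BuildOptions {  // stand-in for the options_ member used above
  std::string build_mode;
  std::string build_step;
  bool is_single_op = false;
};

// Illustrative values; the real constants are defined elsewhere in GE.
static const std::string BUILD_MODE_TUNING = "tuning";
static const std::string BUILD_STEP_AFTER_BUILDER = "after_builder";

// The internal-format check is skipped for TransData-fed inputs, for
// single-op builds, and once the tuning builder step has already run.
inline bool NeedCheckInternalFormat(const BuildOptions &opts, bool is_transdata_input) {
  const bool tune_flag = (opts.build_mode == BUILD_MODE_TUNING) &&
                         (opts.build_step == BUILD_STEP_AFTER_BUILDER);
  return !is_transdata_input && !opts.is_single_op && !tune_flag;
}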
From c7f3b08445e1e45fde7f9a1483dfa2ab23940356 Mon Sep 17 00:00:00 2001
From: wjm
Date: Wed, 9 Jun 2021 06:28:07 +0800
Subject: [PATCH 003/226] mark original input

---
 ge/generator/ge_generator.cc | 1 +
 metadef                      | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc
index 24b35bca..505b1908 100644
--- a/ge/generator/ge_generator.cc
+++ b/ge/generator/ge_generator.cc
@@ -205,6 +205,7 @@ static Status AddInputs(const ComputeGraphPtr &graph, const NodePtr &node, const
   }
 
   (void)AttrUtils::SetBool(data_op, "_is_single_op", true);
+  (void)AttrUtils::SetBool(data_op, ATTR_NAME_IS_ORIGINAL_INPUT, true);
 
   GE_CHK_BOOL_EXEC(data_op->AddInputDesc(tensor) == GRAPH_SUCCESS,
                    REPORT_CALL_ERROR("E19999", "AddInputDesc failed for node:%s",
                                      data_op->GetName().c_str());
diff --git a/metadef b/metadef
index 310610e5..2ad00e17 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit 310610e5392e01659d214ad380e9ed2c39f9f5a3
+Subproject commit 2ad00e17886fd06c0d00f8a8cf370783a3d31818
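The "fix safe" patch below retrofits GE_CHECK_NOTNULL after several new (std::nothrow) allocations. The defect class it closes, sketched without GE's macros (the check macro below is a simplified stand-in, not GE's definition):

#include <cstdint>
#include <cstring>
#include <memory>

// new (std::nothrow) yields nullptr on failure instead of throwing, so the
// result must be tested before use; GE_CHECK_NOTNULL does this and returns
// an error code from the enclosing function.
#define CHECK_NOTNULL_SKETCH(val) \
  do {                            \
    if ((val) == nullptr) {       \
      return -1;                  \
    }                             \
  } while (false)

inline int CopyArgsSafely(const void *src, uint32_t args_size) {
  std::unique_ptr<uint8_t[]> args_addr(new (std::nothrow) uint8_t[args_size]);
  CHECK_NOTNULL_SKETCH(args_addr);  // without this, the copy below could write through nullptr
  std::memcpy(args_addr.get(), src, args_size);  // the patch uses memcpy_s with the same bound
  return 0;
}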
From 96ecc01cbe7286888b57852226771d7a9e1f3e1a Mon Sep 17 00:00:00 2001
From: wjm
Date: Thu, 10 Jun 2021 11:47:43 +0800
Subject: [PATCH 004/226] fix safe

---
 ge/common/dump/exception_dumper.cc                        | 1 +
 ge/graph/build/model_builder.cc                           | 2 +-
 ge/graph/load/model_manager/task_info/kernel_task_info.cc | 2 ++
 ge/graph/preprocess/insert_op/util_insert_aipp_op.cc      | 1 +
 ge/hybrid/model/hybrid_model_builder.cc                   | 1 +
 ge/plugin/engine/engine_manage.cc                         | 2 +-
 6 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/ge/common/dump/exception_dumper.cc b/ge/common/dump/exception_dumper.cc
index c8ec3d35..c41da551 100644
--- a/ge/common/dump/exception_dumper.cc
+++ b/ge/common/dump/exception_dumper.cc
@@ -161,6 +161,7 @@ Status ExceptionDumper::DumpExceptionInfo(const std::vector &ex
   uint64_t proto_size = dump_data.ByteSizeLong();
   std::unique_ptr<char[]> proto_msg(new (std::nothrow) char[proto_size]);
+  GE_CHECK_NOTNULL(proto_msg);
   bool ret = dump_data.SerializeToArray(proto_msg.get(), proto_size);
   if (!ret || proto_size == 0) {
     REPORT_INNER_ERROR("E19999", "Serialize proto to string fail");
diff --git a/ge/graph/build/model_builder.cc b/ge/graph/build/model_builder.cc
index d38e89fe..e35e4e7d 100755
--- a/ge/graph/build/model_builder.cc
+++ b/ge/graph/build/model_builder.cc
@@ -707,7 +707,7 @@ Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
   if (!kernel_name.empty() && (kernel_buffer.GetSize() > 0)) {
     GE_CHECK_NOTNULL(kernel_buffer.GetData());
     std::vector data(kernel_buffer.GetData(), kernel_buffer.GetData() + kernel_buffer.GetSize());
-    tbe_kernel = std::make_shared(kernel_name, std::move(data));
+    tbe_kernel = MakeShared(kernel_name, std::move(data));
     GE_CHECK_NOTNULL(tbe_kernel);
     GELOGI("Node [%s][%s] start recovery extra attr %s from %s", node_op_desc->GetName().c_str(),
            node_op_desc->GetType().c_str(), ge::OP_EXTATTR_NAME_TBE_KERNEL, ATTR_NAME_TBE_KERNEL_NAME.c_str());
diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc
index bfb6e24b..07ad63ca 100755
--- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc
+++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc
@@ -645,6 +645,7 @@ Status KernelTaskInfo::InitTVMTask(uint16_t offset, const domi::KernelDef &kerne
   GE_CHECK_NOTNULL(op_desc);
 
   args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[args_size_]);
+  GE_CHECK_NOTNULL(args_addr);
   errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
   if (sec_ret != EOK) {
     REPORT_CALL_ERROR("E19999", "Call memcpy_s fail, size:%u, ret:0x%X", args_size_, sec_ret);
@@ -1000,6 +1001,7 @@ Status KernelTaskInfo::InitAicpuTask(uint32_t op_index, const domi::KernelDef &k
   // copy args to new host memory
   args_addr = std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[args_size_]);
+  GE_CHECK_NOTNULL(args_addr);
   GE_PRINT_DYNAMIC_MEMORY(new, "cce task physical memory.", sizeof(uint8_t) * args_size_)
   errno_t sec_ret = memcpy_s(args_addr.get(), args_size_, kernel_def.args().data(), args_size_);
   if (sec_ret != EOK) {
diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
index 3cd26139..cc7f276e 100755
--- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
+++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc
@@ -568,6 +568,7 @@ Status InsertNewOpUtil::GetDataRelatedNode(NodePtr &node, std::map
   std::unique_ptr<domi::AippOpParams> aipp_params(new (std::nothrow) domi::AippOpParams());
+  GE_CHECK_NOTNULL(aipp_params);
   ge::GeAttrValue::NAMED_ATTRS aipp_attr;
   GE_CHK_BOOL_RET_STATUS(AttrUtils::GetNamedAttrs(data_op, ATTR_NAME_AIPP, aipp_attr), ACL_ERROR_GE_AIPP_NOT_EXIST,
                          "[Get][Attr] %s from op:%s failed", ATTR_NAME_AIPP.c_str(), data_op->GetName().c_str());
diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc
index e80d9b90..554ddbbb 100755
--- a/ge/hybrid/model/hybrid_model_builder.cc
+++ b/ge/hybrid/model/hybrid_model_builder.cc
@@ -1044,6 +1044,7 @@ Status HybridModelBuilder::InitConstantOps() {
       } else {
         var_tensor.reset(new(std::nothrow)TensorValue(nullptr, 0));
       }
+      GE_CHECK_NOTNULL(var_tensor);
     } else {
       GE_CHK_STATUS_RET_NOLOG(VarNodeToTensor(var_node, var_tensor));
       GELOGD("Init const op tensor.
name = %s, size = %ld", var_name.c_str(), var_tensor->GetSize()); diff --git a/ge/plugin/engine/engine_manage.cc b/ge/plugin/engine/engine_manage.cc index 0e129526..9cc37fd6 100644 --- a/ge/plugin/engine/engine_manage.cc +++ b/ge/plugin/engine/engine_manage.cc @@ -38,7 +38,7 @@ Status EngineManager::RegisterEngine(const std::string &engine_name, DNNEnginePt if (engine_map_ == nullptr) { engine_map_.reset(new (std::nothrow) std::map()); } - + GE_CHECK_NOTNULL(engine_map_); auto it = engine_map_->find(engine_name); if (it != engine_map_->end()) { GELOGW("engine %s already exist.", engine_name.c_str()); From 3e68c392878a9161d9a4ff291469e50487949c78 Mon Sep 17 00:00:00 2001 From: wjm Date: Thu, 10 Jun 2021 13:01:01 +0800 Subject: [PATCH 005/226] fix --- ge/plugin/engine/engine_manage.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/plugin/engine/engine_manage.cc b/ge/plugin/engine/engine_manage.cc index 9cc37fd6..0e129526 100644 --- a/ge/plugin/engine/engine_manage.cc +++ b/ge/plugin/engine/engine_manage.cc @@ -38,7 +38,7 @@ Status EngineManager::RegisterEngine(const std::string &engine_name, DNNEnginePt if (engine_map_ == nullptr) { engine_map_.reset(new (std::nothrow) std::map()); } - GE_CHECK_NOTNULL(engine_map_); + auto it = engine_map_->find(engine_name); if (it != engine_map_->end()) { GELOGW("engine %s already exist.", engine_name.c_str()); From 4c398c9f8b7f1259a67b0526b29c38cbc0692c32 Mon Sep 17 00:00:00 2001 From: wjm Date: Thu, 10 Jun 2021 18:34:11 +0800 Subject: [PATCH 006/226] update submodule --- metadef | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metadef b/metadef index 9c9907b7..3e14f92d 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 9c9907b76a457f456072af96b8cbcfb7943beccc +Subproject commit 3e14f92d47abc9a2e703be2171f047553f7597e0 diff --git a/parser b/parser index 15a27afe..4151e330 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit 15a27afefe45f2abdb78787d629163aab9437599 +Subproject commit 4151e33028c518057289b569b36cd4069af362a4 From bdbfe5eea44e4d40b8a6940c69ab6a26cc2c0efb Mon Sep 17 00:00:00 2001 From: wjm Date: Thu, 10 Jun 2021 23:45:20 +0800 Subject: [PATCH 007/226] fix --- ge/ge_opt_info/ge_opt_info.h | 1 + .../load/model_manager/task_info/memcpy_async_task_info.h | 2 +- ge/graph/preprocess/graph_preprocess.cc | 4 ++-- ge/hybrid/executor/hybrid_model_pipeline_executor.cc | 1 + ge/hybrid/node_executor/hccl/hccl_node_executor.h | 2 +- 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ge/ge_opt_info/ge_opt_info.h b/ge/ge_opt_info/ge_opt_info.h index 935dff25..5cc1063a 100644 --- a/ge/ge_opt_info/ge_opt_info.h +++ b/ge/ge_opt_info/ge_opt_info.h @@ -24,6 +24,7 @@ namespace ge { class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { public: GeOptInfo() = default; + ~GeOptInfo() = default; static Status SetOptInfo(); }; } // namespace ge diff --git a/ge/graph/load/model_manager/task_info/memcpy_async_task_info.h b/ge/graph/load/model_manager/task_info/memcpy_async_task_info.h index 728305ff..4ae03967 100755 --- a/ge/graph/load/model_manager/task_info/memcpy_async_task_info.h +++ b/ge/graph/load/model_manager/task_info/memcpy_async_task_info.h @@ -47,7 +47,7 @@ class MemcpyAsyncTaskInfo : public TaskInfo { uint64_t count_; uint32_t kind_; vector io_addrs_; - int64_t fixed_addr_offset_; + int64_t fixed_addr_offset_ = 0; DavinciModel *davinci_model_ = nullptr; uint32_t args_offset_ = 0; }; diff --git a/ge/graph/preprocess/graph_preprocess.cc 
b/ge/graph/preprocess/graph_preprocess.cc index bc8646e7..d7f33b4b 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -1756,8 +1756,8 @@ Status GraphPrepare::CtrlFlowPreProcess() { PassManager graph_pass; // After InferShape Mark v1 control flow for unknown shape. - auto mark_force_unknown_pass = new (std::nothrow) MarkForceUnknownForCondPass; - GE_CHK_STATUS_RET(graph_pass.AddPass("PreRun::MarkForceUnknownForCondPass", mark_force_unknown_pass)); + GE_CHK_STATUS_RET(graph_pass.AddPass("PreRun::MarkForceUnknownForCondPass", + new (std::nothrow) MarkForceUnknownForCondPass)); GE_CHK_STATUS_RET(graph_pass.Run(compute_graph_)); return SUCCESS; diff --git a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc index b5e66628..57ba20d4 100644 --- a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc +++ b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc @@ -188,6 +188,7 @@ HybridModelPipelineExecutor::HybridModelPipelineExecutor(HybridModel *model, uin config_.num_executors = kNumExecutors; config_.num_stages = model_->GetRootGraphItem()->NumGroups(); config_.device_id = device_id_; + config_.iteration_end = 0; } Status StageExecutor::InitExecutionContext() { diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.h b/ge/hybrid/node_executor/hccl/hccl_node_executor.h index b020208d..757f7593 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.h +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.h @@ -62,7 +62,7 @@ class RdmaNodeTask : public NodeTask { int32_t local_index_ = 0; std::mutex hccl_mutex_; std::condition_variable cond_; - bool skip_flag_; + bool skip_flag_ = false; }; From 478bb52093857f881d722ed12ca99053ffcbe2e7 Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 01:44:03 +0800 Subject: [PATCH 008/226] fix --- .../node_executor/compiledsubgraph/known_node_executor.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index e5663fb8..d343f9fe 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -136,8 +136,7 @@ Status KnownNodeTask::Init(TaskContext &context) { Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *weight_buffer) { GELOGD("[Init][DavinciModel] start"); davinci_model_->InitRuntimeParams(); - GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), - "[Init][VariableMem] failed"); + GE_CHK_STATUS_RET(davinci_model_->InitVariableMem(), "[Init][VariableMem] failed"); int32_t device_id = 0; GE_CHK_RT_RET(rtGetDevice(&device_id)); davinci_model_->SetDeviceId(static_cast(device_id)); @@ -181,7 +180,7 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons } Status KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr &node, - std::shared_ptr &davinci_model) const { + std::shared_ptr &davinci_model) const { // set known node flag as true davinci_model->SetKnownNode(true); davinci_model->SetId(model.GetModelId()); From 52964f64bc3bc25694c6a608980150828c581270 Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 03:30:49 +0800 Subject: [PATCH 009/226] fix --- .../node_executor/compiledsubgraph/known_node_executor.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
index d343f9fe..8b3c691f 100755
--- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
+++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
@@ -247,8 +247,7 @@ Status KnownNodeExecutor::ParseAttrForAllocatingOutputs(NodeItem &node_item, Com
   GE_CHECK_NOTNULL(net_output_desc);
   std::map connected_inputs;
   std::map data_indices;
-  GE_CHK_STATUS_RET(GetDataNodes(graph, data_indices),
-                    "[%s] Failed to get data node indices",
+  GE_CHK_STATUS_RET(GetDataNodes(graph, data_indices), "[%s] Failed to get data node indices",
                     node_item.NodeName().c_str());
   for (const auto &in_data_anchor : net_output_node->GetAllInDataAnchors()) {
     auto out_data_anchor = in_data_anchor->GetPeerOutAnchor();

From 195d299596c579dc96d77c4cf1f818aa1dc083f3 Mon Sep 17 00:00:00 2001
From: wjm
Date: Fri, 11 Jun 2021 04:03:34 +0800
Subject: [PATCH 010/226] update submodule

---
 metadef | 2 +-
 parser  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/metadef b/metadef
index 2f89122e..84e7ab39 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit 2f89122e1fa26b3633a8efa4bf0a0269bebf537e
+Subproject commit 84e7ab39b0daf7ca2b2f5549e3279647da7875e2
diff --git a/parser b/parser
index 4151e330..ffd94df4 160000
--- a/parser
+++ b/parser
@@ -1 +1 @@
-Subproject commit 4151e33028c518057289b569b36cd4069af362a4
+Subproject commit ffd94df471f7dd2b1928cc8d27e43e7210aaa7e7

From d8ba1fb2c0d85436d43d5b0fe132a8c2e2d1724d Mon Sep 17 00:00:00 2001
From: zhengyuanhua
Date: Fri, 11 Jun 2021 09:42:50 +0800
Subject: [PATCH 011/226] remove graph ut from ge

---
 cmake/external_libs/protobuf_shared.cmake     |  12 +-
 cmake/external_libs/protobuf_static.cmake     |  10 +-
 cmake/external_libs/protoc.cmake              |  12 +-
 metadef                                       |   2 +-
 parser                                        |   2 +-
 .../ge_graph/ge_graph_anchor_unittest.cc      | 112 ------------------
 .../ge_graph/ge_model_serialize_unittest.cc   |   3 +-
 .../testcase/ge_graph/ge_tensor_unittest.cc   |  18 +--
 tests/ut/ge/CMakeLists.txt                    |   1 +
 .../dynamic_shape_partition_unittest.cc       |   5 +-
 tests/ut/ge/hybrid/ge_hybrid_unittest.cc      |   3 +-
 11 files changed, 26 insertions(+), 154 deletions(-)

diff --git a/cmake/external_libs/protobuf_shared.cmake b/cmake/external_libs/protobuf_shared.cmake
index 6334c8a3..dfdb0606 100755
--- a/cmake/external_libs/protobuf_shared.cmake
+++ b/cmake/external_libs/protobuf_shared.cmake
@@ -11,14 +11,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR
     message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.")
 endif()
 if (GE_PB_PKG)
-    set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz")
+    set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz")
 else()
     if (ENABLE_GITEE)
-        set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz")
-        set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236")
+        set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz")
+        set(MD5 "f4489cb88922ad9c58cbe3308d59cee5")
     else()
-        set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz")
-        set(MD5 "3d9e32700639618a4d2d342c99d4507a")
+        set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz")
+        set(MD5 "1a6274bc4a65b55a6fa70e264d796490")
     endif ()
 endif()
@@ -58,7 +58,7 @@ target_include_directories(ascend_protobuf INTERFACE ${PROTOBUF_SHARED_PKG_DIR}/
 set(INSTALL_BASE_DIR "")
 set(INSTALL_LIBRARY_DIR lib)
 
-install(FILES
${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.8.0.0 OPTIONAL +install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so.3.13.0.0 OPTIONAL DESTINATION ${INSTALL_LIBRARY_DIR}) install(FILES ${PROTOBUF_SHARED_PKG_DIR}/${CMAKE_INSTALL_LIBDIR}/ascend_protobuf.so OPTIONAL DESTINATION ${INSTALL_LIBRARY_DIR}) diff --git a/cmake/external_libs/protobuf_static.cmake b/cmake/external_libs/protobuf_static.cmake index 22f537cf..b8ff90bb 100755 --- a/cmake/external_libs/protobuf_static.cmake +++ b/cmake/external_libs/protobuf_static.cmake @@ -16,11 +16,11 @@ if(GE_PB_PKG) set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") else() if (ENABLE_GITEE) - set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") - set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") + set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") + set(MD5 "f4489cb88922ad9c58cbe3308d59cee5") else() - set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") - set(MD5 "3d9e32700639618a4d2d342c99d4507a") + set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz") + set(MD5 "1a6274bc4a65b55a6fa70e264d796490") endif () endif() @@ -29,8 +29,6 @@ set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") set(PROTOBUF_STATIC_PKG_DIR ${CMAKE_INSTALL_PREFIX}/protobuf_static) ExternalProject_Add(protobuf_static_build URL ${REQ_URL} - #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz - #SOURCE_DIR ${METADEF_DIR}/../../third_party/protobuf/src/protobuf-3.8.0 TLS_VERIFY OFF CONFIGURE_COMMAND ${CMAKE_COMMAND} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} diff --git a/cmake/external_libs/protoc.cmake b/cmake/external_libs/protoc.cmake index 421f2632..f16f5e22 100755 --- a/cmake/external_libs/protoc.cmake +++ b/cmake/external_libs/protoc.cmake @@ -13,14 +13,14 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR endif() if(GE_PB_PKG) - set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") + set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz") else() if (ENABLE_GITEE) - set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.8.0.tar.gz") - set(MD5 "eba86ae9f07ba5cfbaf8af3bc4e84236") + set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") + set(MD5 "f4489cb88922ad9c58cbe3308d59cee5") else() - set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz") - set(MD5 "3d9e32700639618a4d2d342c99d4507a") + set(REQ_URL "https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz") + set(MD5 "1a6274bc4a65b55a6fa70e264d796490") endif () endif() @@ -28,8 +28,6 @@ set(protobuf_CXXFLAGS "-Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fst set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") ExternalProject_Add(protoc_build URL ${REQ_URL} - #URL /home/txd/workspace/linux_cmake/pkg/protobuf-3.8.0.tar.gz - #SOURCE_DIR ${GE_CODE_DIR}/../third_party/protobuf/src/protobuf-3.8.0 TLS_VERIFY OFF CONFIGURE_COMMAND ${CMAKE_COMMAND} -Dprotobuf_WITH_ZLIB=OFF -Dprotobuf_BUILD_TESTS=OFF -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS=${protobuf_CXXFLAGS} -DCMAKE_CXX_LDFLAGS=${protobuf_LDFLAGS} -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/protoc /cmake BUILD_COMMAND $(MAKE) diff --git a/metadef b/metadef index b27915cd..c6030152 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit b27915cd37919430a61953f8998b7acce4a60177 +Subproject commit c6030152c6dc05515115765babb5d64fde649df4 diff --git a/parser 
b/parser index e75eda62..155d3262 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit e75eda62de2b51a0bded5481ca81eb8fc7bf376e +Subproject commit 155d3262ba17f800094abb58b6a809b041cf0a74 diff --git a/tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc b/tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc index 5cf7569b..85328b27 100644 --- a/tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc +++ b/tests/ut/common/graph/testcase/ge_graph/ge_graph_anchor_unittest.cc @@ -272,115 +272,3 @@ TEST_F(UtestGeAnchor, graph_utils_test) { EXPECT_EQ(GraphUtils::RemoveEdge(conv_node->GetOutDataAnchor(0), bn_node->GetInControlAnchor()), GRAPH_SUCCESS); EXPECT_EQ(GraphUtils::RemoveEdge(conv_node->GetOutDataAnchor(0), bn_node->GetInControlAnchor()), GRAPH_FAILED); } - -TEST_F(UtestGeAnchor, data_anchor_replace_peer) { - ComputeGraphPtr graph_ptr = std::make_shared("graph"); - OpDescPtr in_op_ptr = std::make_shared("in_op_1", "float"); - in_op_ptr->AddInputDesc("x1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddInputDesc("x2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddInputDesc("x3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddOutputDesc("y1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddOutputDesc("y2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddOutputDesc("y3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - NodePtr node1 = graph_ptr->AddNode(in_op_ptr); - NodePtr node2 = graph_ptr->AddNode(in_op_ptr); - NodePtr node3 = graph_ptr->AddNode(in_op_ptr); - - OutDataAnchorPtr out_data_anchor = node1->GetOutDataAnchor(1); - InDataAnchorPtr in_data_anchor = node2->GetInDataAnchor(1); - EXPECT_EQ(out_data_anchor != nullptr, true); - EXPECT_EQ(in_data_anchor != nullptr, true); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(0)), GRAPH_SUCCESS); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(1)), GRAPH_SUCCESS); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(2)), GRAPH_SUCCESS); - - size_t out_idx = 0; - for (; out_idx < out_data_anchor->peer_anchors_.size(); out_idx++) { - if (out_data_anchor->peer_anchors_[out_idx].lock() == in_data_anchor) { - break; - } - } - EXPECT_EQ(out_idx, 1); - - size_t in_idx = 0; - for (; in_idx < in_data_anchor->peer_anchors_.size(); in_idx++) { - if (in_data_anchor->peer_anchors_[in_idx].lock() == out_data_anchor) { - break; - } - } - EXPECT_EQ(in_idx, 0); - - out_data_anchor->ReplacePeer(in_data_anchor, node3->GetInDataAnchor(1), node3->GetOutDataAnchor(1)); - - size_t out_idx1 = 0; - for (; out_idx1 < out_data_anchor->peer_anchors_.size(); out_idx1++) { - if (out_data_anchor->peer_anchors_[out_idx1].lock() == node3->GetInDataAnchor(1)) { - break; - } - } - EXPECT_EQ(out_idx1, out_idx); - - size_t in_idx1 = 0; - for (; in_idx1 < in_data_anchor->peer_anchors_.size(); in_idx1++) { - if (in_data_anchor->peer_anchors_[in_idx1].lock() == node3->GetOutDataAnchor(1)) { - break; - } - } - EXPECT_EQ(in_idx1, in_idx); -} - -TEST_F(UtestGeAnchor, graph_utils_insert_node) { - ComputeGraphPtr graph_ptr = std::make_shared("graph"); - OpDescPtr in_op_ptr = std::make_shared("in_op_1", "float"); - in_op_ptr->AddInputDesc("x1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddInputDesc("x2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddInputDesc("x3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - 
in_op_ptr->AddOutputDesc("y1", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddOutputDesc("y2", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - in_op_ptr->AddOutputDesc("y3", GeTensorDesc(GeShape({1, 32, 8, 8}), FORMAT_NCHW)); - NodePtr node1 = graph_ptr->AddNode(in_op_ptr); - NodePtr node2 = graph_ptr->AddNode(in_op_ptr); - NodePtr node3 = graph_ptr->AddNode(in_op_ptr); - - OutDataAnchorPtr out_data_anchor = node1->GetOutDataAnchor(1); - InDataAnchorPtr in_data_anchor = node2->GetInDataAnchor(1); - EXPECT_EQ(out_data_anchor != nullptr, true); - EXPECT_EQ(in_data_anchor != nullptr, true); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(0)), GRAPH_SUCCESS); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(1)), GRAPH_SUCCESS); - EXPECT_EQ(node1->GetOutDataAnchor(1)->LinkTo(node2->GetInDataAnchor(2)), GRAPH_SUCCESS); - - size_t out_idx = 0; - for (; out_idx < out_data_anchor->peer_anchors_.size(); out_idx++) { - if (out_data_anchor->peer_anchors_[out_idx].lock() == in_data_anchor) { - break; - } - } - EXPECT_EQ(out_idx, 1); - - size_t in_idx = 0; - for (; in_idx < in_data_anchor->peer_anchors_.size(); in_idx++) { - if (in_data_anchor->peer_anchors_[in_idx].lock() == out_data_anchor) { - break; - } - } - EXPECT_EQ(in_idx, 0); - - GraphUtils::InsertNodeBetweenDataAnchors(out_data_anchor, in_data_anchor, node3); - - size_t out_idx1 = 0; - for (; out_idx1 < out_data_anchor->peer_anchors_.size(); out_idx1++) { - if (out_data_anchor->peer_anchors_[out_idx1].lock() == node3->GetInDataAnchor(0)) { - break; - } - } - EXPECT_EQ(out_idx1, out_idx); - - size_t in_idx1 = 0; - for (; in_idx1 < in_data_anchor->peer_anchors_.size(); in_idx1++) { - if (in_data_anchor->peer_anchors_[in_idx1].lock() == node3->GetOutDataAnchor(0)) { - break; - } - } - EXPECT_EQ(in_idx1, in_idx); -} diff --git a/tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc b/tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc index 0366446c..c91f68df 100644 --- a/tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc +++ b/tests/ut/common/graph/testcase/ge_graph/ge_model_serialize_unittest.cc @@ -30,6 +30,7 @@ #include "graph/model_serialize.h" #include "graph/detail/model_serialize_imp.h" +#include "graph/node_impl.h" #include "graph/ge_attr_value.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" @@ -1062,7 +1063,7 @@ TEST(UtestGeModelSerialize, test_model_serialize_imp_invalid_param) { auto graph = std::make_shared("test_graph"); auto node = graph->AddNode(std::make_shared()); - node->op_ = nullptr; + node->impl_->op_ = nullptr; ge::proto::ModelDef model_def; Model model; model.SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph)); diff --git a/tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc b/tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc index aa43ac99..838df735 100644 --- a/tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc +++ b/tests/ut/common/graph/testcase/ge_graph/ge_tensor_unittest.cc @@ -25,6 +25,7 @@ #include "graph/ge_attr_value.h" #include "graph/tensor.h" #include "graph/utils/tensor_utils.h" +#include "graph/ge_tensor_impl.h" #undef private #undef protected @@ -196,23 +197,6 @@ TEST_F(UtestGeTensor, test_shape_copy_move) { EXPECT_EQ(shape4.GetDimNum(), 3); } -TEST_F(UtestGeTensor, test_tensor_desc_invalid_null) { - GeTensorDesc tensor_desc(nullptr, nullptr); - EXPECT_EQ(tensor_desc.GetDataType(), DT_UNDEFINED); - 
EXPECT_EQ(tensor_desc.GetFormat(), FORMAT_RESERVED); - EXPECT_EQ(tensor_desc.MutableShape().shape_def_.GetProtoMsg(), nullptr); - - GeTensorDesc tensor_desc2; - EXPECT_EQ(tensor_desc2.GetDataType(), DT_FLOAT); - EXPECT_EQ(tensor_desc2.GetFormat(), FORMAT_ND); - - tensor_desc2.SetDataType(DT_DUAL_SUB_INT8); - EXPECT_EQ(tensor_desc2.GetDataType(), DT_DUAL_SUB_INT8); - - TensorUtils::SetWeightSize(tensor_desc, 100); - EXPECT_EQ(TensorUtils::GetWeightSize(tensor_desc), 0); -} - TEST_F(UtestGeTensor, test_tensor_invalid_null) { ProtoMsgOwner msg_owner; GeTensor tensor(msg_owner, nullptr); diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 63579109..0d1ae079 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -121,6 +121,7 @@ set(GRAPH_SRC_FILES "${GE_CODE_DIR}/metadef/register/op_tiling.cpp" "${GE_CODE_DIR}/metadef/graph/utils/tuning_utils.cc" "${GE_CODE_DIR}/metadef/register/op_tiling_registry.cpp" + "${GE_CODE_DIR}/metadef/register/op_tiling_registry_impl.cpp" ) set(PARSER_SRC_FILES diff --git a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc index c8abadb5..ec1caebd 100644 --- a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc +++ b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc @@ -20,6 +20,7 @@ #define protected public #include "graph/partition/dynamic_shape_partition.h" #include "compute_graph.h" +#include "graph/compute_graph_impl.h" #include "inc/framework/common/types.h" #include "utils/graph_utils.h" #include "graph/debug/ge_attr_define.h" @@ -111,9 +112,9 @@ TEST_F(UtestDynamicShapePartition, merge_control_flow_group) { (void)AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true); (void)AttrUtils::SetInt(merge->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3); - EXPECT_EQ(graph->sub_graph_.size(), 0); + EXPECT_EQ(graph->impl_->sub_graph_.size(), 0); DynamicShapePartitioner partitioner(graph); EXPECT_EQ(partitioner.Partition(), SUCCESS); - EXPECT_EQ(graph->sub_graph_.size(), 1); + EXPECT_EQ(graph->impl_->sub_graph_.size(), 1); } } // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 7a2a5dfe..f6c75d50 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -40,6 +40,7 @@ #include "graph/types.h" #include "graph/utils/tensor_utils.h" #include "graph/testcase/ge_graph/graph_builder_utils.h" +#include "graph/op_desc_impl.h" #undef private #undef protected @@ -736,7 +737,7 @@ TEST_F(UtestGeHybrid, TestParseDependencies) { std::vector deps; deps.push_back("Data"); auto op_desc = netoutput->GetOpDesc(); - op_desc->input_name_idx_["Data"] = 0; + op_desc->impl_->input_name_idx_["Data"] = 0; auto data_desc = data->GetOpDesc(); auto tensor = std::make_shared(); auto tensor_desc = data_desc->MutableInputDesc(0); From fa1d5b78b6d9c2fa0729a75ba915728949311afc Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 06:47:47 +0800 Subject: [PATCH 012/226] fix coverity --- ge/graph/load/model_manager/model_manager.cc | 4 +++- ge/graph/load/model_manager/task_info/hccl_task_info.cc | 2 +- .../load/model_manager/task_info/kernel_ex_task_info.cc | 2 +- ge/graph/load/model_manager/task_info/kernel_task_info.cc | 2 +- ge/graph/load/model_manager/zero_copy_offset.cc | 6 ++++-- ge/hybrid/model/hybrid_model_builder.cc | 2 +- 6 files changed, 11 insertions(+), 7 deletions(-) diff --git 
a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 45540ba0..8b44daea 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -569,6 +569,7 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector(cur_dynamic_dims.size() * sizeof(int32_t)); GE_CHK_BOOL_EXEC(memcpy_s(data.data, length, cur_dynamic_dims.data(), length) == EOK, REPORT_CALL_ERROR("E19999", "memcpy data failed, size:%u", length); + delete[] reinterpret_cast(data.data); return INTERNAL_ERROR, "[Memcpy][Data] failed, size:%u.", length); data.length = length; input_data.blobs.push_back(data); @@ -1790,7 +1791,8 @@ Status ModelManager::LaunchKernelCheckAicpuOp(std::vector &aicpu_op std::vector op_name; op_name.clear(); op_name.resize(kOpNameMaxSize); - GE_CHK_RT(rtMemcpy(op_name.data(), aicpu_info.opLen, reinterpret_cast(aicpu_info.opType), + GE_CHK_RT(rtMemcpy(op_name.data(), aicpu_info.opLen, + reinterpret_cast(static_cast(aicpu_info.opType)), aicpu_info.opLen, RT_MEMCPY_DEVICE_TO_HOST)); std::string kernel_type = (static_cast(aicpu_info.kernelsType) == TF_KERNEL) ? "TF_KERNEL" : "CPU_KERNEL"; diff --git a/ge/graph/load/model_manager/task_info/hccl_task_info.cc b/ge/graph/load/model_manager/task_info/hccl_task_info.cc index c3c5c8b7..a3cef836 100644 --- a/ge/graph/load/model_manager/task_info/hccl_task_info.cc +++ b/ge/graph/load/model_manager/task_info/hccl_task_info.cc @@ -329,7 +329,7 @@ void HcclTaskInfo::GetPrivateDefByTaskDef(const domi::TaskDef &task) { // Get privateDef and opsKernelStorePtr from taskDef and save them in taskInfo GELOGI("get custom info in modelTaskDef."); ops_kernel_store_ = nullptr; - void *ops_kernel_store_name_temp = reinterpret_cast(task.ops_kernel_store_ptr()); + void *ops_kernel_store_name_temp = reinterpret_cast(static_cast(task.ops_kernel_store_ptr())); if (ops_kernel_store_name_temp != nullptr) { ops_kernel_store_ = std::move(ops_kernel_store_name_temp); std::string private_def_temp = task.private_def(); diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index a4b3de75..1257b192 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -420,7 +420,7 @@ Status KernelExTaskInfo::Distribute() { // xxxxxxxx xxxxxxxx xxxxxxxx xx10xxxx: HOST_ONLY // xxxxxxxx xxxxxxxx xxxxxxxx xx11xxxx: HOST_FIRST if (topic_type_flag_ > 0) { - dump_flag_ = dump_flag_ | topic_type_flag_; + dump_flag_ = dump_flag_ | static_cast(topic_type_flag_); } rtError_t rt_ret = rtKernelLaunchEx(kernel_buf_, kernel_buf_size_, dump_flag_, stream_); if (rt_ret != RT_ERROR_NONE) { diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 07ad63ca..e4514d55 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -436,7 +436,7 @@ Status KernelTaskInfo::Distribute() { // xxxxxxxx xxxxxxxx xxxxxxxx xx01xxxx: DEVICE_FIRST // xxxxxxxx xxxxxxxx xxxxxxxx xx10xxxx: HOST_ONLY // xxxxxxxx xxxxxxxx xxxxxxxx xx11xxxx: HOST_FIRST - dump_flag_ = dump_flag_ | topic_type_flag_; + dump_flag_ = dump_flag_ | static_cast(topic_type_flag_); } GELOGI("distribute task info kernel_type %d, flag %d", kernel_type_, dump_flag_); // blockDim is reserved parameter, set to 1 diff --git 
a/ge/graph/load/model_manager/zero_copy_offset.cc b/ge/graph/load/model_manager/zero_copy_offset.cc index 4a57a899..2a0423c7 100644 --- a/ge/graph/load/model_manager/zero_copy_offset.cc +++ b/ge/graph/load/model_manager/zero_copy_offset.cc @@ -62,7 +62,8 @@ Status ZeroCopyOffset::InitInputDataInfo(int64_t output_size, void *virtual_addr for (size_t index = 0; index < zero_copy_basic_offset_.size(); ++index) { if (zero_copy_basic_offset_.at(index) == virtual_addr_offset) { out_count++; - uint64_t out_offset = reinterpret_cast(virtual_addr) + zero_copy_relative_offset_.at(index); + uint64_t out_offset = static_cast(reinterpret_cast(virtual_addr)) + + zero_copy_relative_offset_.at(index); data_info_.emplace_back(output_size, reinterpret_cast(static_cast(out_offset))); relative_offset_.emplace_back(zero_copy_relative_offset_.at(index)); GELOGI("[ZCPY] virtual_addr: %p has been l2-fusion to %lu, need copy data_size is %ld.", basic_addr_, @@ -117,7 +118,8 @@ Status ZeroCopyOffset::InitOutputDataInfo(const vector &input_size_list for (size_t index = 0; index < zero_copy_basic_offset_.size(); ++index) { if (zero_copy_basic_offset_.at(index) == virtual_addr_offset) { in_count++; - uint64_t in_offset = reinterpret_cast(virtual_addr_list[idx]) + zero_copy_relative_offset_.at(index); + uint64_t in_offset = static_cast(reinterpret_cast(virtual_addr_list[idx])) + + zero_copy_relative_offset_.at(index); int64_t real_data_size = ModelUtils::GetInputSize(op_desc).at(idx); data_info_.emplace_back(real_data_size, reinterpret_cast(static_cast(in_offset))); relative_offset_.emplace_back(zero_copy_relative_offset_.at(index)); diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 554ddbbb..62e941eb 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -945,7 +945,7 @@ Status HybridModelBuilder::VarNodeToTensor(const NodePtr &var_node, std::unique_ } int64_t var_size = CalcVarSizeInBytes(*tensor_desc); - // var size is only for checking, will not allocate any memory by it + GE_CHECK_GE(var_size, 0); tensor.reset(new(std::nothrow)TensorValue(dev_mem, static_cast(var_size))); GE_CHECK_NOTNULL(tensor); GELOGI("Get var memory addr %p for node %s, size = %ld, mem_type=%u", dev_mem, var_name.c_str(), var_size, mem_type); From 84cb4ad61655e6867be8cddca024f1890cedc5ce Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 17:36:52 +0800 Subject: [PATCH 013/226] fix --- .../format_transfers/format_transfer_fractal_nz.cc | 4 ++++ .../format_transfers/format_transfer_fractal_zz.cc | 4 ++++ .../compiledsubgraph/known_node_executor.cc | 11 +++-------- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc index 24be6023..798ec55a 100755 --- a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc @@ -185,6 +185,7 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con auto src_offset = (src_h_head + w1_idx * w0) * size; auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? 
dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { @@ -202,6 +203,7 @@ Status TransFormatFromNdToFracNz(const TransArgs &args, TransResult &result, con auto src_offset = (src_h_head + src_w_idx) * size; auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { @@ -267,6 +269,7 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con auto dst_offset = (dst_h_head + w1_idx * w0) * size; auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { @@ -285,6 +288,7 @@ Status TransFormatFromFracNzToNd(const TransArgs &args, TransResult &result, con auto dst_offset = (dst_h_head + dst_w_idx) * size; auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc index 1cb142b3..14315084 100755 --- a/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc @@ -193,6 +193,7 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { @@ -213,6 +214,7 @@ Status TransFormatFromNdToFracZz(const TransArgs &args, TransResult &result, con auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { @@ -284,6 +286,7 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size * w0)); if (ret != EOK) { @@ -304,6 +307,7 @@ Status TransFormatFromFracZzToNd(const TransArgs &args, TransResult &result, con auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? 
dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 8b3c691f..b4e26332 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -260,11 +260,8 @@ Status KnownNodeExecutor::ParseAttrForAllocatingOutputs(NodeItem &node_item, Com GE_CHECK_NOTNULL(op_desc); auto src_op_type = src_node->GetType(); auto output_index = in_data_anchor->GetIdx(); - GELOGD("Node %s, output %d, src node = %s, src node type = %s", - node_item.NodeName().c_str(), - output_index, - src_node->GetName().c_str(), - src_op_type.c_str()); + GELOGD("Node %s, output %d, src node = %s, src node type = %s", node_item.NodeName().c_str(), output_index, + src_node->GetName().c_str(), src_op_type.c_str()); // parse reuse outputs std::string input_key = std::to_string(op_desc->GetId()) + "_" + std::to_string(out_data_anchor->GetIdx()); auto it = connected_inputs.find(input_key); @@ -285,9 +282,7 @@ Status KnownNodeExecutor::ParseAttrForAllocatingOutputs(NodeItem &node_item, Com GELOGD("[%s] output[%u] reuses input[%d]", node_item.NodeName().c_str(), output_index, data_index); } else if (src_op_type == CONSTANTOP || src_op_type == CONSTANT || src_op_type == VARIABLE) { node_item.ref_outputs.emplace(output_index, src_node); - GELOGD("[%s] output[%d] ref to node [%s]", - node_item.NodeName().c_str(), - output_index, + GELOGD("[%s] output[%d] ref to node [%s]", node_item.NodeName().c_str(), output_index, src_node->GetName().c_str()); } } From 16c5ba59006b40a1fd7f0c1fa9a23fa42bd78117 Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 18:20:07 +0800 Subject: [PATCH 014/226] add --- .../formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc | 1 + .../formats/format_transfers/format_transfer_fractal_nz.cc | 2 +- .../formats/format_transfers/format_transfer_fractal_z.cc | 3 +++ .../formats/format_transfers/format_transfer_fracz_hwcn.cc | 1 + .../formats/format_transfers/format_transfer_fracz_nchw.cc | 1 + .../formats/format_transfers/format_transfer_fracz_nhwc.cc | 1 + .../formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc | 1 + .../formats/format_transfers/format_transfer_nc1hwc0_nchw.cc | 1 + .../formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc | 1 + .../formats/format_transfers/format_transfer_nchw_nc1hwc0.cc | 1 + .../formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc | 1 + .../formats/format_transfers/format_transfer_transpose.cc | 1 + 12 files changed, 14 insertions(+), 1 deletion(-) diff --git a/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc b/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc index ce271c6d..aae95584 100644 --- a/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc +++ b/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc @@ -123,6 +123,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? 
total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc index 798ec55a..4f597e32 100755 --- a/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc @@ -59,7 +59,7 @@ bool CheckShape(Format format, const ShapeVector &shape) { return CheckShapeValid(shape, kDimSize4D); default: std::string error = "Trans format between " + FmtToStr(TypeUtils::FormatToSerialString(format)) + - " and FORMAT_FRACTAL_NZ is not supported."; + " and FORMAT_FRACTAL_NZ is not supported."; GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); return false; } diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc index 38125979..882a2a68 100644 --- a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc @@ -226,6 +226,7 @@ Status TransFormatFromNchwToFz(const TransArgs &args, TransResult &result) { auto protected_size = dst_size - offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); errno_t ret = EOK; if (need_pad_zero) { ret = memset_s(dst.get() + offset, static_cast(protected_size), 0, static_cast(size)); @@ -390,6 +391,7 @@ Status TransFormatHwcnToFz(const TransArgs &args, TransResult &result) { auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto pad_zero = ((c1i * c0 + c0i) >= c) || (n1n0i >= n); errno_t ret = EOK; if (pad_zero) { @@ -474,6 +476,7 @@ Status TransFormatNhwcToFz(const TransArgs &args, TransResult &result) { auto protected_size = dst_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? dst_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto pad_zero = ((c1i * c0 + c0i) >= c) || (n1n0i >= n); errno_t ret = EOK; if (pad_zero) { diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc index f6af7534..abe6263b 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc @@ -128,6 +128,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto dst_offset = dst_idx * size; auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? 
total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc index aaeca490..58073397 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc @@ -130,6 +130,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto dst_offset = dst_idx * size; auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc index 1e71ea09..3122f137 100755 --- a/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc @@ -128,6 +128,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, int size auto dst_offset = dst_idx * size; auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc b/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc index cb7f889b..c597cde0 100755 --- a/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc +++ b/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc @@ -149,6 +149,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); int64_t c_idx = c0_idx + c1_idx * c0; int64_t src_idx = h_idx * wcn + w_idx * cn + c_idx * n + n_idx; auto src_offset = src_idx * size; diff --git a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc index 09ff45d9..c442bee9 100755 --- a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc +++ b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc @@ -129,6 +129,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? 
total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc index e9e41cd1..603ddffa 100755 --- a/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc +++ b/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc @@ -129,6 +129,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset, static_cast(protected_size), args.data + src_offset, static_cast(size)); if (ret != EOK) { diff --git a/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc b/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc index ea2b1d7f..5cab311d 100755 --- a/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc +++ b/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc @@ -144,6 +144,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); int64_t cIdx = c0_idx + c1_idx * c0; int64_t srcIdx = n_idx * chw + cIdx * hw + h_idx * w + w_idx; auto src_offset = srcIdx * size; diff --git a/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc b/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc index 518790b6..939c967c 100755 --- a/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc +++ b/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc @@ -149,6 +149,7 @@ Status GetDstDataAfterTrans(const TransArgs &args, TransResult &result, const in auto protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) ? total_size - dst_offset : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); int64_t c_idx = c0_idx + c1_idx * c0; int64_t src_idx = n_idx * hwc + h_idx * wc + w_idx * c + c_idx; auto src_offset = src_idx * size; diff --git a/ge/common/formats/format_transfers/format_transfer_transpose.cc b/ge/common/formats/format_transfers/format_transfer_transpose.cc index 54c5444b..9a4d3fd6 100755 --- a/ge/common/formats/format_transfers/format_transfer_transpose.cc +++ b/ge/common/formats/format_transfers/format_transfer_transpose.cc @@ -171,6 +171,7 @@ Status Transpose(const uint8_t *src, const std::vector &src_shape, Data auto protected_size = dst_size - dst_offset_bytes < static_cast(SECUREC_MEM_MAX_LEN) ? 
dst_size - dst_offset_bytes : static_cast(SECUREC_MEM_MAX_LEN); + GE_CHECK_GE(protected_size, 0); auto ret = memcpy_s(dst.get() + dst_offset_bytes, static_cast(protected_size), src + src_offset, static_cast(data_size)); if (ret != EOK) { From 92c6de14f384d4f81e4129a5aa2d7b202be7647b Mon Sep 17 00:00:00 2001 From: wjm Date: Fri, 11 Jun 2021 18:21:39 +0800 Subject: [PATCH 015/226] add --- .../compiledsubgraph/known_node_executor.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index b4e26332..8b3c691f 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -260,8 +260,11 @@ Status KnownNodeExecutor::ParseAttrForAllocatingOutputs(NodeItem &node_item, Com GE_CHECK_NOTNULL(op_desc); auto src_op_type = src_node->GetType(); auto output_index = in_data_anchor->GetIdx(); - GELOGD("Node %s, output %d, src node = %s, src node type = %s", node_item.NodeName().c_str(), output_index, - src_node->GetName().c_str(), src_op_type.c_str()); + GELOGD("Node %s, output %d, src node = %s, src node type = %s", + node_item.NodeName().c_str(), + output_index, + src_node->GetName().c_str(), + src_op_type.c_str()); // parse reuse outputs std::string input_key = std::to_string(op_desc->GetId()) + "_" + std::to_string(out_data_anchor->GetIdx()); auto it = connected_inputs.find(input_key); @@ -282,7 +285,9 @@ Status KnownNodeExecutor::ParseAttrForAllocatingOutputs(NodeItem &node_item, Com GELOGD("[%s] output[%u] reuses input[%d]", node_item.NodeName().c_str(), output_index, data_index); } else if (src_op_type == CONSTANTOP || src_op_type == CONSTANT || src_op_type == VARIABLE) { node_item.ref_outputs.emplace(output_index, src_node); - GELOGD("[%s] output[%d] ref to node [%s]", node_item.NodeName().c_str(), output_index, + GELOGD("[%s] output[%d] ref to node [%s]", + node_item.NodeName().c_str(), + output_index, src_node->GetName().c_str()); } } From bdee8d1e058bd29ec778b90cc8fd7a3da8675e0d Mon Sep 17 00:00:00 2001 From: wjm Date: Sat, 12 Jun 2021 05:43:45 +0800 Subject: [PATCH 016/226] fix --- .../format_transfer_fracz_hwcn.cc | 16 ++++----- ge/common/helper/model_cache_helper.cc | 7 ++++ .../node_executor/hccl/hccl_node_executor.cc | 34 +++++++++++-------- 3 files changed, 33 insertions(+), 24 deletions(-) mode change 100755 => 100644 ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc diff --git a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc old mode 100755 new mode 100644 index abe6263b..ed3a062c --- a/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc +++ b/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc @@ -17,6 +17,7 @@ #include "common/formats/format_transfers/format_transfer_fracz_hwcn.h" #include + #include #include "common/formats/utils/formats_definitions.h" @@ -35,8 +36,8 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { auto dst_shape = args.dst_shape; if (args.src_format != FORMAT_FRACTAL_Z || args.dst_format != FORMAT_HWCN) { std::string error = "Dose not support trans format from " + - FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + - FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); + FmtToStr(TypeUtils::FormatToSerialString(args.src_format)) + " to " + + 
FmtToStr(TypeUtils::FormatToSerialString(args.dst_format)); GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_FORMAT_INVALID, error.c_str()); return ACL_ERROR_GE_FORMAT_INVALID; } @@ -52,15 +53,13 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { if (!CheckShapeValid(src_shape, kFracZDimsNum)) { GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "[Check][Shape]Value is invalid, src shape %s", ShapeToString(src_shape).c_str()); - REPORT_CALL_ERROR("E19999", "Src shape %s check invalid", - ShapeToString(src_shape).c_str()); + REPORT_CALL_ERROR("E19999", "Src shape %s check invalid", ShapeToString(src_shape).c_str()); return ACL_ERROR_GE_SHAPE_INVALID; } if (!CheckShapeValid(dst_shape, kHwcnDimsNum)) { GELOGE(ACL_ERROR_GE_SHAPE_INVALID, "[Check][Shape]Value is invalid, dst shape %s", ShapeToString(dst_shape).c_str()); - REPORT_CALL_ERROR("E19999", "Dst shape %s check invalid", - ShapeToString(dst_shape).c_str()); + REPORT_CALL_ERROR("E19999", "Dst shape %s check invalid", ShapeToString(dst_shape).c_str()); return ACL_ERROR_GE_SHAPE_INVALID; } int64_t c0 = GetCubeSizeByDataType(args.src_data_type); @@ -71,9 +70,8 @@ Status CheckArgsForFracZToHwcn(const TransArgs &args) { int64_t n0 = Ceil(dst_shape.at(kHwcnN), static_cast(kNiSize)); if (src_shape.at(kFracZHWC1) != dst_shape.at(kHwcnH) * dst_shape.at(kHwcnW) * c1 || src_shape.at(kFracZC0) != c0 || src_shape.at(kFracZNi) != kNiSize || src_shape.at(kFracZN0) != n0) { - std::string error = "Failed to check relationship between src shape" + - FmtToStr(ShapeToString(src_shape)) + " and dst shape" + - FmtToStr(ShapeToString(dst_shape)); + std::string error = "Failed to check relationship between src shape" + FmtToStr(ShapeToString(src_shape)) + + " and dst shape" + FmtToStr(ShapeToString(dst_shape)); GE_ERRORLOG_AND_ERRORMSG(ACL_ERROR_GE_SHAPE_INVALID, error.c_str()); return ACL_ERROR_GE_SHAPE_INVALID; } diff --git a/ge/common/helper/model_cache_helper.cc b/ge/common/helper/model_cache_helper.cc index 9cd88ef1..0e6c6329 100755 --- a/ge/common/helper/model_cache_helper.cc +++ b/ge/common/helper/model_cache_helper.cc @@ -1679,6 +1679,13 @@ Status ModelCacheHelper::LoadOmModelFromCache(GeModelPtr &ge_model) const { GELOGW("LoadOmModelFromCache: Load model from file failed. 
ret = %u", ret); return ret; } + std::function callback = [&]() { + if (model_data.model_data != nullptr) { + delete[] reinterpret_cast(model_data.model_data); + model_data.model_data = nullptr; + } + }; + GE_MAKE_GUARD(release, callback); ModelHelper model_helper; ret = model_helper.LoadModel(model_data); diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc index 31f2c7a1..6be9849c 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc @@ -15,15 +15,16 @@ */ #include "hybrid/node_executor/hccl/hccl_node_executor.h" + #include "common/ge/plugin_manager.h" #include "common/math/math_util.h" #include "external/graph/attr_value.h" +#include "external/graph/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/util/hcom_util.h" #include "graph/utils/type_utils.h" -#include "external/graph/types.h" -#include "hybrid/executor/hybrid_execution_context.h" #include "hccl/hcom.h" +#include "hybrid/executor/hybrid_execution_context.h" #include "runtime/event.h" namespace ge { @@ -267,14 +268,16 @@ Status RdmaNodeTask::ExtractTensor(TaskContext &context, vector do } Status BuildAllToAllVparams(TaskContext &context, HcomAllToAllVParams ¶ms) { - void **input_addrs[kAllToAllVInputNums] = {¶ms.sendbuf, ¶ms.sendcounts, ¶ms.sdispls, - ¶ms.recvcounts, ¶ms.rdispls}; + void **input_addrs[kAllToAllVInputNums] = {¶ms.sendbuf, ¶ms.sendcounts, ¶ms.sdispls, ¶ms.recvcounts, + ¶ms.rdispls}; for (size_t i = 0; i < kAllToAllVInputNums; ++i) { auto addr = context.MutableInput(i); GE_CHECK_NOTNULL(addr); @@ -383,13 +386,14 @@ Status BuildAllToAllVparams(TaskContext &context, HcomAllToAllVParams ¶ms) { } params.sendtype = iter->second; params.recvtype = iter->second; + params.group = nullptr; return SUCCESS; } Status BuildGatherAllToAllParams(TaskContext &context, HcomGatherAllToAllVParams ¶ms) { - void **input_addrs[kGatherAllToAllVInputNums] = {¶ms.addrInfo, ¶ms.addrInfoCountPerRank, - ¶ms.recvcounts, ¶ms.rdispls}; + void **input_addrs[kGatherAllToAllVInputNums] = {¶ms.addrInfo, ¶ms.addrInfoCountPerRank, ¶ms.recvcounts, + ¶ms.rdispls}; for (size_t i = 0; i < kGatherAllToAllVInputNums; ++i) { auto addr = context.MutableInput(i); GE_CHECK_NOTNULL(addr); @@ -418,8 +422,9 @@ Status BuildGatherAllToAllParams(TaskContext &context, HcomGatherAllToAllVParams params.recvtype = iter->second; int64_t addr_len = 0; - (void) ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len); + (void)ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len); params.addrLength = static_cast(addr_len); + params.group = nullptr; return SUCCESS; } @@ -428,7 +433,7 @@ Status AllToAllNodeTask::ExecuteAsync(TaskContext &context, std::functionGetNodeName()); p_ctx->SetStatus(FAILED); @@ -460,7 +465,6 @@ Status AllToAllNodeTask::ExecuteAsync(TaskContext &context, std::function Date: Sun, 13 Jun 2021 19:44:48 +0800 Subject: [PATCH 017/226] Optimize performance of single_op executor. 
--- ge/hybrid/executor/hybrid_model_executor.cc | 14 +++---- ge/hybrid/executor/hybrid_model_executor.h | 3 +- ge/single_op/single_op_model.cc | 38 ++++++++++++++++++- .../hybrid_model_async_executor_unittest.cc | 5 +-- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 1 + 5 files changed, 49 insertions(+), 12 deletions(-) diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index d8939175..b3c2c471 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -41,6 +41,8 @@ HybridModelExecutor::~HybridModelExecutor() { Status HybridModelExecutor::Init() { GELOGD("Start to init HybridGraphEngine."); GE_CHK_STATUS_RET_NOLOG(InitExecutionContext()); + root_graph_executor_.reset(new (std::nothrow) SubgraphExecutor(model_->GetRootGraphItem(), &context_)); + GE_CHECK_NOTNULL(root_graph_executor_); GELOGD("HybridGraphEngine initialized successfully."); return SUCCESS; } @@ -60,8 +62,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) { GE_CHK_RT_RET(rtMemcpyAsync(context_.global_step, sizeof(uint64_t), &context_.iteration, sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE_EX, context_.stream)); } - SubgraphExecutor executor(model_->GetRootGraphItem(), &context_); - auto ret = ExecuteGraphInternal(executor, args); + auto ret = ExecuteGraphInternal(args); Cleanup(); RECORD_MODEL_EXECUTION_EVENT(&context_, "[Cleanup] End"); GELOGD("Model executed successfully."); @@ -79,8 +80,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) { return SUCCESS; } -Status HybridModelExecutor::ExecuteGraphInternal(SubgraphExecutor &executor, - HybridModelExecutor::ExecuteArgs &args) { +Status HybridModelExecutor::ExecuteGraphInternal(HybridModelExecutor::ExecuteArgs &args) { RECORD_MODEL_EXECUTION_EVENT(&context_, "[InitContext] Start"); GE_CHK_STATUS_RET_NOLOG(ResetExecutionContext(context_)); RECORD_MODEL_EXECUTION_EVENT(&context_, "[InitContext] End"); @@ -94,7 +94,7 @@ Status HybridModelExecutor::ExecuteGraphInternal(SubgraphExecutor &executor, GE_CHK_STATUS_RET_NOLOG(prof_mgr.ProfileStepInfo(index_id, model_id, 0, stream_, device_id)); } - HYBRID_CHK_STATUS_RET(executor.ExecuteAsync(args.inputs, args.input_desc, args.outputs), + HYBRID_CHK_STATUS_RET(root_graph_executor_->ExecuteAsync(args.inputs, args.input_desc, args.outputs), "Failed to execute partitioned call."); RECORD_MODEL_EXECUTION_EVENT(&context_, "[ExecuteAsync] End"); @@ -103,7 +103,7 @@ Status HybridModelExecutor::ExecuteGraphInternal(SubgraphExecutor &executor, } if (!model_->IsSingleOp()) { - Status ret = executor.Synchronize(); + Status ret = root_graph_executor_->Synchronize(); if (ret != ge::SUCCESS) { auto model_manager = ModelManager::GetInstance(); GE_CHECK_NOTNULL(model_manager); @@ -123,7 +123,7 @@ Status HybridModelExecutor::ExecuteGraphInternal(SubgraphExecutor &executor, } args.outputs.clear(); - HYBRID_CHK_STATUS_RET(executor.GetOutputs(args.outputs, args.output_desc), "Failed to get outputs"); + HYBRID_CHK_STATUS_RET(root_graph_executor_->GetOutputs(args.outputs, args.output_desc), "Failed to get outputs"); RECORD_MODEL_EXECUTION_EVENT(&context_, "[GetOutput] End"); return SUCCESS; } diff --git a/ge/hybrid/executor/hybrid_model_executor.h b/ge/hybrid/executor/hybrid_model_executor.h index 566043d9..102e4f8b 100644 --- a/ge/hybrid/executor/hybrid_model_executor.h +++ b/ge/hybrid/executor/hybrid_model_executor.h @@ -48,7 +48,7 @@ class HybridModelExecutor { Status Execute(ExecuteArgs &args); 
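  // With the executor cached as the root_graph_executor_ member, ExecuteGraphInternal
  // no longer needs an executor argument; the signature change in the next hunk and
  // the new unique_ptr member at the bottom of this class reflect that.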
private: - Status ExecuteGraphInternal(SubgraphExecutor &executor, ExecuteArgs &args); + Status ExecuteGraphInternal(ExecuteArgs &args); Status Cleanup(); Status InitExecutionContext(); static Status ResetExecutionContext(GraphExecutionContext &context); @@ -58,6 +58,7 @@ class HybridModelExecutor { uint32_t device_id_; rtStream_t stream_; GraphExecutionContext context_; + std::unique_ptr root_graph_executor_; }; } // namespace hybrid } // namespace ge diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 67642f2e..3c0f7972 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -44,20 +44,56 @@ using std::vector; namespace ge { namespace { const size_t kDataOutputNum = 1; +const uint32_t kInputIndexOfData = 0; const uint32_t kOutputIndexOfData = 0; constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; +Status CheckHostMem(const std::vector &dependencies, const NodePtr &node, bool &flag) { + for (const auto &input_name : dependencies) { + auto op_desc = node->GetOpDesc(); + int input_index = op_desc->GetInputIndexByName(input_name); + if (input_index < 0) { + GELOGE(INTERNAL_ERROR, "[Get][InputIndex]failed, node:[%s] inputname: %s.", + node->GetName().c_str(), input_name.c_str()); + REPORT_CALL_ERROR("E19999", "GetInputIndexByName failed, node:[%s] inputname: %s.", + node->GetName().c_str(), input_name.c_str()); + return INTERNAL_ERROR; + } + + const auto &in_anchor = node->GetInDataAnchor(input_index); + GE_CHECK_NOTNULL(in_anchor); + const auto &peer_out_anchor = in_anchor->GetPeerOutAnchor(); + GE_CHECK_NOTNULL(peer_out_anchor); + const auto &src_node = peer_out_anchor->GetOwnerNode(); + GE_CHECK_NOTNULL(src_node); + auto src_op_desc = src_node->GetOpDesc(); + GE_CHECK_NOTNULL(src_op_desc); + if (src_op_desc->GetType() == DATA) { + auto tensor = src_op_desc->MutableInputDesc(kInputIndexOfData); + if (AttrUtils::HasAttr(tensor, ATTR_NAME_VALUE)) { + GELOGD("Get hostmem from node %s, inputname: %s.", src_node->GetName().c_str(), input_name.c_str()); + continue; + } + } + flag = false; + return SUCCESS; + } + flag = true; + return SUCCESS; +} + Status IfInferDepend(GeModelPtr &ge_model, bool &flag) { auto comp_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph()); GE_CHECK_NOTNULL(comp_graph); for (const auto &node : comp_graph->GetAllNodes()) { + GE_CHECK_NOTNULL(node); auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); const auto &depends = op_desc->GetOpInferDepends(); bool support_dynamic_shape = false; (void)AttrUtils::GetBool(op_desc, kAttrSupportDynamicShape, support_dynamic_shape); if (!depends.empty() && support_dynamic_shape) { - flag = true; + CheckHostMem(depends, node, flag); return SUCCESS; } } diff --git a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc index d2679439..52537ee2 100644 --- a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc @@ -92,16 +92,15 @@ TEST_F(UtestHybridModelAsyncExecutor, Test_execute_internal) { GeRootModelPtr ge_root_model = make_shared(graph); ge_root_model->SetModelName("test_name"); HybridModel hybrid_model(ge_root_model); + hybrid_model.root_graph_item_.reset(new GraphItem); HybridModelExecutor executor(&hybrid_model, 0, nullptr); ASSERT_EQ(executor.Init(), SUCCESS); auto &context = executor.context_; - GraphItem graph_item; - SubgraphExecutor subgraph_executor(&graph_item, 
&context); HybridModelExecutor::ExecuteArgs args; std::pair> eof_entry; eof_entry.first = nullptr; context.callback_manager->callback_queue_.Push(eof_entry); - ASSERT_EQ(executor.ExecuteGraphInternal(subgraph_executor, args), SUCCESS); + ASSERT_EQ(executor.ExecuteGraphInternal(args), SUCCESS); } } // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 7a2a5dfe..088aec50 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -330,6 +330,7 @@ TEST_F(UtestGeHybrid, hybrid_model_executor) { ComputeGraphPtr compute_graph = MakeShared("abc"); GeRootModelPtr root_model = MakeShared(compute_graph); HybridModel model(root_model); + model.root_graph_item_.reset(new GraphItem); HybridModel *model_ptr = &model; uint32_t device_id = 0; From 13c98395e2c7c578375780afa4884887018d49a0 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 14 Jun 2021 20:09:00 +0800 Subject: [PATCH 018/226] Add ut. --- .../ge/single_op/single_op_model_unittest.cc | 33 +++++++++++++++---- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index a2c1cb02..1cb2b22c 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -17,12 +17,11 @@ #include #include +#define protected public +#define private public #include "graph/load/model_manager/model_utils.h" #include "graph/utils/graph_utils.h" #include "runtime/rt.h" - -#define protected public -#define private public #include "single_op/single_op_model.h" #include "single_op/task/tbe_task_builder.h" #include "single_op/task/rts_kernel_task_builder.h" @@ -30,14 +29,18 @@ #include "framework/common/helper/model_helper.h" #include "single_op/single_op.h" #include "single_op/stream_resource.h" +#include "graph/passes/graph_builder_utils.h" #undef private #undef protected -#include "graph/passes/graph_builder_utils.h" using namespace std; using namespace testing; using namespace ge; +namespace { +constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; +} // namespace + class UtestSingleOpModel : public testing::Test { protected: void SetUp() {} @@ -208,12 +211,28 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { model.model_helper_.model_ = ge::MakeShared(); // make graph - auto compute_graph = make_shared("graph"); - auto data_op = make_shared("Data", DATA); - auto data_node = compute_graph->AddNode(data_op); + ut::GraphBuilder builder = ut::GraphBuilder("graph"); + auto data = builder.AddNode("Data", "Data", 0, 1); + auto transdata = builder.AddNode("Transdata", "Transdata", 1, 1); + auto netoutput = builder.AddNode("Netoutput", "NetOutput", 1, 0); + builder.AddDataEdge(data, 0, transdata, 0); + builder.AddDataEdge(transdata, 0, netoutput, 0); + auto compute_graph = builder.GetGraph(); + auto graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph); model.model_helper_.model_->SetGraph(graph); + auto op_desc = transdata->GetOpDesc(); + op_desc->input_name_idx_["Data"] = 0; + const vector depend_names = { "Data" }; + op_desc->SetOpInferDepends(depend_names); + (void)AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true); + + auto tensor = std::make_shared(); + auto data_desc = data->GetOpDesc(); + auto tensor_desc = data_desc->MutableInputDesc(0); + AttrUtils::SetTensor(tensor_desc, "_value", tensor); + // set task_def auto model_task_def 
= make_shared(); domi::TaskDef *task_def = model_task_def->add_task(); From 1ab9ae32dc4520be393242297ce900beeb9d2564 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 10:00:19 +0800 Subject: [PATCH 019/226] Add ut. --- ge/single_op/single_op_model.cc | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 3c0f7972..4a7638b1 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -48,7 +48,7 @@ const uint32_t kInputIndexOfData = 0; const uint32_t kOutputIndexOfData = 0; constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; -Status CheckHostMem(const std::vector &dependencies, const NodePtr &node, bool &flag) { +Status CheckHostMem(const std::vector &dependencies, const NodePtr &node, bool &is_host_mem) { for (const auto &input_name : dependencies) { auto op_desc = node->GetOpDesc(); int input_index = op_desc->GetInputIndexByName(input_name); @@ -75,14 +75,14 @@ Status CheckHostMem(const std::vector &dependencies, const NodePtr &node continue; } } - flag = false; + is_host_mem = false; return SUCCESS; } - flag = true; + is_host_mem = true; return SUCCESS; } -Status IfInferDepend(GeModelPtr &ge_model, bool &flag) { +Status CheckInferDepend(GeModelPtr &ge_model, bool &is_infer_depend, bool &is_host_mem) { auto comp_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph()); GE_CHECK_NOTNULL(comp_graph); for (const auto &node : comp_graph->GetAllNodes()) { @@ -93,16 +93,18 @@ Status IfInferDepend(GeModelPtr &ge_model, bool &flag) { bool support_dynamic_shape = false; (void)AttrUtils::GetBool(op_desc, kAttrSupportDynamicShape, support_dynamic_shape); if (!depends.empty() && support_dynamic_shape) { - CheckHostMem(depends, node, flag); - return SUCCESS; + is_infer_depend = true; + return CheckHostMem(depends, node, is_host_mem); } } return SUCCESS; } Status NeedHybridModel(GeModelPtr &ge_model, bool &flag) { - bool infer_depend_flag = false; - GE_CHK_STATUS_RET(IfInferDepend(ge_model, infer_depend_flag), "[Check][InferDepend] failed."); + bool is_infer_depend = false; + bool is_host_mem = false; + GE_CHK_STATUS_RET(CheckInferDepend(ge_model, is_infer_depend, is_host_mem), "[Check][InferDepend] failed."); + bool need_d2h_cpy = is_infer_depend && !is_host_mem; auto tasks = ge_model->GetModelTaskDefPtr()->task(); int32_t kernel_task_num = 0; for (int i = 0; i < tasks.size(); ++i) { @@ -112,7 +114,7 @@ Status NeedHybridModel(GeModelPtr &ge_model, bool &flag) { tasks[i].kernel_with_handle().context(); auto kernel_type = static_cast(context.kernel_type()); if (kernel_type == ccKernelType::TE) { - if (infer_depend_flag) { + if (need_d2h_cpy) { flag = true; return SUCCESS; } @@ -553,7 +555,8 @@ Status SingleOpModel::BuildOp(StreamResource &resource, SingleOp &single_op) { auto ge_model = model_helper_.GetGeModel(); GE_CHECK_NOTNULL(ge_model); bool infer_depend_flag = false; - GE_CHK_STATUS_RET(IfInferDepend(ge_model, infer_depend_flag), "[Check][InferDepend] failed."); + bool is_host_mem = false; + GE_CHK_STATUS_RET(CheckInferDepend(ge_model, infer_depend_flag, is_host_mem)), "[Check][InferDepend] failed."); if (infer_depend_flag) { // construct single_op, do single op with HybridModelExecutor GELOGD("Init hybrid model params of single op, and will do execute with hybrid model executor."); From b35412f5eaa40705adc2bdd014d62ebc32a0f898 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 10:07:43 +0800 Subject: [PATCH 
020/226] Add ut. --- ge/single_op/single_op_model.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 4a7638b1..182d1466 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -556,7 +556,7 @@ Status SingleOpModel::BuildOp(StreamResource &resource, SingleOp &single_op) { GE_CHECK_NOTNULL(ge_model); bool infer_depend_flag = false; bool is_host_mem = false; - GE_CHK_STATUS_RET(CheckInferDepend(ge_model, infer_depend_flag, is_host_mem)), "[Check][InferDepend] failed."); + GE_CHK_STATUS_RET(CheckInferDepend(ge_model, infer_depend_flag, is_host_mem), "[Check][InferDepend] failed."); if (infer_depend_flag) { // construct single_op, do single op with HybridModelExecutor GELOGD("Init hybrid model params of single op, and will do execute with hybrid model executor."); From e85bbe218143a8e02ab17884da223447a11a440e Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 12 Jun 2021 12:00:01 +0800 Subject: [PATCH 021/226] Fix dynamic shape partition --- ge/graph/common/omg_util.cc | 15 -- ge/graph/common/omg_util.h | 9 - ge/graph/partition/dynamic_shape_partition.cc | 23 ++- ge/graph/partition/dynamic_shape_partition.h | 2 +- .../mark_force_unknown_for_cond_pass.cc | 38 +--- .../passes/mark_graph_unknown_status_pass.cc | 6 + ge/graph/passes/merge_to_stream_merge_pass.cc | 5 +- ge/graph/passes/next_iteration_pass.cc | 10 +- .../passes/switch_to_stream_switch_pass.cc | 16 +- ge/hybrid/executor/node_state.cc | 57 ++++- ge/hybrid/executor/node_state.h | 4 + ge/hybrid/executor/subgraph_context.cc | 2 +- ge/hybrid/executor/subgraph_context.h | 4 +- ge/hybrid/executor/subgraph_executor.cc | 18 +- ge/hybrid/executor/subgraph_executor.h | 1 - ge/hybrid/model/node_item.cc | 5 +- ge/hybrid/model/node_item.h | 5 +- ge/hybrid/node_executor/task_context.cc | 17 +- ge/hybrid/node_executor/task_context.h | 4 +- .../dynamic_shape_partition_unittest.cc | 194 ++++++++++++++---- .../worker/execution_engine_unittest.cc | 8 +- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 8 +- .../ge_local_node_executor_unittest.cc | 5 - .../rts/rts_node_task_unittest.cc | 40 ---- 24 files changed, 283 insertions(+), 213 deletions(-) diff --git a/ge/graph/common/omg_util.cc b/ge/graph/common/omg_util.cc index 52e6cb9c..b2017e4d 100644 --- a/ge/graph/common/omg_util.cc +++ b/ge/graph/common/omg_util.cc @@ -274,21 +274,6 @@ bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc) { return false; } -/// -/// @brief Set Op _force_unknown_shape flag -/// @param [in] node -/// @param [in] force_unknown, set attribute if true -/// @param [in] group_index, condition group index of node. -/// @return -/// -void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index) { - if (!force_unknown) { - return; - } - - SetControlFlowGroup(node, group_index); -} - /// /// @brief Set Op _control_flow_group flag /// @param [in] node diff --git a/ge/graph/common/omg_util.h b/ge/graph/common/omg_util.h index 148e4102..edaafa45 100644 --- a/ge/graph/common/omg_util.h +++ b/ge/graph/common/omg_util.h @@ -125,15 +125,6 @@ Status GetMemorySize(const NodePtr &node, int64_t &output_size); /// bool IsUnknownShapeTensor(const GeTensorDesc &tensor_desc); -/// -/// @brief Set Op _force_unknown_shape flag -/// @param [in] node -/// @param [in] force_unknown, set attribute if true -/// @param [in] group_index, condition group index of node. 
-/// @return -/// -void MarkForceUnknownShape(const NodePtr &node, bool force_unknown, int64_t group_index); - /// /// @brief Set Op _control_flow_group flag /// @param [in] node diff --git a/ge/graph/partition/dynamic_shape_partition.cc b/ge/graph/partition/dynamic_shape_partition.cc index 055b2aa4..1db47498 100755 --- a/ge/graph/partition/dynamic_shape_partition.cc +++ b/ge/graph/partition/dynamic_shape_partition.cc @@ -364,6 +364,7 @@ static std::string ToString(const std::vector &clusters) { } void DynamicShapePartitioner::MergeClustersControlFlow() { + std::unordered_set all_merged_clusters; for (const auto &item : control_clusters_) { const auto &control_cluster = item.second; auto rit = control_cluster.rbegin(); @@ -373,17 +374,32 @@ void DynamicShapePartitioner::MergeClustersControlFlow() { } const auto &cluster = *rit; + if (all_merged_clusters.count(cluster) > 0) { + continue; + } + + bool is_unknown_cluster = cluster->IsUnknownShape(); for (++rit; rit != control_cluster.rend(); ++rit) { const auto &cluster_from = *rit; + if (all_merged_clusters.count(cluster_from) > 0) { + continue; + } + auto merged_clusters = cluster->MergeAllPathFrom(cluster_from); GELOGD("Merge all path cluster from %lu to %lu %s.", cluster_from->Id(), cluster->Id(), ToString(merged_clusters).c_str()); for (const auto &merged_cluster : merged_clusters) { + all_merged_clusters.emplace(merged_cluster); for (const auto &node : merged_cluster->Nodes()) { node_2_cluster_[node] = cluster; } } } + + if (!is_unknown_cluster && cluster->IsUnknownShape()) { + GELOGD("Add to ordered cluster: %s", cluster->DebugString().c_str()); + ordered_cluster_.push_back(cluster); + } } } @@ -703,7 +719,12 @@ void Cluster::Merge(ClusterPtr other) { if (other->min_ < min_) { min_ = other->min_; } -}; + + if (!IsUnknownShape() && other->IsUnknownShape()) { + type_ = UNKNOWN_SHAPE; + } +} + bool Cluster::TryMerge(ClusterPtr other) { std::queue forward_reached; forward_reached.push(other); diff --git a/ge/graph/partition/dynamic_shape_partition.h b/ge/graph/partition/dynamic_shape_partition.h index a17c4e4b..bd3b128f 100644 --- a/ge/graph/partition/dynamic_shape_partition.h +++ b/ge/graph/partition/dynamic_shape_partition.h @@ -161,7 +161,7 @@ class DynamicShapePartitioner { ge::ComputeGraphPtr root_graph_; // The original graph to partition std::unordered_map> node_2_cluster_; // Record nodes and the cluster it belongs to // V1 control flow cluster, need merge to one Graph. - std::unordered_map>> control_clusters_; + std::map>> control_clusters_; // topological sorted clusters, this field will change with the splitting. 
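  // Switching control_clusters_ from std::unordered_map to std::map (above) makes
  // iteration follow ascending control-group ids, which looks intended to give
  // MergeClustersControlFlow a deterministic merge order across runs; the new
  // all_merged_clusters set likewise keeps an already-merged cluster from being
  // merged into a second group.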
// When partitioning UNKNOWN_SHAPE cluster, it is a collection of all topological sorted UNKNOWN_SHAPE clusters // When partitioning KNOWN_SHAPE cluster, it is a collection of all topological sorted KNOWN_SHAPE clusters diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc index 08b358ee..74babadc 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc @@ -132,39 +132,17 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std: /// @return /// void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map> &switch_groups) { - std::function callback = [](const NodePtr &n) { - return n->GetOpDesc()->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP); - }; - - for (auto it1 = switch_groups.begin(); it1 != switch_groups.end(); ++it1) { - const auto &op_node1 = it1->first; - const auto &op_desc1 = op_node1->GetOpDesc(); - if (op_desc1->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) { + for (auto it = switch_groups.begin(); it != switch_groups.end(); ++it) { + const auto &op_node = it->first; + const auto &op_desc = op_node->GetOpDesc(); + if (op_desc->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) { continue; } - if (IsUnknownShapeTensor(op_desc1->GetOutputDesc(0))) { - int64_t group_index = op_desc1->GetId(); - GELOGI("Mark %s as unknown shape control flow, group index: %ld", op_desc1->GetName().c_str(), group_index); - MarkForceUnknownShape(op_node1, true, group_index); - for (const auto &n : it1->second) { - MarkForceUnknownShape(n, true, group_index); - } - - for (auto it2 = switch_groups.begin(); it2 != switch_groups.end(); ++it2) { - const auto &op_node2 = it2->first; - const auto &op_desc2 = op_node2->GetOpDesc(); - if (op_desc2->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) { - continue; - } - - if (std::any_of(it2->second.begin(), it2->second.end(), callback)) { - MarkForceUnknownShape(op_node2, true, group_index); - for (const auto &n : it2->second) { - MarkForceUnknownShape(n, true, group_index); - } - } - } + int64_t group_index = op_desc->GetId(); + SetControlFlowGroup(op_node, group_index); + for (const auto &n : it->second) { + SetControlFlowGroup(n, group_index); } } } diff --git a/ge/graph/passes/mark_graph_unknown_status_pass.cc b/ge/graph/passes/mark_graph_unknown_status_pass.cc index 2d7b179b..9e460fc7 100644 --- a/ge/graph/passes/mark_graph_unknown_status_pass.cc +++ b/ge/graph/passes/mark_graph_unknown_status_pass.cc @@ -40,6 +40,12 @@ Status MarkGraphUnknownStatusPass::Run(ComputeGraphPtr graph) { } } + const auto &node = graph->GetParentNode(); + if (!is_unknown_shape && node != nullptr && node->GetType() == PARTITIONEDCALL) { + GE_CHK_GRAPH_STATUS_RET(NodeUtils::GetNodeUnknownShapeStatus(*node, is_unknown_shape), + "[Get][ShapeStatus] of node[%s] failed!", node->GetName().c_str()); + } + for (const auto &node : graph->GetDirectNode()) { GELOGD("Set OwnerGraphIsUnknown attr to node[%s]", node->GetName().c_str()); (void)AttrUtils::SetBool(node->GetOpDesc(), kOwnerGraphIsUnknown, is_unknown_shape); diff --git a/ge/graph/passes/merge_to_stream_merge_pass.cc b/ge/graph/passes/merge_to_stream_merge_pass.cc index 0b383911..dbcff620 100644 --- a/ge/graph/passes/merge_to_stream_merge_pass.cc +++ b/ge/graph/passes/merge_to_stream_merge_pass.cc @@ -89,8 +89,7 @@ Status MergeToStreamMergePass::AddActiveNodes(const ComputeGraphPtr &graph, cons REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid"); return FAILED, "[Check][Param] Param of pre 
node is nullptr."); int64_t group_index = -1; - bool force_unknown = AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); - MarkForceUnknownShape(node, force_unknown, group_index); + (void)AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); for (const InDataAnchorPtr &in_data_anchor : node->GetAllInDataAnchors()) { OutDataAnchorPtr peer_out_anchor = in_data_anchor->GetPeerOutAnchor(); GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue); @@ -109,7 +108,7 @@ Status MergeToStreamMergePass::AddActiveNodes(const ComputeGraphPtr &graph, cons GELOGE(FAILED, "[Set][ActiveLabelList] for node %s failed.", active_node->GetName().c_str()); return FAILED; } - MarkForceUnknownShape(active_node, force_unknown, group_index); + SetControlFlowGroup(active_node, group_index); } return SUCCESS; diff --git a/ge/graph/passes/next_iteration_pass.cc b/ge/graph/passes/next_iteration_pass.cc index 67735b8b..fb8f8627 100644 --- a/ge/graph/passes/next_iteration_pass.cc +++ b/ge/graph/passes/next_iteration_pass.cc @@ -284,13 +284,21 @@ Status NextIterationPass::HandleWhileGroup(ComputeGraphPtr &graph) { /// @return void /// void NextIterationPass::HandleSwitchExitNodes(const LoopCondGroup &loop_group, int64_t group_index) { + std::string node_type; for (const auto &switch_node : loop_group.switch_nodes) { SetControlFlowGroup(switch_node, group_index); for (const auto &node : switch_node->GetOutDataNodes()) { - std::string node_type; (void)GetOriginalType(node, node_type); if (kExitOpTypes.count(node_type) > 0) { SetControlFlowGroup(node, group_index); + } else { + // For: Switch -> Cast -> Exit + for (const auto &n : node->GetOutDataNodes()) { + (void)GetOriginalType(n, node_type); + if (kExitOpTypes.count(node_type) > 0) { + SetControlFlowGroup(n, group_index); + } + } } } } diff --git a/ge/graph/passes/switch_to_stream_switch_pass.cc b/ge/graph/passes/switch_to_stream_switch_pass.cc index e7743130..e4ab0111 100644 --- a/ge/graph/passes/switch_to_stream_switch_pass.cc +++ b/ge/graph/passes/switch_to_stream_switch_pass.cc @@ -395,8 +395,8 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr & peer_cond_anchor->GetOwnerNode()->GetName().c_str(), stream_switch->GetName().c_str()); int64_t group_index = -1; - bool force_unknown = AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); - MarkForceUnknownShape(stream_switch, force_unknown, group_index); + (void)AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); + SetControlFlowGroup(stream_switch, group_index); return stream_switch; } @@ -491,8 +491,8 @@ int64_t SwitchToStreamSwitchPass::GetGroupId(const NodePtr &node) { Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph) { for (auto iter = cond_node_map_.begin(); iter != cond_node_map_.end(); ++iter) { for (auto group_iter = iter->second.begin(); group_iter != iter->second.end(); ++group_iter) { - std::list false_switch_list = group_iter->second[SWITCH_FALSE_OUTPUT]; - std::list true_switch_list = group_iter->second[SWITCH_TRUE_OUTPUT]; + const std::list &false_switch_list = group_iter->second[SWITCH_FALSE_OUTPUT]; + const std::list &true_switch_list = group_iter->second[SWITCH_TRUE_OUTPUT]; std::set same_cond_switch; same_cond_switch.insert(false_switch_list.begin(), false_switch_list.end()); same_cond_switch.insert(true_switch_list.begin(), true_switch_list.end()); @@ -524,13 +524,13 @@ Status 
SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph) std::function callback = [&group_index](const NodePtr &n) { return AttrUtils::GetInt(n->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); }; - bool is_unknown_shape = std::any_of(same_cond_switch.begin(), same_cond_switch.end(), callback); - MarkForceUnknownShape(active_node, is_unknown_shape, group_index); + (void)std::any_of(same_cond_switch.begin(), same_cond_switch.end(), callback); + SetControlFlowGroup(active_node, group_index); const std::string &cond_group = cond_node->GetName(); for (uint32_t i = 0; i < SWITCH_OUTPUT_NUM; ++i) { bool true_branch_flag = (i == SWITCH_TRUE_OUTPUT); - std::list &switch_list = (true_branch_flag ? true_switch_list : false_switch_list); + const std::list &switch_list = (true_branch_flag ? true_switch_list : false_switch_list); GE_IF_BOOL_EXEC(switch_list.empty(), continue); // select first stream_switch @@ -559,7 +559,7 @@ Status SwitchToStreamSwitchPass::CombineSwitchNode(const ComputeGraphPtr &graph) "[Add][Edge] between %s and %s failed.", cast_node->GetName().c_str(), stream_switch->GetName().c_str()); - MarkForceUnknownShape(stream_switch, is_unknown_shape, group_index); + SetControlFlowGroup(stream_switch, group_index); for (const NodePtr &node : switch_list) { GE_IF_BOOL_EXEC(node != stream_switch, { GE_CHK_STATUS(GraphUtils::RemoveEdge(peer_cond_anchor, node->GetInDataAnchor(0)), diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc index 313a2934..42e08811 100644 --- a/ge/hybrid/executor/node_state.cc +++ b/ge/hybrid/executor/node_state.cc @@ -19,8 +19,9 @@ #include "framework/common/debug/log.h" #include "graph/compute_graph.h" #include "graph/utils/tensor_utils.h" -#include "hybrid_execution_context.h" -#include "subgraph_context.h" +#include "hybrid/executor/hybrid_execution_context.h" +#include "hybrid/executor/subgraph_context.h" +#include "hybrid/node_executor/task_context.h" #define INC_ITERATION_COUNT(iteration) \ do { \ @@ -258,6 +259,8 @@ ShapeFuture::ShapeFuture(NodeState *src_node, NodeState::NodeState(const NodeItem &node_item, SubgraphContext *subgraph_context) : node_item_(&node_item), shape_inference_state_(node_item), subgraph_context_(subgraph_context) { this->op_desc_ = node_item.node->GetOpDesc(); + auto unique_task_context = TaskContext::Create(this, subgraph_context_); + task_context_ = std::shared_ptr(unique_task_context.release()); } Status NodeState::AwaitInputTensors(GraphExecutionContext &context) const { @@ -314,15 +317,53 @@ std::shared_ptr NodeState::GetTaskContext() { return task_context_; } +void NodeState::SavePersistTensor(int input_idx, const TensorValue &tensor) { + if (node_item_->root_data_.count(input_idx) > 0) { + GELOGD("[%s] Save Root input tensor: %d", GetName().c_str(), input_idx); + root_tensor_values_[input_idx] = tensor; + } + + if (node_item_->enter_data_.count(input_idx) > 0) { + GELOGD("[%s] Save Enter input tensor: %d", GetName().c_str(), input_idx); + root_tensor_values_[input_idx] = tensor; + } +} + +void NodeState::UpdatePersistTensor(int input_idx) { + const auto it = root_tensor_values_.find(input_idx); + if (it == root_tensor_values_.end()) { + GELOGW("[%s] Not found saved tensor: %d", GetName().c_str(), input_idx); + return; + } + + auto tensor = task_context_->MutableInput(input_idx); + if (tensor == nullptr) { + GELOGW("[%s] Not found input tensor: %d", GetName().c_str(), input_idx); + return; + } + + *tensor = it->second; + GELOGD("[%s] Update input tensor: %d", 
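// UpdatePersistTensor restores a tensor saved earlier by SavePersistTensor: inputs
// fed from root or Enter nodes are cached in root_tensor_values_, so later loop
// iterations can re-seed those inputs after ResetContext recreates the TaskContext.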
GetName().c_str(), input_idx); +} + void NodeState::ResetContext(uint64_t iteration) { switch_index_ = -1; subgraph_context_->ResetContext(node_item_->node); - if (iteration == 0) { - data_scheduled_ = static_cast(node_item_->root_data_.size()); - ctrl_scheduled_ = static_cast(node_item_->root_ctrl_.size()); - } else { - data_scheduled_ = static_cast(node_item_->root_data_.size() + node_item_->enter_data_.size()); - ctrl_scheduled_ = static_cast(node_item_->root_ctrl_.size() + node_item_->enter_ctrl_.size()); + auto unique_task_context = TaskContext::Create(this, subgraph_context_); + task_context_ = std::shared_ptr(unique_task_context.release()); + + data_scheduled_ = static_cast(node_item_->root_data_.size()); + ctrl_scheduled_ = static_cast(node_item_->root_ctrl_.size()); + for (auto item : node_item_->root_data_) { + UpdatePersistTensor(item.first); + } + + if (iteration > 0) { + data_scheduled_ += static_cast(node_item_->enter_data_.size()); + ctrl_scheduled_ += static_cast(node_item_->enter_ctrl_.size()); + for (auto item : node_item_->enter_data_) { + UpdatePersistTensor(item.first); + } } iteration_count_ = iteration; diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h index 9dd29846..72e2b90e 100644 --- a/ge/hybrid/executor/node_state.h +++ b/ge/hybrid/executor/node_state.h @@ -129,6 +129,8 @@ struct NodeState { void RunStreamActive(); void RunNextIteration(); + void SavePersistTensor(int input_idx, const TensorValue &tensor); + Status NodeScheduled(const std::function &ready) const; void SetScheduleFuture(std::future &&future); @@ -187,6 +189,7 @@ struct NodeState { void SetCtrlSchedule(const NodeState &node_state, const std::function &ready); void ResetContext(uint64_t iteration); void ScheduleContext(const NodeState &node_state); + void UpdatePersistTensor(int input_idx); const NodeItem *node_item_ = nullptr; std::shared_ptr kernel_task_ = nullptr; @@ -199,6 +202,7 @@ struct NodeState { std::future schedule_future_; std::shared_ptr frame_state_; + std::map root_tensor_values_; uint64_t active_count_ = 0; uint64_t iteration_count_ = 0; uint32_t ctrl_scheduled_ = 0; diff --git a/ge/hybrid/executor/subgraph_context.cc b/ge/hybrid/executor/subgraph_context.cc index b6763ffd..41ada9af 100644 --- a/ge/hybrid/executor/subgraph_context.cc +++ b/ge/hybrid/executor/subgraph_context.cc @@ -19,7 +19,7 @@ namespace ge { namespace hybrid { -SubgraphContext::SubgraphContext(const GraphItem *graph_item, const GraphExecutionContext *execution_context) +SubgraphContext::SubgraphContext(const GraphItem *graph_item, GraphExecutionContext *execution_context) : graph_item_(graph_item), execution_context_(execution_context) { } diff --git a/ge/hybrid/executor/subgraph_context.h b/ge/hybrid/executor/subgraph_context.h index a43cd210..d11d00d7 100644 --- a/ge/hybrid/executor/subgraph_context.h +++ b/ge/hybrid/executor/subgraph_context.h @@ -30,7 +30,7 @@ namespace ge { namespace hybrid { class SubgraphContext { public: - explicit SubgraphContext(const GraphItem *graph_item, const GraphExecutionContext *execution_context); + explicit SubgraphContext(const GraphItem *graph_item, GraphExecutionContext *execution_context); ~SubgraphContext(); Status Init(); @@ -54,7 +54,7 @@ class SubgraphContext { FrameStatePtr GetOrCreateFrameState(const NodeItem &node_item); // no lock friend class TaskContext; const GraphItem *graph_item_; - const GraphExecutionContext *execution_context_; + GraphExecutionContext *execution_context_; mmRWLock_t rw_lock_; std::vector all_inputs_; std::vector 
<TensorValue> all_outputs_;
diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc
index 612e7565..7429acc5 100644
--- a/ge/hybrid/executor/subgraph_executor.cc
+++ b/ge/hybrid/executor/subgraph_executor.cc
@@ -175,16 +175,12 @@ Status SubgraphExecutor::ExecuteAsyncForKnownShape(const std::vector<TensorValue> &inputs,
   auto node_state = subgraph_context_->GetOrCreateNodeState(node_item);
   GE_CHECK_NOTNULL(node_state);
   node_state->SetKernelTask(node_item->kernel_task);
-  known_shape_task_context_ = TaskContext::Create(node_state.get(), context_, subgraph_context_.get());
-  GE_CHECK_NOTNULL(known_shape_task_context_);
-  node_state->SetTaskContext(known_shape_task_context_);
-
   std::function<void()> callback;
   GE_CHK_STATUS_RET_NOLOG(InitCallback(node_state.get(), callback));
-  HYBRID_CHK_STATUS_RET(ExecutionEngine::ExecuteAsync(*node_state, known_shape_task_context_, *context_, callback),
+  HYBRID_CHK_STATUS_RET(ExecutionEngine::ExecuteAsync(*node_state, node_state->GetTaskContext(), *context_, callback),
                         "[%s] Failed to execute node [%s] for known subgraph.",
                         graph_item_->GetName().c_str(),
-                        known_shape_task_context_->GetNodeName());
+                        node_state->GetName().c_str());
 
   GELOGD("[%s] Done execute non-dynamic subgraph successfully.", graph_item_->GetName().c_str());
   return SUCCESS;
@@ -271,16 +267,12 @@ Status SubgraphExecutor::PrepareNode(const NodeItem &node_item, int group) {
   } else {
     node_state->SetKernelTask(node_item.kernel_task);
   }
-  auto unique_task_context = TaskContext::Create(node_state.get(), context_, subgraph_context_.get());
-  GE_CHECK_NOTNULL(unique_task_context);
   const auto &task = node_state->GetKernelTask();
   if (task == nullptr) {
     GELOGE(INTERNAL_ERROR, "[Get][KernelTask] failed for[%s], NodeTask is null.", node_state->GetName().c_str());
     REPORT_CALL_ERROR("E19999", "GetKernelTask failed for %s, nodetask is null.", node_state->GetName().c_str());
     return INTERNAL_ERROR;
   }
-  auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
-  node_state->SetTaskContext(shared_task_context);
   GE_CHK_STATUS_RET_NOLOG(NodeEnqueue(p_node_state));
   return AfterPrepared(p_node_state);
 }
@@ -480,19 +472,15 @@ Status SubgraphExecutor::PrepareForExecution(GraphExecutionContext *ctx, NodeState &node_state) {
   } else {
     node_state.SetKernelTask(node_item.kernel_task);
   }
-  auto unique_task_context = TaskContext::Create(&node_state, context_, subgraph_context_.get());
-  GE_CHECK_NOTNULL(unique_task_context);
   const auto &task = node_state.GetKernelTask();
   if (task == nullptr) {
     GELOGE(INTERNAL_ERROR, "[Invoke][GetKernelTask] failed for[%s], NodeTask is null.", node_state.GetName().c_str());
     REPORT_CALL_ERROR("E19999", "invoke GetKernelTask failed for %s, NodeTask is null.", node_state.GetName().c_str());
     return INTERNAL_ERROR;
   }
-  auto shared_task_context = std::shared_ptr<TaskContext>(unique_task_context.release());
-  node_state.SetTaskContext(shared_task_context);
   GE_CHK_RT_RET(rtCtxSetCurrent(ctx->rt_context));
   RECORD_COMPILE_EVENT(ctx, node_item.NodeName().c_str(), "[UpdateTilingData] start");
-  GE_CHK_STATUS_RET_NOLOG(task->UpdateTilingData(*shared_task_context)); // update op_desc before alloc ws
+  GE_CHK_STATUS_RET_NOLOG(task->UpdateTilingData(*node_state.GetTaskContext())); // update op_desc before alloc ws
   RECORD_COMPILE_EVENT(ctx, node_item.NodeName().c_str(), "[UpdateTilingData] end");
   return SUCCESS;
 }
diff --git a/ge/hybrid/executor/subgraph_executor.h b/ge/hybrid/executor/subgraph_executor.h
index 758bf426..e4c0debe 100644
--- a/ge/hybrid/executor/subgraph_executor.h
+++ b/ge/hybrid/executor/subgraph_executor.h
@@ -125,7 +125,6 @@ class SubgraphExecutor {
   ThreadPool pre_run_pool_;
   BlockingQueue<NodeState *> ready_queue_;
   std::unique_ptr<ShapeInferenceEngine> shape_inference_engine_;
-  std::shared_ptr<TaskContext> known_shape_task_context_;
 
   std::mutex mu_;  // Guard for prepare_queues_.
   std::map<int, BlockingQueue<const NodeItem *>> prepare_queues_;
diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc
index b339e630..cef06fc6 100644
--- a/ge/hybrid/model/node_item.cc
+++ b/ge/hybrid/model/node_item.cc
@@ -398,12 +398,11 @@ void NodeItem::SetDataSend(NodeItem *node_item, int anchor_index) {
   data_send_.emplace(node_item);
   node_item->data_recv_[this] = anchor_index;
   if (is_root_node_) {
-    node_item->root_data_.emplace(this);
+    node_item->root_data_[anchor_index] = this;
   }
   // If Enter feed Not Merge, take as root Node.
   if (IsEnterOp() && (node_item->node_type != STREAMMERGE)) {
-    node_item->enter_data_.emplace(this);
-    node_item->enter_inside_.emplace(anchor_index);
+    node_item->enter_data_[anchor_index] = this;
   }
   GELOGI("Node[%s] will control node[%s]", NodeName().c_str(), node_item->NodeName().c_str());
 }
diff --git a/ge/hybrid/model/node_item.h b/ge/hybrid/model/node_item.h
index 8de15952..ec66f094 100644
--- a/ge/hybrid/model/node_item.h
+++ b/ge/hybrid/model/node_item.h
@@ -148,15 +148,14 @@ struct NodeItem {
   int64_t frame_index_ = -1;
   int64_t parent_frame_ = -1;
   std::set<const NodeItem *> root_ctrl_;  // Recv ctrl from root node
-  std::set<const NodeItem *> root_data_;  // Recv data from root node
+  std::map<int, const NodeItem *> root_data_;  // Recv data from root node
   std::set<const NodeItem *> enter_ctrl_;  // Recv ctrl from Enter node
-  std::set<const NodeItem *> enter_data_;  // Recv data from Enter node
+  std::map<int, const NodeItem *> enter_data_;  // Recv data from Enter node
   std::set<const NodeItem *> data_send_;  // Send data notify to
  std::map<const NodeItem *, int> data_recv_;  // Recv data notify from
   std::set<const NodeItem *> ctrl_send_;  // Send ctrl notify to
   std::set<const NodeItem *> ctrl_recv_;  // Recv ctrl notify from
   std::vector<std::vector<const NodeItem *>> switch_groups_;  // Send ctrl notify to
-  std::set<int> enter_inside_;  // Enter feed loop inside Node, Not cross Merge.
std::shared_ptr kernel_task; std::unique_ptr fused_subgraph; diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index 14eb1222..fe580c1e 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ b/ge/hybrid/node_executor/task_context.cc @@ -52,9 +52,7 @@ void TaskContext::ReleaseWorkspace() { } } -std::unique_ptr TaskContext::Create(NodeState *node_state, - GraphExecutionContext *execution_context, - SubgraphContext *subgraph_context) { +std::unique_ptr TaskContext::Create(NodeState *node_state, SubgraphContext *subgraph_context) { const NodeItem &node_item = *node_state->GetNodeItem(); GELOGI("[%s] To create task context, input start = %d, num_inputs = %d, output start = %d, num_outputs = %d.", node_item.NodeName().c_str(), @@ -75,7 +73,7 @@ std::unique_ptr TaskContext::Create(NodeState *node_state, } auto task_context = std::unique_ptr( - new(std::nothrow)TaskContext(execution_context, node_state, subgraph_context)); + new(std::nothrow)TaskContext(subgraph_context->execution_context_, node_state, subgraph_context)); if (task_context == nullptr) { REPORT_CALL_ERROR("E19999", "Create TaskContext failed for [%s].", node_item.NodeName().c_str()); GELOGE(MEMALLOC_FAILED, "[Create][TaskContext] failed for [%s].", node_item.NodeName().c_str()); @@ -85,7 +83,7 @@ std::unique_ptr TaskContext::Create(NodeState *node_state, task_context->node_item_ = &node_item; task_context->inputs_start_ = subgraph_context->all_inputs_.data() + node_item.input_start; task_context->outputs_start_ = subgraph_context->all_outputs_.data() + node_item.output_start; - task_context->iteration_ = execution_context->iteration; + task_context->iteration_ = subgraph_context->execution_context_->iteration; return task_context; } @@ -460,6 +458,10 @@ Status TaskContext::PropagateOutputs() { subgraph_context_->all_inputs_[input_offset].SetName( node_item_->NodeName() + "_in_" + std::to_string(dst_input_idx)); } + + auto dst_node_state = subgraph_context_->GetOrCreateNodeState(dst_node_item); + GE_CHECK_NOTNULL(dst_node_state); + dst_node_state->SavePersistTensor(dst_input_idx, *tensor); } } (void)guard; @@ -489,11 +491,6 @@ void TaskContext::ReleaseInputsAndOutputs() { } void TaskContext::ReleaseInput(int index) { - if (node_item_->enter_inside_.count(index) > 0) { - GELOGD("[%s] Tensor of input[%d] is enter, keep it", GetNodeName(), index); - return; - } - auto input_tensor = MutableInput(index); if (input_tensor != nullptr) { input_tensor->Destroy(); diff --git a/ge/hybrid/node_executor/task_context.h b/ge/hybrid/node_executor/task_context.h index ba4c62e6..c96e194e 100644 --- a/ge/hybrid/node_executor/task_context.h +++ b/ge/hybrid/node_executor/task_context.h @@ -36,9 +36,7 @@ class SubgraphContext; class TaskContext { public: - static std::unique_ptr Create(NodeState *node_state, - GraphExecutionContext *execution_context, - SubgraphContext *subgraph_context); + static std::unique_ptr Create(NodeState *node_state, SubgraphContext *subgraph_context); ~TaskContext(); diff --git a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc index ec1caebd..da1abd0f 100644 --- a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc +++ b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc @@ -24,6 +24,7 @@ #include "inc/framework/common/types.h" #include "utils/graph_utils.h" #include "graph/debug/ge_attr_define.h" +#include "graph/common/omg_util.h" namespace ge { namespace { @@ -38,33 +39,33 
@@ GeTensorDescPtr CreateTensorDesc(std::initializer_list shape, Format fo } class NodeBuilder { - public: - NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared(name, type); } - - NodeBuilder &AddInputDesc(std::initializer_list shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW, - DataType data_type = DT_FLOAT) { - op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone()); - return *this; - } - - NodeBuilder &AddOutputDesc(std::initializer_list shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW, - DataType data_type = DT_FLOAT) { - op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone()); - return *this; - } - - NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) { - op_desc_->AddOutputDesc(tensor_desc->Clone()); - return *this; - } - - NodePtr Build(const ComputeGraphPtr &graph) { - NodePtr node = graph->AddNode(op_desc_); - return node; - } - - private: - OpDescPtr op_desc_; + public: + NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared(name, type); } + + NodeBuilder &AddInputDesc(std::initializer_list shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW, + DataType data_type = DT_FLOAT) { + op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone()); + return *this; + } + + NodeBuilder &AddOutputDesc(std::initializer_list shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW, + DataType data_type = DT_FLOAT) { + op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone()); + return *this; + } + + NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) { + op_desc_->AddOutputDesc(tensor_desc->Clone()); + return *this; + } + + NodePtr Build(const ComputeGraphPtr &graph) { + NodePtr node = graph->AddNode(op_desc_); + return node; + } + + private: + OpDescPtr op_desc_; }; } // namespace @@ -93,28 +94,137 @@ TEST_F(UtestDynamicShapePartition, single_op_scene_success) { EXPECT_EQ(partitioner.Partition(), SUCCESS); } +/******************************************************************************* + * | + * Merge1 + * Active / \ Active + * / \. + * / \. + * Merge2 \. + * Active/ \Active \. + * / \ \. 
+ * Add Sub Relu + * | | | + * | | | + * Switch_f2 Switch_t2 | + * \ / | + * \ / | + * Less2 | + * | | + * | | + * Switch_f Switch_t + * | \ / | + * | Active | + * | | | + * | Less1 | + * | / \ | + * | / \ | + * Data Data + ******************************************************************************/ TEST_F(UtestDynamicShapePartition, merge_control_flow_group) { ComputeGraphPtr graph = std::make_shared("default"); AttrUtils::SetStr(*graph, ATTR_NAME_SESSION_GRAPH_ID, "session_graph_id"); - NodePtr data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); - NodePtr data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); - NodePtr merge = NodeBuilder("node2", MERGE).AddInputDesc({1}).AddInputDesc({1}) - .AddOutputDesc({1}).AddOutputDesc({}).Build(graph); - - GraphUtils::AddEdge(data1->GetOutDataAnchor(0), merge->GetInDataAnchor(0)); - GraphUtils::AddEdge(data2->GetOutDataAnchor(0), merge->GetInDataAnchor(1)); - - (void)AttrUtils::SetBool(data1->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true); - (void)AttrUtils::SetInt(data1->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3); - (void)AttrUtils::SetBool(data2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true); - (void)AttrUtils::SetInt(data2->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3); - (void)AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true); - (void)AttrUtils::SetInt(merge->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3); + auto data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + + auto less1 = NodeBuilder("less1", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto active1 = NodeBuilder("active1", STREAMACTIVE).Build(graph); + auto switch_t = NodeBuilder("switch_t", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph); + auto switch_f = NodeBuilder("switch_f", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph); + auto const_01 = NodeBuilder("const_01", CONSTANT).AddOutputDesc({1}).Build(graph); + auto const_11 = NodeBuilder("const_11", CONSTANT).AddOutputDesc({1}).Build(graph); + + + auto less2 = NodeBuilder("less2", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto active2 = NodeBuilder("active2", STREAMACTIVE).Build(graph); + auto switch_t2 = NodeBuilder("switch_t2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph); + auto switch_f2 = NodeBuilder("switch_f2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph); + auto const_02 = NodeBuilder("const_02", CONSTANT).AddOutputDesc({1}).Build(graph); + auto const_12 = NodeBuilder("const_12", CONSTANT).AddOutputDesc({1}).Build(graph); + + auto add2 = NodeBuilder("add2", ADD).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto sub2 = NodeBuilder("sub2", SUB).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto merge2 = NodeBuilder("merge2", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto active_f2 = NodeBuilder("active_f2", STREAMACTIVE).Build(graph); + auto active_t2 = NodeBuilder("active_t2", STREAMACTIVE).Build(graph); + + auto relu1 = NodeBuilder("relu1", RELU).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto merge1 = NodeBuilder("merge1", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph); + auto active_f1 = NodeBuilder("active_f1", STREAMACTIVE).Build(graph); + auto active_t1 = 
NodeBuilder("active_t1", STREAMACTIVE).Build(graph); + + auto output1 = NodeBuilder("noutput1", NETOUTPUT).AddInputDesc({1}).Build(graph); + + GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less1->GetInDataAnchor(0)); + GraphUtils::AddEdge(data2->GetOutDataAnchor(0), less1->GetInDataAnchor(1)); + GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(0)); + GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_f->GetInDataAnchor(0)); + GraphUtils::AddEdge(const_01->GetOutDataAnchor(0), switch_t->GetInDataAnchor(1)); + GraphUtils::AddEdge(const_11->GetOutDataAnchor(0), switch_f->GetInDataAnchor(1)); + GraphUtils::AddEdge(less1->GetOutControlAnchor(), active1->GetInControlAnchor()); + GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_t->GetInControlAnchor()); + GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_f->GetInControlAnchor()); + + + GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less2->GetInDataAnchor(0)); + GraphUtils::AddEdge(less1->GetOutDataAnchor(0), less2->GetInDataAnchor(1)); + GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(0)); + GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(0)); + GraphUtils::AddEdge(const_02->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(1)); + GraphUtils::AddEdge(const_12->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(1)); + GraphUtils::AddEdge(less2->GetOutControlAnchor(), active2->GetInControlAnchor()); + GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_t2->GetInControlAnchor()); + GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_f2->GetInControlAnchor()); + + + GraphUtils::AddEdge(switch_f2->GetOutControlAnchor(), add2->GetInControlAnchor()); + GraphUtils::AddEdge(less2->GetOutDataAnchor(0), add2->GetInDataAnchor(0)); + GraphUtils::AddEdge(add2->GetOutDataAnchor(0), merge2->GetInDataAnchor(0)); + GraphUtils::AddEdge(add2->GetOutControlAnchor(), active_f2->GetInControlAnchor()); + GraphUtils::AddEdge(active_f2->GetOutControlAnchor(), merge2->GetInControlAnchor()); + + GraphUtils::AddEdge(switch_t2->GetOutControlAnchor(), sub2->GetInControlAnchor()); + GraphUtils::AddEdge(less2->GetOutDataAnchor(0), sub2->GetInDataAnchor(0)); + GraphUtils::AddEdge(sub2->GetOutDataAnchor(0), merge2->GetInDataAnchor(1)); + GraphUtils::AddEdge(sub2->GetOutControlAnchor(), active_t2->GetInControlAnchor()); + GraphUtils::AddEdge(active_t2->GetOutControlAnchor(), merge2->GetInControlAnchor()); + + GraphUtils::AddEdge(switch_t->GetOutControlAnchor(), less2->GetInControlAnchor()); + GraphUtils::AddEdge(switch_f->GetOutControlAnchor(), relu1->GetInControlAnchor()); + + + GraphUtils::AddEdge(merge2->GetOutDataAnchor(0), merge1->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge2->GetOutControlAnchor(), active_f1->GetInControlAnchor()); + GraphUtils::AddEdge(active_f1->GetOutControlAnchor(), merge1->GetInControlAnchor()); + + GraphUtils::AddEdge(data2->GetOutDataAnchor(0), relu1->GetInDataAnchor(1)); + GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0)); + GraphUtils::AddEdge(relu1->GetOutControlAnchor(), active_t1->GetInControlAnchor()); + GraphUtils::AddEdge(active_t1->GetOutControlAnchor(), merge1->GetInControlAnchor()); + + GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), output1->GetInDataAnchor(0)); + + AttrUtils::SetBool(merge2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true); + EXPECT_EQ(graph->TopologicalSorting(), GRAPH_SUCCESS); + + SetControlFlowGroup(merge2, merge2->GetOpDesc()->GetId()); + SetControlFlowGroup(switch_f2, 
merge2->GetOpDesc()->GetId()); + SetControlFlowGroup(switch_t2, merge2->GetOpDesc()->GetId()); + SetControlFlowGroup(active2, merge2->GetOpDesc()->GetId()); + SetControlFlowGroup(active_t2, merge2->GetOpDesc()->GetId()); + SetControlFlowGroup(active_f2, merge2->GetOpDesc()->GetId()); + + SetControlFlowGroup(merge1, merge1->GetOpDesc()->GetId()); + SetControlFlowGroup(switch_f, merge1->GetOpDesc()->GetId()); + SetControlFlowGroup(switch_t, merge1->GetOpDesc()->GetId()); + SetControlFlowGroup(active1, merge1->GetOpDesc()->GetId()); + SetControlFlowGroup(active_f1, merge1->GetOpDesc()->GetId()); + SetControlFlowGroup(active_t1, merge1->GetOpDesc()->GetId()); EXPECT_EQ(graph->impl_->sub_graph_.size(), 0); DynamicShapePartitioner partitioner(graph); EXPECT_EQ(partitioner.Partition(), SUCCESS); - EXPECT_EQ(graph->impl_->sub_graph_.size(), 1); + EXPECT_EQ(graph->impl_->sub_graph_.size(), 3); // input less1 uknown } } // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc index 07022230..e0ccbfa5 100644 --- a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc +++ b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc @@ -84,9 +84,6 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_kernel_task) { SubgraphContext subgraph_context(nullptr, &execution_context); NodeState node_state(*node_item, &subgraph_context); - auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context); - auto shared_task_context = std::shared_ptr(task_context.release()); - node_state.SetTaskContext(shared_task_context); ExecutionEngine execution_engine; ASSERT_TRUE(node_state.GetTaskContext() != nullptr); @@ -119,14 +116,11 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) { SubgraphContext subgraph_context(nullptr, &execution_context); NodeState node_state(*node_item, &subgraph_context); - auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context); uint32_t task_id = 0; uint32_t stream_id = 1; std::string task_type = "rts"; uint32_t block_dim = 0; - task_context->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim); - auto shared_task_context = std::shared_ptr(task_context.release()); - node_state.SetTaskContext(shared_task_context); + node_state.GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim); ExecutionEngine execution_engine; ASSERT_TRUE(node_state.GetTaskContext() != nullptr); diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index f6c75d50..d634ed14 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -161,10 +161,8 @@ TEST_F(UtestGeHybrid, task_update_tiling_info) { GraphExecutionContext execution_context; SubgraphContext subgraph_context(nullptr, &execution_context); NodeState node_state(*node_item, &subgraph_context); - auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context); - ASSERT_TRUE(task_context != nullptr); ASSERT_EQ(aicore_task->InitTilingInfo(*op_desc), SUCCESS); - ASSERT_EQ(aicore_task->UpdateTilingInfo(*task_context), SUCCESS); + ASSERT_EQ(aicore_task->UpdateTilingInfo(*node_state.GetTaskContext()), SUCCESS); } TEST_F(UtestGeHybrid, index_taskdefs_failed) { @@ -482,7 +480,7 @@ TEST_F(UtestGeHybrid, TestTaskContext) { subgraph_context.all_outputs_.resize(1); NodeState 
node_state(*node_item, &subgraph_context); - auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context); + auto task_context = node_state.GetTaskContext(); ASSERT_TRUE(task_context != nullptr); auto desc = task_context->MutableInputDesc(2); ASSERT_TRUE(desc == nullptr); @@ -527,7 +525,7 @@ TEST_F(UtestGeHybrid, hybrid_model_executor_update_args) { subgraph_context.all_outputs_.resize(1); NodeState node_state(*node_item, &subgraph_context); - auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context); + auto task_context = node_state.GetTaskContext(); int32_t buffer[1]; aicore_task->tiling_buffer_ = TensorBuffer::Create(buffer, sizeof(buffer)); diff --git a/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc index a7a407a4..e4d211f9 100644 --- a/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc @@ -97,11 +97,6 @@ TEST_F(UtestGeLocalNodeExecutor, test_no_op_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - NodeTaskPtr task = nullptr; GeLocalNodeExecutor node_executor; ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS); diff --git a/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc b/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc index 44b2f37f..109e5192 100644 --- a/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc @@ -96,11 +96,6 @@ TEST_F(UtestRtsNodeTask, test_stream_switch_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - uint64_t value_0 = 110; uint64_t value_1 = 120; TensorValue in_tensor0(&value_0, sizeof(value_0)); @@ -153,11 +148,6 @@ TEST_F(UtestRtsNodeTask, test_stream_active_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - NodeTaskPtr task = nullptr; RtsNodeExecutor node_executor; ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS); @@ -203,11 +193,6 @@ TEST_F(UtestRtsNodeTask, test_stream_merge_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - 
uint64_t value_0 = 110; TensorValue in_tensor0(&value_0, sizeof(value_0)); subgraph_context.SetInput(*node_item, 0, in_tensor0); @@ -271,11 +256,6 @@ TEST_F(UtestRtsNodeTask, test_memcpy_async_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - uint64_t value_0 = 110; TensorValue in_tensor0(&value_0, sizeof(value_0)); subgraph_context.SetInput(*node_item, 0, in_tensor0); @@ -328,11 +308,6 @@ TEST_F(UtestRtsNodeTask, test_pass_through_task) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - uint64_t value_0 = 110; TensorValue in_tensor0(&value_0, sizeof(value_0)); subgraph_context.SetInput(*node_item, 0, in_tensor0); @@ -384,11 +359,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_set) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - NodeTaskPtr task = nullptr; RtsNodeExecutor node_executor; ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS); @@ -428,11 +398,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_goto) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - NodeTaskPtr task = nullptr; RtsNodeExecutor node_executor; ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS); @@ -472,11 +437,6 @@ TEST_F(UtestRtsNodeTask, test_unsupport_label_switch) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - NodeTaskPtr task = nullptr; RtsNodeExecutor node_executor; ASSERT_EQ(node_executor.LoadTask(hybrid_model, node, task), SUCCESS); From 8852766766ec531f47227d237706b04fc53dff8d Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 12 Jun 2021 13:16:42 +0800 Subject: [PATCH 022/226] Fix hccl_node_executor_unittest --- .../hccl/hccl_node_executor_unittest.cc | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc index afaf067e..8e6630f6 100644 --- 
a/tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/hccl/hccl_node_executor_unittest.cc @@ -94,18 +94,17 @@ TEST_F(UtestHcclNodeExecutor, test_rdmatask_extract_tensor) { tensor.SetData(data); ctx->SetTensor(1, 0, tensor.Clone()); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); vector addr_infos; shared_ptr task = MakeShared(); task->remote_index_ = {1, 0}; - ASSERT_EQ(task->ExtractTensor(*unique_task_context, addr_infos), PARAM_INVALID); + ASSERT_EQ(task->ExtractTensor(*node_state->GetTaskContext(), addr_infos), PARAM_INVALID); Shape s2({1}); TensorDesc tensor_desc2(s2); Tensor tensor2(tensor_desc2); ctx->SetTensor(1, 0, tensor2.Clone()); - task->ExtractTensor(*unique_task_context, addr_infos); - ASSERT_EQ(task->ExtractTensor(*unique_task_context, addr_infos), PARAM_INVALID); + task->ExtractTensor(*node_state->GetTaskContext(), addr_infos); + ASSERT_EQ(task->ExtractTensor(*node_state->GetTaskContext(), addr_infos), PARAM_INVALID); RuntimeInferenceContext::DestroyContext(std::to_string(graph_context.context_id)); } @@ -140,11 +139,6 @@ TEST_F(UtestHcclNodeExecutor, gatheralltoallv_execute) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - for (int i=0; i<4; ++i) { uint64_t value_0 = 512; TensorValue in_tensor0(&value_0, sizeof(value_0)); @@ -206,11 +200,6 @@ TEST_F(UtestHcclNodeExecutor, alltoallv_execute) { auto node_state = subgraph_context.GetOrCreateNodeState(node_item); ASSERT_NE(node_state, nullptr); - auto unique_task_context = TaskContext::Create(node_state.get(), &graph_context, &subgraph_context); - ASSERT_NE(unique_task_context, nullptr); - auto shared_task_context = std::shared_ptr(unique_task_context.release()); - node_state->SetTaskContext(shared_task_context); - for (int i=0; i<5; ++i) { uint64_t value_0 = 512; TensorValue in_tensor0(&value_0, sizeof(value_0)); From ab65075326c2758b5054abb766ca3275b0e26e94 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 12 Jun 2021 17:52:12 +0800 Subject: [PATCH 023/226] Add Init to NodeState --- ge/hybrid/executor/node_state.cc | 9 +++++++++ ge/hybrid/executor/node_state.h | 10 ++-------- ge/hybrid/executor/subgraph_context.cc | 25 ++++++++++++++++++------- ge/hybrid/executor/subgraph_context.h | 1 + 4 files changed, 30 insertions(+), 15 deletions(-) diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc index 42e08811..468c84e6 100644 --- a/ge/hybrid/executor/node_state.cc +++ b/ge/hybrid/executor/node_state.cc @@ -259,8 +259,16 @@ ShapeFuture::ShapeFuture(NodeState *src_node, NodeState::NodeState(const NodeItem &node_item, SubgraphContext *subgraph_context) : node_item_(&node_item), shape_inference_state_(node_item), subgraph_context_(subgraph_context) { this->op_desc_ = node_item.node->GetOpDesc(); +} + +Status NodeState::Init(int group, const shared_ptr &frame_state) { + GE_CHECK_NOTNULL(frame_state); + group_ = group; + frame_state_ = frame_state; auto unique_task_context = TaskContext::Create(this, subgraph_context_); + GE_CHECK_NOTNULL(unique_task_context); task_context_ = std::shared_ptr(unique_task_context.release()); + return SUCCESS; } Status 
NodeState::AwaitInputTensors(GraphExecutionContext &context) const { @@ -350,6 +358,7 @@ void NodeState::ResetContext(uint64_t iteration) { switch_index_ = -1; subgraph_context_->ResetContext(node_item_->node); auto unique_task_context = TaskContext::Create(this, subgraph_context_); + GE_CHECK_NOTNULL_JUST_RETURN(unique_task_context); task_context_ = std::shared_ptr(unique_task_context.release()); data_scheduled_ = static_cast(node_item_->root_data_.size()); diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h index 72e2b90e..85f9e4c3 100644 --- a/ge/hybrid/executor/node_state.h +++ b/ge/hybrid/executor/node_state.h @@ -100,6 +100,8 @@ struct NodeState { NodeState(const NodeItem &node_item, SubgraphContext *subgraph_context); ~NodeState() = default; + Status Init(int group, const shared_ptr &frame_state); + OpDesc *GetOpDesc() const { return op_desc_.get(); } @@ -152,18 +154,10 @@ struct NodeState { return merge_index_; } - void SetGroup(int group) { - group_ = group; - } - int GetGroup() const { return group_; } - void SetFrameState(const shared_ptr &frame_state) { - frame_state_ = frame_state; - } - const shared_ptr &GetKernelTask() const { return kernel_task_; } diff --git a/ge/hybrid/executor/subgraph_context.cc b/ge/hybrid/executor/subgraph_context.cc index 41ada9af..5e97a9a2 100644 --- a/ge/hybrid/executor/subgraph_context.cc +++ b/ge/hybrid/executor/subgraph_context.cc @@ -79,20 +79,31 @@ NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) { return nullptr; } + return CreateNodeState(node_item); +} + +NodeStatePtr SubgraphContext::CreateNodeState(const NodeItem *node_item) { GELOGD("[%s] lock for write", node_item->NodeName().c_str()); if (mmRWLockWRLock(&rw_lock_) != EN_OK) { REPORT_CALL_ERROR("E19999", "[Node:%s] Lock for write failed", node_item->NodeName().c_str()); GELOGE(INTERNAL_ERROR, "[RWLock][Lock][Node:%s] Lock for write failed", node_item->NodeName().c_str()); return nullptr; } + auto &node_state = node_states_[node_item]; - if (node_state == nullptr) { - const auto &guard = node_item->MutexGuard("GetOrCreateNodeState"); - node_state.reset(new(std::nothrow)NodeState(*node_item, this)); - node_state->SetFrameState(GetOrCreateFrameState(*node_item)); - node_state->SetGroup(group_); - (void)guard; - } + do { + if (node_state == nullptr) { + const auto &guard = node_item->MutexGuard("GetOrCreateNodeState"); + node_state.reset(new(std::nothrow)NodeState(*node_item, this)); + if (node_state == nullptr || node_state->Init(group_, GetOrCreateFrameState(*node_item)) != SUCCESS) { + GELOGE(INTERNAL_ERROR, "[Create][NodeState] failed for[%s].", node_item->NodeName().c_str()); + REPORT_CALL_ERROR("E19999", "Create NodeState failed for %s.", node_item->NodeName().c_str()); + break; + } + (void)guard; + } + } while (0); + GELOGD("[%s] unlock for write", node_item->NodeName().c_str()); if (mmWRLockUnLock(&rw_lock_) != EN_OK) { REPORT_CALL_ERROR("E19999", "[Node:%s] Unlock for write failed", node_item->NodeName().c_str()); diff --git a/ge/hybrid/executor/subgraph_context.h b/ge/hybrid/executor/subgraph_context.h index d11d00d7..023be981 100644 --- a/ge/hybrid/executor/subgraph_context.h +++ b/ge/hybrid/executor/subgraph_context.h @@ -51,6 +51,7 @@ class SubgraphContext { void NodeDone(const NodePtr &node); private: + NodeStatePtr CreateNodeState(const NodeItem *node_item); FrameStatePtr GetOrCreateFrameState(const NodeItem &node_item); // no lock friend class TaskContext; const GraphItem *graph_item_; From 
f578e8fff4f958e1ec52b8e0c73b6dbc95e7c77d Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 12 Jun 2021 18:36:32 +0800 Subject: [PATCH 024/226] Fix NodeState for UT --- .../worker/execution_engine_unittest.cc | 23 +++++++++---------- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 20 +++++++++------- 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc index e0ccbfa5..cc20d614 100644 --- a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc +++ b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc @@ -83,15 +83,14 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_kernel_task) { execution_context.profiling_level = 1; SubgraphContext subgraph_context(nullptr, &execution_context); - NodeState node_state(*node_item, &subgraph_context); - - ExecutionEngine execution_engine; - ASSERT_TRUE(node_state.GetTaskContext() != nullptr); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); + ASSERT_TRUE(node_state->GetTaskContext() != nullptr); std::function callback; SubgraphExecutor executor(hybrid_model.GetRootGraphItem(), &execution_context); - executor.InitCallback(&node_state, callback); - EXPECT_EQ(execution_engine.ExecuteAsync(node_state, node_state.GetTaskContext(), execution_context, callback), INTERNAL_ERROR); + executor.InitCallback(node_state.get(), callback); + ExecutionEngine execution_engine; + EXPECT_EQ(execution_engine.ExecuteAsync(*node_state, node_state->GetTaskContext(), execution_context, callback), INTERNAL_ERROR); } TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) { @@ -115,18 +114,18 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) { execution_context.model = &hybrid_model; SubgraphContext subgraph_context(nullptr, &execution_context); - NodeState node_state(*node_item, &subgraph_context); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); uint32_t task_id = 0; uint32_t stream_id = 1; std::string task_type = "rts"; uint32_t block_dim = 0; - node_state.GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim); + node_state->GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim); - ExecutionEngine execution_engine; - ASSERT_TRUE(node_state.GetTaskContext() != nullptr); + ASSERT_TRUE(node_state->GetTaskContext() != nullptr); std::function callback; SubgraphExecutor executor(hybrid_model.GetRootGraphItem(), &execution_context); - executor.InitCallback(&node_state, callback); - EXPECT_EQ(execution_engine.ExecuteAsync(node_state, node_state.GetTaskContext(), execution_context, callback), INTERNAL_ERROR); + executor.InitCallback(node_state.get(), callback); + ExecutionEngine execution_engine; + EXPECT_EQ(execution_engine.ExecuteAsync(*node_state, node_state->GetTaskContext(), execution_context, callback), INTERNAL_ERROR); } diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index d634ed14..228af832 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -160,9 +160,9 @@ TEST_F(UtestGeHybrid, task_update_tiling_info) { GraphExecutionContext execution_context; SubgraphContext subgraph_context(nullptr, &execution_context); - NodeState node_state(*node_item, &subgraph_context); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); 
ASSERT_EQ(aicore_task->InitTilingInfo(*op_desc), SUCCESS); - ASSERT_EQ(aicore_task->UpdateTilingInfo(*node_state.GetTaskContext()), SUCCESS); + ASSERT_EQ(aicore_task->UpdateTilingInfo(*node_state->GetTaskContext()), SUCCESS); } TEST_F(UtestGeHybrid, index_taskdefs_failed) { @@ -475,12 +475,14 @@ TEST_F(UtestGeHybrid, TestTaskContext) { node_item->output_start = 0; GraphExecutionContext execution_context; - SubgraphContext subgraph_context(nullptr, &execution_context); + GraphItem graph_item; + SubgraphContext subgraph_context(&graph_item, &execution_context); + ASSERT_EQ(subgraph_context.Init(), SUCCESS); subgraph_context.all_inputs_.resize(2); subgraph_context.all_outputs_.resize(1); - NodeState node_state(*node_item, &subgraph_context); - auto task_context = node_state.GetTaskContext(); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); + auto task_context = node_state->GetTaskContext(); ASSERT_TRUE(task_context != nullptr); auto desc = task_context->MutableInputDesc(2); ASSERT_TRUE(desc == nullptr); @@ -520,12 +522,14 @@ TEST_F(UtestGeHybrid, hybrid_model_executor_update_args) { node_item->output_start = 0; GraphExecutionContext execution_context; - SubgraphContext subgraph_context(nullptr, &execution_context); + GraphItem graph_item; + SubgraphContext subgraph_context(&graph_item, &execution_context); + ASSERT_EQ(subgraph_context.Init(), SUCCESS); subgraph_context.all_inputs_.resize(2); subgraph_context.all_outputs_.resize(1); - NodeState node_state(*node_item, &subgraph_context); - auto task_context = node_state.GetTaskContext(); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); + auto task_context = node_state->GetTaskContext(); int32_t buffer[1]; aicore_task->tiling_buffer_ = TensorBuffer::Create(buffer, sizeof(buffer)); From 367774c5b009edc3d8838163629a37925692e611 Mon Sep 17 00:00:00 2001 From: wangzhengjun Date: Tue, 15 Jun 2021 14:44:44 +0800 Subject: [PATCH 025/226] enable optimization --- ge/graph/optimize/graph_optimize.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/ge/graph/optimize/graph_optimize.cc b/ge/graph/optimize/graph_optimize.cc index 835e257b..55f374eb 100644 --- a/ge/graph/optimize/graph_optimize.cc +++ b/ge/graph/optimize/graph_optimize.cc @@ -336,10 +336,8 @@ Status GraphOptimize::OptimizeAfterStage1(ComputeGraphPtr &compute_graph) { GELOGI("[OptimizeAfterStage1]: engine type will exclude:%s.", exclude_core_type.c_str()); continue; } -#ifndef ONLY_COMPILE_OPEN_SRC GELOGI("Begin to optimize graph after stage1 by engine %s.", iter->first.c_str()); ret = (iter->second)->OptimizeAfterStage1(*compute_graph); -#endif if (ret != SUCCESS) { REPORT_INNER_ERROR("E19999", "Call OptimizeAfterStage1 failed, ret:%d, engine_name:%s, " "graph_name:%s.", ret, iter->first.c_str(), compute_graph->GetName().c_str()); From 181cd5891bd97b4aca9f28330e1f0a20def75e69 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 16:46:28 +0800 Subject: [PATCH 026/226] Release context in execute end. 
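The intent of this change is to drop the per-iteration SubgraphContext as soon as one execution's outputs have been fetched, so its node states and tensors do not pile up across iterations. Below is a minimal standalone sketch of that lifecycle using stand-in types rather than the real GE classes; note the patch itself implements the drop with subgraph_context_.release(), while the sketch uses the reset(nullptr) form that PATCH 035 later switches to.

#include <memory>

struct SubgraphContext {};  // stand-in for ge::hybrid::SubgraphContext

class SubgraphExecutor {    // stand-in; only the ownership pattern is real
 public:
  void Prepare() { subgraph_context_.reset(new SubgraphContext()); }
  void ResetContext() { subgraph_context_.reset(nullptr); }  // context destroyed
 private:
  std::unique_ptr<SubgraphContext> subgraph_context_;
};

int main() {
  SubgraphExecutor executor;
  for (int iteration = 0; iteration < 3; ++iteration) {
    executor.Prepare();       // fresh context for this iteration
    /* execute graph, fetch outputs via GetOutputs() */
    executor.ResetContext();  // context freed before the next iteration
  }
  return 0;
}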
--- ge/hybrid/executor/hybrid_model_executor.cc | 1 + ge/hybrid/executor/subgraph_executor.h | 2 ++ 2 files changed, 3 insertions(+) diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index b3c2c471..b4173407 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -125,6 +125,7 @@ Status HybridModelExecutor::ExecuteGraphInternal(HybridModelExecutor::ExecuteArg args.outputs.clear(); HYBRID_CHK_STATUS_RET(root_graph_executor_->GetOutputs(args.outputs, args.output_desc), "Failed to get outputs"); RECORD_MODEL_EXECUTION_EVENT(&context_, "[GetOutput] End"); + root_graph_executor_->ResetContext(); return SUCCESS; } diff --git a/ge/hybrid/executor/subgraph_executor.h b/ge/hybrid/executor/subgraph_executor.h index 758bf426..0f54e4ca 100644 --- a/ge/hybrid/executor/subgraph_executor.h +++ b/ge/hybrid/executor/subgraph_executor.h @@ -41,6 +41,8 @@ class SubgraphExecutor { Status PartialExecuteAsync(int task_group); + void ResetContext() { subgraph_context_.release(); } + /** * Execute subgraph async, output tensor address(not data) and output tensor descriptions are * valid after this method returned From ab7334ed780343a80c885a1b064d1a42fa51faf0 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 16:50:40 +0800 Subject: [PATCH 027/226] Release context in execute end. --- ge/hybrid/executor/hybrid_model_executor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index b4173407..2abd9cd6 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -70,6 +70,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) { context_.profiler->Dump(std::cout); context_.profiler->Reset(); } + root_graph_executor_->ResetContext(); context_.iteration += 1; if (ret == END_OF_SEQUENCE) { @@ -125,7 +126,6 @@ Status HybridModelExecutor::ExecuteGraphInternal(HybridModelExecutor::ExecuteArg args.outputs.clear(); HYBRID_CHK_STATUS_RET(root_graph_executor_->GetOutputs(args.outputs, args.output_desc), "Failed to get outputs"); RECORD_MODEL_EXECUTION_EVENT(&context_, "[GetOutput] End"); - root_graph_executor_->ResetContext(); return SUCCESS; } From 492d36b237ec601da5644054ab3eed4c4fbfd6d7 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 20:01:06 +0800 Subject: [PATCH 028/226] Fix ut. --- tests/ut/ge/single_op/single_op_model_unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index 1cb2b22c..fb772c33 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -223,7 +223,7 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { model.model_helper_.model_->SetGraph(graph); auto op_desc = transdata->GetOpDesc(); - op_desc->input_name_idx_["Data"] = 0; + op_desc->impl_->input_name_idx_["Data"] = 0; const vector depend_names = { "Data" }; op_desc->SetOpInferDepends(depend_names); (void)AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true); From 0c2d07eb7250e5cad532a906691f48b4dd48b552 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 21:44:09 +0800 Subject: [PATCH 029/226] Fix ut. 
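Besides the UT include, this change reroutes user-fed input shapes: rather than pushing them through the Data node's shape-inference state, they are copied directly onto the Data op's output desc, and InferShapeAndType is then skipped for DATA_TYPE/AIPP_DATA_TYPE nodes. A runnable toy model of that control flow follows; the types and helper names are stand-ins, not the GE API.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Stand-ins for GeTensorDesc / Node; only the flow is meant to match the patch.
struct TensorDesc { std::vector<int64_t> shape; };
struct Node { std::string type; TensorDesc output_desc; };

// Mirrors the new InitInputsForUnknownShape: copy the fed desc straight onto
// the Data node's output desc (SetShape / SetOriginShape / SetDataType).
void SeedDataNode(Node &node, const TensorDesc &fed) { node.output_desc = fed; }

// Mirrors the new guard added in ShapeInferenceEngine::InferShape.
bool NeedInferShape(const Node &node) {
  return node.type != "Data" && node.type != "AippData";
}

int main() {
  Node data{"Data", {}};
  SeedDataNode(data, TensorDesc{{8, 3, 224, 224}});
  assert(!NeedInferShape(data));               // Data nodes skip inference ...
  assert(data.output_desc.shape.size() == 4);  // ... their desc is pre-seeded
  return 0;
}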
--- ge/hybrid/executor/subgraph_executor.cc | 9 ++++++--- ge/hybrid/executor/worker/shape_inference_engine.cc | 2 +- tests/ut/ge/single_op/single_op_model_unittest.cc | 1 + 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc index 612e7565..4f0566b4 100644 --- a/ge/hybrid/executor/subgraph_executor.cc +++ b/ge/hybrid/executor/subgraph_executor.cc @@ -100,9 +100,12 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vectorGetOrCreateNodeState(input_node); - GE_CHECK_NOTNULL(node_state); - node_state->GetShapeInferenceState().UpdateInputShape(0, *tensor_desc); + auto op_desc = input_node->GetOpDesc(); + GE_CHECK_NOTNULL(op_desc); + auto output_desc = op_desc->MutableOutputDesc(kDataInputIndex); + output_desc.SetShape(tensor_desc->GetShape()); + output_desc.SetOriginShape(tensor_desc->GetOriginShape()); + output_desc.SetDataType(tensor_desc->GetDataType()); } } diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc index a2efbb25..4dc5b79c 100755 --- a/ge/hybrid/executor/worker/shape_inference_engine.cc +++ b/ge/hybrid/executor/worker/shape_inference_engine.cc @@ -69,7 +69,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) { // Do shape inference GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str()); - { + if (node_state.GetType() != DATA_TYPE && node_state.GetType() != AIPP_DATA_TYPE) { RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start"); GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true), "[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str()); diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index fb772c33..cb0b497d 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -30,6 +30,7 @@ #include "single_op/single_op.h" #include "single_op/stream_resource.h" #include "graph/passes/graph_builder_utils.h" +#include "graph/op_desc_impl.h" #undef private #undef protected From 7ce31b2e0ec853582645d45336874c1262424b44 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 22:06:11 +0800 Subject: [PATCH 030/226] Fix ut. --- ge/hybrid/executor/subgraph_executor.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc index 4f0566b4..b474c5dd 100644 --- a/ge/hybrid/executor/subgraph_executor.cc +++ b/ge/hybrid/executor/subgraph_executor.cc @@ -103,9 +103,10 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vectorGetOpDesc(); GE_CHECK_NOTNULL(op_desc); auto output_desc = op_desc->MutableOutputDesc(kDataInputIndex); - output_desc.SetShape(tensor_desc->GetShape()); - output_desc.SetOriginShape(tensor_desc->GetOriginShape()); - output_desc.SetDataType(tensor_desc->GetDataType()); + GE_CHECK_NOTNULL(output_desc); + output_desc->SetShape(tensor_desc->GetShape()); + output_desc->SetOriginShape(tensor_desc->GetOriginShape()); + output_desc->SetDataType(tensor_desc->GetDataType()); } } From 24eedfa3b4df7eb41fbb13f36759f7537500209a Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 15 Jun 2021 22:09:17 +0800 Subject: [PATCH 031/226] Fix ut. 
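This fixup chain (PATCH 029 through 031) lands a single change in three steps: 029 introduced the direct seeding of the Data op's output desc, 030 repaired it (MutableOutputDesc hands back a tensor-desc smart pointer, so the result needs a null check and '->' access rather than '.'), and this patch adds the explanatory comment for the inference skip. A standalone sketch of the pointer detail that 030 corrected, with stand-in types:

#include <memory>

struct TensorDesc { void SetShape(int dims) { (void)dims; } };
using TensorDescPtr = std::shared_ptr<TensorDesc>;  // stand-in for GeTensorDescPtr

TensorDescPtr MutableOutputDesc() { return std::make_shared<TensorDesc>(); }

int main() {
  auto output_desc = MutableOutputDesc();
  if (output_desc == nullptr) {  // mirrors GE_CHECK_NOTNULL(output_desc)
    return -1;
  }
  output_desc->SetShape(4);      // '->' on the smart pointer, not '.'
  return 0;
}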
--- ge/hybrid/executor/worker/shape_inference_engine.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc index 4dc5b79c..18fed710 100755 --- a/ge/hybrid/executor/worker/shape_inference_engine.cc +++ b/ge/hybrid/executor/worker/shape_inference_engine.cc @@ -68,6 +68,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) { } // Do shape inference + // Skipping infer shape of input node. GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str()); if (node_state.GetType() != DATA_TYPE && node_state.GetType() != AIPP_DATA_TYPE) { RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start"); From b64048a39f53e30fcc69491bf0c93c805bdec3b8 Mon Sep 17 00:00:00 2001 From: xchu42 Date: Sat, 12 Jun 2021 14:01:24 +0800 Subject: [PATCH 032/226] Init NodeExecutor on demand --- ge/hybrid/node_executor/node_executor.cc | 75 ++++++------- ge/hybrid/node_executor/node_executor.h | 7 +- tests/ut/ge/CMakeLists.txt | 2 + .../node_executor/node_executor_unittest.cc | 103 ++++++++++++++++++ 4 files changed, 143 insertions(+), 44 deletions(-) create mode 100644 tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc diff --git a/ge/hybrid/node_executor/node_executor.cc b/ge/hybrid/node_executor/node_executor.cc index 5f3d6e45..04225557 100755 --- a/ge/hybrid/node_executor/node_executor.cc +++ b/ge/hybrid/node_executor/node_executor.cc @@ -58,8 +58,8 @@ Status NodeExecutor::CompileTask(const HybridModel &model, const NodePtr &node, } Status NodeExecutorManager::EnsureInitialized() { - GE_CHK_STATUS_RET(InitializeExecutors()); std::lock_guard lk(mu_); + ++ref_count_; if (initialized_) { return SUCCESS; } @@ -115,17 +115,14 @@ NodeExecutorManager::ExecutorType NodeExecutorManager::ResolveExecutorType(Node return it->second; } -Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) const { +Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) { auto executor_type = ResolveExecutorType(node); + GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast(executor_type)); const auto it = executors_.find(executor_type); if (it == executors_.end()) { - REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.", static_cast(executor_type)); - GELOGE(INTERNAL_ERROR, "[Check][ExecutorType]Failed to get executor by type: %d.", - static_cast(executor_type)); - return INTERNAL_ERROR; + return GetOrCreateExecutor(executor_type, executor); } - GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast(executor_type)); *executor = it->second.get(); return SUCCESS; } @@ -178,51 +175,50 @@ Status NodeExecutorManager::CalcOpRunningParam(Node &node) const { return OpsKernelBuilderManager::Instance().CalcOpRunningParam(node); } -Status NodeExecutorManager::InitializeExecutors() { +Status NodeExecutorManager::GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **out_executor) { std::lock_guard lk(mu_); - if (executor_initialized_) { - ++ref_count_; - GELOGI("Executor is already initialized. 
add ref count to [%d]", ref_count_); + const auto executor_it = executors_.find(executor_type); + if (executor_it != executors_.end()) { + *out_executor = executor_it->second.get(); return SUCCESS; } - GELOGI("Start to Initialize NodeExecutors"); - for (auto &it : builders_) { - auto engine_type = it.first; - auto build_fn = it.second; - GE_CHECK_NOTNULL(build_fn); - auto executor = std::unique_ptr(build_fn()); - if (executor == nullptr) { - REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for engine type = %d", - static_cast(engine_type)); - GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast(engine_type)); - return INTERNAL_ERROR; - } + GELOGI("Start to Initialize NodeExecutor, type = %d", static_cast(executor_type)); + auto it = builders_.find(executor_type); + if (it == builders_.end()) { + REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", + static_cast(executor_type)); + GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for executor type = %d", static_cast(executor_type)); + return INTERNAL_ERROR; + } - GELOGD("Executor of engine type = %d was created successfully", static_cast(engine_type)); - auto ret = executor->Initialize(); - if (ret != SUCCESS) { - REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast(engine_type)); - GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast(engine_type)); - for (auto &executor_it : executors_) { - executor_it.second->Finalize(); - } - executors_.clear(); - return ret; - } + auto build_fn = it->second; + GE_CHECK_NOTNULL(build_fn); + auto executor = std::unique_ptr(build_fn()); + if (executor == nullptr) { + REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", + static_cast(executor_type)); + GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast(executor_type)); + return INTERNAL_ERROR; + } - executors_.emplace(engine_type, std::move(executor)); + GELOGD("Executor of engine type = %d was created successfully", static_cast(executor_type)); + auto ret = executor->Initialize(); + if (ret != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast(executor_type)); + GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast(executor_type)); + return ret; } - ++ref_count_; - executor_initialized_ = true; - GELOGI("Initializing NodeExecutors successfully."); + *out_executor = executor.get(); + executors_.emplace(executor_type, std::move(executor)); + GELOGI("Initializing NodeExecutor successfully, type = %d", static_cast(executor_type)); return SUCCESS; } void NodeExecutorManager::FinalizeExecutors() { std::lock_guard lk(mu_); - if (!executor_initialized_) { + if (ref_count_ <= 0) { GELOGD("No need for finalizing for not initialized."); return; } @@ -237,7 +233,6 @@ void NodeExecutorManager::FinalizeExecutors() { it.second->Finalize(); } executors_.clear(); - executor_initialized_ = false; GELOGD("Done invoking Finalize successfully."); } diff --git a/ge/hybrid/node_executor/node_executor.h b/ge/hybrid/node_executor/node_executor.h index fffd4e7d..97c9cee9 100644 --- a/ge/hybrid/node_executor/node_executor.h +++ b/ge/hybrid/node_executor/node_executor.h @@ -179,8 +179,6 @@ class NodeExecutorManager { */ Status EnsureInitialized(); - Status InitializeExecutors(); - void FinalizeExecutors(); /** @@ -196,7 +194,7 @@ class NodeExecutorManager { * @param executor executor * @return SUCCESS on success, error 
code otherwise */ - Status GetExecutor(Node &node, const NodeExecutor **executor) const; + Status GetExecutor(Node &node, const NodeExecutor **executor); /** * Resolve executor type by node @@ -206,12 +204,13 @@ class NodeExecutorManager { ExecutorType ResolveExecutorType(Node &node) const; private: + Status GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **executor); + std::map> executors_; std::map> builders_; std::map engine_mapping_; std::mutex mu_; bool initialized_ = false; - bool executor_initialized_ = false; int ref_count_ = 0; }; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 63579109..cd3d541c 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -836,6 +836,7 @@ set(HYBRID_TEST_FILES "hybrid/executor/subgraph_executor_unittest.cc" "hybrid/executor/worker/execution_engine_unittest.cc" "hybrid/model/hybrid_model_builder_unittest.cc" + "hybrid/node_executor/node_executor_unittest.cc" "hybrid/node_executor/rts/rts_node_task_unittest.cc" "hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc" "hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc" @@ -843,6 +844,7 @@ set(HYBRID_TEST_FILES "hybrid/executor/hybrid_model_async_executor_unittest.cc" "hybrid/executor/hybrid_model_pipeline_executor_unittest.cc" "hybrid/node_executor/aicore/aicore_task_compiler_unittest.cc" + ) set(OTHERS_TEST_FILES diff --git a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc new file mode 100644 index 00000000..8a1240d3 --- /dev/null +++ b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc @@ -0,0 +1,103 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#define private public +#define protected public +#include "hybrid/node_executor/node_executor.h" +#undef protected +#undef private + +using namespace std; +using namespace testing; + +namespace ge { +using namespace hybrid; + +namespace { + bool finalized = false; +} + +class NodeExecutorTest : public testing::Test { + protected: + void SetUp() {} + void TearDown() { } +}; + +class FailureNodeExecutor : public NodeExecutor { + public: + Status Initialize() override { + return INTERNAL_ERROR; + } +}; + +class SuccessNodeExecutor : public NodeExecutor { + public: + Status Initialize() override { + initialized = true; + finalized = false; + return SUCCESS; + } + + Status Finalize() override { + finalized = true; + } + + bool initialized = false; +}; + +REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICORE, FailureNodeExecutor); +REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, SuccessNodeExecutor); + +TEST_F(NodeExecutorTest, TestGetOrCreateExecutor) { + auto &manager = NodeExecutorManager::GetInstance(); + const NodeExecutor *executor = nullptr; + Status ret = SUCCESS; + // no builder + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::RESERVED, &executor); + ASSERT_EQ(ret, INTERNAL_ERROR); + // initialize failure + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICORE, &executor); + ASSERT_EQ(ret, INTERNAL_ERROR); + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); +} + +TEST_F(NodeExecutorTest, TestInitAndFinalize) { + auto &manager = NodeExecutorManager::GetInstance(); + manager.FinalizeExecutors(); + manager.EnsureInitialized(); + manager.EnsureInitialized(); + const NodeExecutor *executor = nullptr; + auto ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); + manager.FinalizeExecutors(); + ASSERT_FALSE(manager.executors_.empty()); + manager.FinalizeExecutors(); + ASSERT_TRUE(manager.executors_.empty()); + ASSERT_TRUE(finalized); +} +} // namespace ge From d1eba02e1e972da774c2ddd474fef242f31b14d5 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 16 Jun 2021 09:28:41 +0800 Subject: [PATCH 033/226] Fix ut. --- tests/ut/ge/single_op/single_op_model_unittest.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index fb772c33..cb0b497d 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -30,6 +30,7 @@ #include "single_op/single_op.h" #include "single_op/stream_resource.h" #include "graph/passes/graph_builder_utils.h" +#include "graph/op_desc_impl.h" #undef private #undef protected From 69da59b6b790cd76dca11413b5e342aab6a56caa Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 16 Jun 2021 09:40:26 +0800 Subject: [PATCH 034/226] Fix ut. 
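Besides migrating the UTs to subgraph_context.GetOrCreateNodeState(), this change restores the UpdateInputShape call in InitInputsForUnknownShape, so a fed shape now travels down both paths: the Data node's ShapeInferenceState (seeded via UpdateInputShape) and the op's output desc (read directly, since inference is skipped for Data nodes). A toy model of the dual seeding follows; the types are stand-ins and only the two seeding steps mirror the diff.

#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

struct ShapeInferenceState {  // stand-in: records that the fed shape arrived
  bool shape_ready = false;
  void UpdateInputShape(int index, const Shape &shape) {
    (void)index; (void)shape;
    shape_ready = true;
  }
};

struct DataNode {
  ShapeInferenceState infer_state;
  Shape output_desc_shape;    // stand-in for the op's output GeTensorDesc
};

void InitDataInput(DataNode &node, const Shape &fed_shape) {
  node.infer_state.UpdateInputShape(0, fed_shape);  // wake shape waiters
  node.output_desc_shape = fed_shape;               // seed the skipped-inference path
}

int main() {
  DataNode node;
  InitDataInput(node, {8, 3, 224, 224});
  assert(node.infer_state.shape_ready);
  assert(node.output_desc_shape.size() == 4);
  return 0;
}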
---
 ge/hybrid/executor/subgraph_executor.cc                      | 5 ++++-
 .../hybrid/executor/hybrid_model_async_executor_unittest.cc  | 4 ++--
 tests/ut/ge/single_op/single_op_model_unittest.cc            | 4 +++-
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc
index 7081c8f4..c26eac9b 100644
--- a/ge/hybrid/executor/subgraph_executor.cc
+++ b/ge/hybrid/executor/subgraph_executor.cc
@@ -100,13 +100,16 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue> &inputs,
+      auto node_state = subgraph_context_->GetOrCreateNodeState(input_node);
+      GE_CHECK_NOTNULL(node_state);
+      node_state->GetShapeInferenceState().UpdateInputShape(0, *tensor_desc);
       auto op_desc = input_node->GetOpDesc();
       GE_CHECK_NOTNULL(op_desc);
       auto output_desc = op_desc->MutableOutputDesc(kDataInputIndex);
       GE_CHECK_NOTNULL(output_desc);
       output_desc->SetShape(tensor_desc->GetShape());
       output_desc->SetOriginShape(tensor_desc->GetOriginShape());
-      output_desc->SetDataType(tensor_desc->GetDataType());
+      output_desc->SetDataType(tensor_desc->GetDataType());
     }
   }

diff --git a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
index 52537ee2..98bb78f2 100644
--- a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
@@ -87,7 +87,7 @@ TEST_F(UtestHybridModelAsyncExecutor, BuildDeviceTensor) {
   ASSERT_EQ(size, 100);
 }
 
-TEST_F(UtestHybridModelAsyncExecutor, Test_execute_internal) {
+TEST_F(UtestHybridModelAsyncExecutor, Test_execute) {
   ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
   GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
   ge_root_model->SetModelName("test_name");
@@ -101,6 +101,6 @@ TEST_F(UtestHybridModelAsyncExecutor, Test_execute) {
   std::pair<rtEvent_t, std::pair<rtCallback_t, void *>> eof_entry;
   eof_entry.first = nullptr;
   context.callback_manager->callback_queue_.Push(eof_entry);
-  ASSERT_EQ(executor.ExecuteGraphInternal(args), SUCCESS);
+  ASSERT_EQ(executor.Execute(args), SUCCESS);
 }
 }  // namespace ge
\ No newline at end of file
diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc
index cb0b497d..63a3eafe 100644
--- a/tests/ut/ge/single_op/single_op_model_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_model_unittest.cc
@@ -224,7 +224,6 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) {
   model.model_helper_.model_->SetGraph(graph);
 
   auto op_desc = transdata->GetOpDesc();
-  op_desc->impl_->input_name_idx_["Data"] = 0;
   const vector<string> depend_names = { "Data" };
   op_desc->SetOpInferDepends(depend_names);
   (void)AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true);
@@ -247,6 +246,9 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) {
   DynamicSingleOp dynamic_single_op(0, &stream_mu_, nullptr);
   StreamResource res((uintptr_t)1);
   model.BuildDynamicOp(res, dynamic_single_op);
+
+  op_desc->impl_->input_name_idx_["Data"] = 0;
+  model.BuildDynamicOp(res, dynamic_single_op);
 }
 
 TEST_F(UtestSingleOpModel, test_host_mem) {

From 58086ab1872f2fc7374dc7b969a3fdcb206b7841 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Wed, 16 Jun 2021 11:14:05 +0800
Subject: [PATCH 035/226] Release mem.
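
The one-line fix below hinges on the difference between unique_ptr::release()
and unique_ptr::reset(): release() only gives up ownership and returns the raw
pointer, so calling it without capturing the result leaks the object, while
reset(nullptr) actually destroys it. A self-contained illustration with a
hypothetical Ctx type (not the real SubgraphContext):

    #include <cstdio>
    #include <memory>

    struct Ctx {
      ~Ctx() { std::puts("Ctx destroyed"); }
    };

    int main() {
      std::unique_ptr<Ctx> a(new Ctx());
      a.release();       // return value ignored, as in the old code: leak
      std::unique_ptr<Ctx> b(new Ctx());
      b.reset(nullptr);  // prints "Ctx destroyed": memory actually freed
      return 0;
    }

With the original release() call, ResetContext dropped the pointer without
ever running the context's destructor; reset(nullptr) releases the memory,
which is what the subject of this patch asks for.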
--- ge/hybrid/executor/subgraph_executor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/hybrid/executor/subgraph_executor.h b/ge/hybrid/executor/subgraph_executor.h index 7e1c2d0b..35f6e67e 100644 --- a/ge/hybrid/executor/subgraph_executor.h +++ b/ge/hybrid/executor/subgraph_executor.h @@ -41,7 +41,7 @@ class SubgraphExecutor { Status PartialExecuteAsync(int task_group); - void ResetContext() { subgraph_context_.release(); } + void ResetContext() { subgraph_context_.reset(nullptr); } /** * Execute subgraph async, output tensor address(not data) and output tensor descriptions are From 116167dc88160c6f6c10703f0ad3c8bd570b48eb Mon Sep 17 00:00:00 2001 From: zhou_lili Date: Tue, 15 Jun 2021 11:49:11 +0800 Subject: [PATCH 036/226] ge code for 1981 --- ge/CMakeLists.txt | 2 + ge/executor/CMakeLists.txt | 1 + ge/graph/build/label_allocator.cc | 5 + ge/graph/build/logical_stream_allocator.cc | 5 + ge/graph/build/stream_allocator.cc | 11 +- ge/graph/build/task_generator.cc | 32 +- ge/graph/build/task_generator.h | 1 + ge/graph/load/model_manager/davinci_model.cc | 207 ++++++--- ge/graph/load/model_manager/davinci_model.h | 6 + .../model_manager/task_info/ffts_task_info.cc | 393 ++++++++++++++++++ .../model_manager/task_info/ffts_task_info.h | 66 +++ ge/graph/partition/graph_partition.cc | 21 +- metadef | 2 +- parser | 2 +- tests/depends/runtime/src/runtime_stub.cc | 4 + tests/ut/ge/CMakeLists.txt | 2 + .../ge/graph/load/davinci_model_unittest.cc | 140 +++++++ .../ge/graph/load/ffts_task_info_unittest.cc | 212 ++++++++++ third_party/fwkacllib/inc/runtime/rt.h | 1 + third_party/fwkacllib/inc/runtime/rt_ffts.h | 185 +++++++++ third_party/fwkacllib/inc/runtime/rt_model.h | 1 + 21 files changed, 1235 insertions(+), 64 deletions(-) create mode 100644 ge/graph/load/model_manager/task_info/ffts_task_info.cc create mode 100644 ge/graph/load/model_manager/task_info/ffts_task_info.h create mode 100644 tests/ut/ge/graph/load/ffts_task_info_unittest.cc create mode 100755 third_party/fwkacllib/inc/runtime/rt_ffts.h diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 215d2832..81e2d539 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -174,6 +174,7 @@ set(TRAIN_SRC_LIST "graph/load/model_manager/task_info/model_exit_task_info.cc" "graph/load/model_manager/task_info/event_record_task_info.cc" "graph/load/model_manager/task_info/event_wait_task_info.cc" + "graph/load/model_manager/task_info/ffts_task_info.cc" "graph/load/model_manager/task_info/fusion_start_task_info.cc" "graph/load/model_manager/task_info/fusion_stop_task_info.cc" "graph/load/model_manager/task_info/hccl_task_info.cc" @@ -662,6 +663,7 @@ set(INFER_SRC_LIST "graph/load/model_manager/task_info/task_info.cc" "graph/load/model_manager/task_info/event_record_task_info.cc" "graph/load/model_manager/task_info/event_wait_task_info.cc" + "graph/load/model_manager/task_info/ffts_task_info.cc" "graph/load/model_manager/task_info/fusion_start_task_info.cc" "graph/load/model_manager/task_info/fusion_stop_task_info.cc" "graph/load/model_manager/task_info/kernel_ex_task_info.cc" diff --git a/ge/executor/CMakeLists.txt b/ge/executor/CMakeLists.txt index f1267c1e..b04216b8 100644 --- a/ge/executor/CMakeLists.txt +++ b/ge/executor/CMakeLists.txt @@ -37,6 +37,7 @@ set(SRC_LIST "../graph/load/model_manager/task_info/task_info.cc" "../graph/load/model_manager/task_info/event_record_task_info.cc" "../graph/load/model_manager/task_info/event_wait_task_info.cc" + "../graph/load/model_manager/task_info/ffts_task_info.cc" 
"../graph/load/model_manager/task_info/fusion_start_task_info.cc" "../graph/load/model_manager/task_info/fusion_stop_task_info.cc" "../graph/load/model_manager/task_info/kernel_ex_task_info.cc" diff --git a/ge/graph/build/label_allocator.cc b/ge/graph/build/label_allocator.cc index 32bdd0a3..dd7ee828 100644 --- a/ge/graph/build/label_allocator.cc +++ b/ge/graph/build/label_allocator.cc @@ -86,6 +86,11 @@ bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::setGetOpDesc() != nullptr && func_node->GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH)) { + GELOGD("Graph[%s] is ffts subgraph, skip label allocator.", graph->GetName().c_str()); + return true; + } + ComputeGraphPtr owner_graph = func_node->GetOwnerComputeGraph(); if (owner_graph == nullptr) { REPORT_INNER_ERROR("E19999", "ComputeGraph owner not set in node:%s(%s), graph:%s", diff --git a/ge/graph/build/logical_stream_allocator.cc b/ge/graph/build/logical_stream_allocator.cc index c74cdf7a..58763aa9 100644 --- a/ge/graph/build/logical_stream_allocator.cc +++ b/ge/graph/build/logical_stream_allocator.cc @@ -474,6 +474,11 @@ Status UpdateForSkippedEnginePass::Run(ComputeGraphPtr graph, const vectorGetDirectNode()) { auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); + if (op_desc->HasAttr(ATTR_NAME_THREAD_SCOPE_ID)) { + op_desc->SetStreamId(kInvalidStream); + GELOGI("Ffts node %s of type %s reassign to invalid stream.", node->GetName().c_str(), node->GetType().c_str()); + continue; + } int64_t stream_id = op_desc->GetStreamId(); if (ops_without_label.find(op_desc) != ops_without_label.end()) { if (AreAllPredStreamsInvalid(node) && op_desc->GetSubgraphInstanceNames().empty()) { diff --git a/ge/graph/build/stream_allocator.cc b/ge/graph/build/stream_allocator.cc index dae36b83..d896925c 100644 --- a/ge/graph/build/stream_allocator.cc +++ b/ge/graph/build/stream_allocator.cc @@ -432,7 +432,11 @@ Status StreamAllocator::SetActiveStreamsForSubgraphs() { // Insert the send/recv event id to the graph Status StreamAllocator::InsertSyncEvents() { - for (const auto &cur_node : whole_graph_->GetNodes(whole_graph_->GetGraphUnknownFlag())) { + auto ffts_filter = [](const Node &node, const char *, const ComputeGraphPtr &) { + return !node.GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH); + }; + + for (const auto &cur_node : whole_graph_->GetNodes(whole_graph_->GetGraphUnknownFlag(), nullptr, ffts_filter)) { // Take the adjacent points, then judge whether need to insert the event for (const OutDataAnchorPtr &anchor : cur_node->GetAllOutDataAnchors()) { for (const InDataAnchorPtr &peer_in_anchor : anchor->GetPeerInDataAnchors()) { @@ -531,6 +535,11 @@ Status StreamAllocator::InsertOneEventInTwoNodes(const NodePtr &cur_node, const Status StreamAllocator::InsertEventsForSubgraph() { for (const auto &subgraph : whole_graph_->GetAllSubgraphs()) { GE_CHECK_NOTNULL(subgraph); + const auto parent_node = subgraph->GetParentNode(); + if (parent_node != nullptr && parent_node->GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH)) { + GELOGD("Skip ffts subgraph, parent node is %s.", parent_node->GetName().c_str()); + continue; + } for (const auto &node : subgraph->GetDirectNode()) { auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); diff --git a/ge/graph/build/task_generator.cc b/ge/graph/build/task_generator.cc index 12da803d..f9456aab 100755 --- a/ge/graph/build/task_generator.cc +++ b/ge/graph/build/task_generator.cc @@ -354,7 +354,10 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra }; 
GE_MAKE_GUARD(release, callback); - for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) { + auto ffts_filter = [](const Node &node, const char *, const ComputeGraphPtr &) { + return !node.GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH); + }; + for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag(), nullptr, ffts_filter)) { OpDescPtr op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); node_index++; @@ -380,10 +383,8 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra GELOGI("Fusion node[name:%s, type:%s] do not need generate task again.", name.c_str(), type.c_str()); continue; } - if (op_kernel_lib_name.empty()) { - GELOGI("Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str()); - continue; - } + GE_CHK_BOOL_EXEC_INFO(!op_kernel_lib_name.empty(), continue, + "Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str()); auto kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name); if (kernel_info_store == nullptr) { REPORT_INNER_ERROR("E19999", "Get ops kernel info store failed for op:%s(%s), op_kernel_name:%s", @@ -394,6 +395,10 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra } GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "[Call][UpdateAnchorStatus] node:%s(%s) failed", name.c_str(), type.c_str()); + if (node->GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH)) { + GE_CHK_STATUS_RET(UpdateAnchorStatusForFfts(node), "[Call][UpdateAnchorStatusForFfts] node:%s(%s) failed", + name.c_str(), type.c_str()); + } // Profiling task size_t task_list_size_before = task_def_list.size(); GE_CHK_STATUS_RET(InsertProfilingTaskBefore(op_desc, profiling_point, all_reduce_nodes, node_index, task_def_list)); @@ -571,7 +576,24 @@ Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info return ret; } +Status TaskGenerator::UpdateAnchorStatusForFfts(const NodePtr &node) { + GELOGD("Start UpdateAnchorStatusForFfts for %s.", node->GetName().c_str()); + if (!node->GetOpDesc()->GetSubgraphInstanceNames().empty()) { + for (size_t i = 0; i < node->GetOpDesc()->GetSubgraphInstanceNames().size(); ++i) { + auto sub_graph = NodeUtils::GetSubgraph(*node, i); + GE_CHECK_NOTNULL(sub_graph); + GELOGD("Start update anchor status for %s.", sub_graph->GetName().c_str()); + for (auto &ffts_node : sub_graph->GetDirectNode()) { + GE_CHK_STATUS_RET(UpdateAnchorStatus(ffts_node), "[Call][UpdateAnchorStatus] node:%s(%s) failed", + ffts_node->GetName().c_str(), ffts_node->GetType().c_str()); + } + } + } + return SUCCESS; +} + Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) { + GELOGD("Start UpdateAnchorStatus for %s.", node->GetName().c_str()); if (NodeUtils::SetAllAnchorStatus(node) != GRAPH_SUCCESS) { REPORT_CALL_ERROR("E19999", "SetAllAnchorStatus fail for op:%s(%s)", node->GetName().c_str(), node->GetType().c_str()); diff --git a/ge/graph/build/task_generator.h b/ge/graph/build/task_generator.h index 9f12d568..40cef3ba 100755 --- a/ge/graph/build/task_generator.h +++ b/ge/graph/build/task_generator.h @@ -80,6 +80,7 @@ class TaskGenerator { Status FindProfilingNodeIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point, std::vector &all_reduce_nodes); private: + Status UpdateAnchorStatusForFfts(const NodePtr &node); Status UpdateAnchorStatus(const NodePtr &node); Status UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t session_id); diff --git a/ge/graph/load/model_manager/davinci_model.cc 
b/ge/graph/load/model_manager/davinci_model.cc index 5b67c205..97238a4a 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -99,6 +99,9 @@ const uint32_t kEndOfSequenceNew = 507005; const int32_t kModelAbortNormal = 0x0704000e; const int32_t kModelAbortNormalNew = 507024; const uint32_t kInteval = 2; +const uint32_t kFftsTbeHandleElementSize = 2; +const uint32_t kNonTailBlock = 0; +const uint32_t kTailBlock = 1; const char *const kModelName = "model_name"; const char *const kModeleId = "model_id"; const char *const kLoadStartTime = "load_start_time"; @@ -116,14 +119,15 @@ const char *const kWorkSpaceSize = "workspace_size"; const char *const kTotalSize = "total_size"; const char *const kTaskCount = "task_count"; const char *const kTaskId = "task_id"; -const char* const kRequestId = "request_id"; -const char* const kThreadId = "thread_id"; -const char* const kInputBeginTime = "input_begin_time"; -const char* const kInputEndTime = "input_end_time"; -const char* const kInferBeginTime = "infer_begin_time"; -const char* const kInferEndTime = "infer_end_time"; -const char* const kOutputBeginTime = "output_start_time"; -const char* const kOutputEndTime = "output_end_time"; +const char *const kRequestId = "request_id"; +const char *const kThreadId = "thread_id"; +const char *const kInputBeginTime = "input_begin_time"; +const char *const kInputEndTime = "input_end_time"; +const char *const kInferBeginTime = "infer_begin_time"; +const char *const kInferEndTime = "infer_end_time"; +const char *const kOutputBeginTime = "output_start_time"; +const char *const kOutputEndTime = "output_end_time"; +const char *const kStubFuncName = "_register_stub_func"; const uint32_t kStringHeadElems = 2; const uint32_t kPlacementHostData = 0; const size_t kAlignment = 64; @@ -902,10 +906,8 @@ Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) { SetLabelForDynamic(node); auto it = op_desc_handle.find(op_desc->GetType()); if (it != op_desc_handle.end()) { - if ((this->*it->second)(op_desc) != SUCCESS) { - GELOGE(PARAM_INVALID, "[Init][Node] failed, Name:%s", op_desc->GetName().c_str()); - return PARAM_INVALID; - } + GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((this->*it->second)(op_desc) != SUCCESS, return PARAM_INVALID, + "[Init][Node] failed, Name:%s", op_desc->GetName().c_str()); continue; } @@ -935,7 +937,8 @@ Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) { GE_TIMESTAMP_RESTART(InitTbeHandle); if (IsTbeTask(op_desc)) { - Status status = InitTbeHandle(op_desc); + Status status = + op_desc->HasAttr(ATTR_NAME_THREAD_SCOPE_ID) ? InitTbeHandleWithFfts(op_desc) : InitTbeHandle(op_desc); if (status != SUCCESS) { GELOGE(status, "[Init][TbeHandle] failed. op:%s", op_desc->GetName().c_str()); return status; @@ -3700,6 +3703,7 @@ Status DavinciModel::InitConstant(const OpDescPtr &op_desc) { /// @return Status /// Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) { + string bin_file = op_desc->GetName(); auto kernel = ge_model_->GetTBEKernelStore().FindKernel(op_desc->GetName()); auto tbe_kernel = (kernel != nullptr) ? 
kernel : op_desc->TryGetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr()); if (tbe_kernel == nullptr) { @@ -3708,12 +3712,61 @@ Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) { GELOGE(INTERNAL_ERROR, "[Check][Param] TBE: %s can't find tvm bin file!", op_desc->GetName().c_str()); return INTERNAL_ERROR; } + GE_CHK_STATUS_RET(FunctionRegister(op_desc, bin_file, tbe_kernel, false), "Function register of bin file: %s failed", + bin_file.c_str()); + return SUCCESS; +} - std::string session_graph_model_id; - GetUniqueId(op_desc, session_graph_model_id); - const char *bin_file_key = GetRegisterStub(op_desc->GetName(), session_graph_model_id); // from set, always valid. - TBEHandleStore &kernel_store = TBEHandleStore::GetInstance(); +Status DavinciModel::InitTbeHandleWithFfts(const OpDescPtr &op_desc) { + std::vector tbe_kernel; + tbe_kernel = op_desc->TryGetExtAttr(OP_EXTATTR_NAME_THREAD_TBE_KERNEL, tbe_kernel); + GELOGD("Kernel bin ptr vec size is %zu.", tbe_kernel.size()); + if (tbe_kernel.size() != kFftsTbeHandleElementSize) { + REPORT_INNER_ERROR("E19999", "Get tbe_kernel for op:%s(%s) fail, model_id:%u", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_); + GELOGE(INTERNAL_ERROR, "[Check][Param] TBE: %s can't find tvm bin file, size is %zu when ffts", + op_desc->GetName().c_str(), tbe_kernel.size()); + return INTERNAL_ERROR; + } + if (tbe_kernel[0] == nullptr || tbe_kernel[1] == nullptr) { + REPORT_INNER_ERROR("E19999", "Tbe kernel for op:%s is nullptr.", op_desc->GetName().c_str()); + GELOGE(INTERNAL_ERROR, "[Check][Param] TBE: tvm bin file of %s is nullptr when ffts.", op_desc->GetName().c_str()); + return INTERNAL_ERROR; + } + vector bin_file_keys; + (void)AttrUtils::GetListStr(op_desc, kStubFuncName, bin_file_keys); + if (bin_file_keys.size() != kFftsTbeHandleElementSize) { + REPORT_INNER_ERROR("E19999", "Get bin_file for op:%s(%s) fail.", op_desc->GetName().c_str(), + op_desc->GetType().c_str()); + GELOGE(INTERNAL_ERROR, "[Check][Param] TBE: %s can't find bin file keys, size is %zu when ffts", + op_desc->GetName().c_str(), bin_file_keys.size()); + return INTERNAL_ERROR; + } + GE_CHK_STATUS_RET(FunctionRegister(op_desc, bin_file_keys[kNonTailBlock], tbe_kernel[kNonTailBlock], true, + kNonTailBlock), + "Function register of first bin file %s failed.", bin_file_keys[kNonTailBlock].c_str()); + GE_CHK_STATUS_RET(FunctionRegister(op_desc, bin_file_keys[kTailBlock], tbe_kernel[kTailBlock], true, kTailBlock), + "Function register of second bin file %s failed.", bin_file_keys[kTailBlock].c_str()); + return SUCCESS; +} +Status DavinciModel::FunctionRegister(const OpDescPtr &op_desc, string &bin_file, OpKernelBinPtr &tbe_kernel, + bool is_ffts, size_t thread_index) { + if (thread_index > 1) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed. Thread index: %zu should less than 1.", thread_index); + return INTERNAL_ERROR; + } + const char *bin_file_key; + if (is_ffts) { + bin_file_key = GetRegisterStub(bin_file, ""); + GELOGI("Node:%s inherit func name:%s directly.", op_desc->GetName().c_str(), bin_file_key); + } else { + std::string session_graph_model_id; + GetUniqueId(op_desc, session_graph_model_id); + bin_file_key = GetRegisterStub(bin_file, session_graph_model_id); // from set, always valid. 
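  // What follows is in effect a register-once cache keyed by the stub name:
  // rtQueryFunctionRegistered() is the membership test and TBEHandleStore
  // keeps a per-handle use count. A minimal sketch of that pattern, with a
  // hypothetical HandleStore rather than the production TBEHandleStore API:
  //
  //   #include <map>
  //   #include <mutex>
  //   #include <string>
  //
  //   struct HandleStore {
  //     std::mutex mu;
  //     std::map<std::string, int> refs;  // stub name -> use count
  //     // True when the caller must perform the real (expensive) registration.
  //     bool AcquireNeedsRegister(const std::string &key) {
  //       std::lock_guard<std::mutex> lk(mu);
  //       return ++refs[key] == 1;  // first user registers, later users reuse
  //     }
  //   };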
+ } + + TBEHandleStore &kernel_store = TBEHandleStore::GetInstance(); std::lock_guard lock(tvm_bin_mutex_); if (rtQueryFunctionRegistered(bin_file_key) != RT_ERROR_NONE) { void *bin_handle = nullptr; @@ -3721,59 +3774,115 @@ Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) { GELOGD("TBE: can't find the kernel_name[%s] in HandleMap", bin_file_key); rtDevBinary_t binary; - std::string json_string; - GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, TVM_ATTR_NAME_MAGIC, json_string), - GELOGD("Get original type of session_graph_id.")); - if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AICPU") { - binary.magic = RT_DEV_BINARY_MAGIC_ELF_AICPU; - } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF") { - binary.magic = RT_DEV_BINARY_MAGIC_ELF; - } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AIVEC") { - binary.magic = RT_DEV_BINARY_MAGIC_ELF_AIVEC; - } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AICUBE") { - binary.magic = RT_DEV_BINARY_MAGIC_ELF_AICUBE; - } else { - REPORT_INNER_ERROR("E19999", "Attr:%s value:%s in op:%s(%s), model_id:%u, check invalid", - TVM_ATTR_NAME_MAGIC.c_str(), json_string.c_str(), - op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_); - GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s value:%s in op:%s(%s), model_id:%u, check invalid", - TVM_ATTR_NAME_MAGIC.c_str(), json_string.c_str(), - op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_); - return PARAM_INVALID; - } - + GE_CHK_STATUS_RET(InitBinaryMagic(op_desc, is_ffts, thread_index, binary), "Init binary magic of %s failed.", + op_desc->GetName().c_str()); binary.version = 0; binary.data = tbe_kernel->GetBinData(); binary.length = tbe_kernel->GetBinDataSize(); - GELOGD("TBE: binary.length: %lu", binary.length); GE_CHK_RT_RET(rtDevBinaryRegister(&binary, &bin_handle)); - std::string meta_data; - GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, TVM_ATTR_NAME_METADATA, meta_data), - GELOGI("Get original type of json_string")); - GELOGD("TBE: meta data: %s", meta_data.empty() ? "null" : meta_data.c_str()); - GE_IF_BOOL_EXEC(!meta_data.empty(), GE_CHK_RT_RET(rtMetadataRegister(bin_handle, meta_data.c_str()))); - + GE_CHK_STATUS_RET(InitMetaData(op_desc, is_ffts, thread_index, bin_handle), "Init tvm meta data of %s failed.", + op_desc->GetName().c_str()); kernel_store.StoreTBEHandle(bin_file_key, bin_handle, tbe_kernel); } else { GELOGI("TBE: find the kernel_name[%s] in HandleMap", bin_file_key); kernel_store.ReferTBEHandle(bin_file_key); } - std::string kernel_name; - GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name), - GELOGD("Get original type of kernel_name")); + GE_CHK_STATUS_RET(InitKernelName(op_desc, is_ffts, thread_index, kernel_name), "Init kernel name of %s failed.", + op_desc->GetName().c_str()); GE_CHK_RT_RET(rtFunctionRegister(bin_handle, bin_file_key, bin_file_key, kernel_name.c_str(), 0)); used_tbe_handle_map_[bin_file_key] = 1; // Init used num to 1. return SUCCESS; } - // Kernel registed, Increase used num in store. StoreTbeHandle(bin_file_key); return SUCCESS; } +Status DavinciModel::InitBinaryMagic(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, + rtDevBinary_t &binary) { + string json_string; + const string &tvm_magic = is_ffts ? 
TVM_ATTR_NAME_THREAD_MAGIC : TVM_ATTR_NAME_MAGIC; + const static std::map binary_magics = { + {"RT_DEV_BINARY_MAGIC_ELF_AICPU", RT_DEV_BINARY_MAGIC_ELF_AICPU}, + {"RT_DEV_BINARY_MAGIC_ELF", RT_DEV_BINARY_MAGIC_ELF}, + {"RT_DEV_BINARY_MAGIC_ELF_AIVEC", RT_DEV_BINARY_MAGIC_ELF_AIVEC}, + {"RT_DEV_BINARY_MAGIC_ELF_AICUBE", RT_DEV_BINARY_MAGIC_ELF_AICUBE} + }; + if (is_ffts) { + vector json_list; + (void)AttrUtils::GetListStr(op_desc, tvm_magic, json_list); + if (json_list.size() != kFftsTbeHandleElementSize) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed. Attr is %s, thread index is %zu, json list size is %zu.", + tvm_magic.c_str(), thread_index, json_list.size()); + return INTERNAL_ERROR; + } + json_string = json_list[thread_index]; + } else { + (void)AttrUtils::GetStr(op_desc, tvm_magic, json_string); + } + auto iter = binary_magics.find(json_string); + if (iter == binary_magics.end()) { + REPORT_INNER_ERROR("E19999", "Attr:%s value:%s in op:%s(%s), model_id:%u, check invalid", + tvm_magic.c_str(), json_string.c_str(), op_desc->GetName().c_str(), + op_desc->GetType().c_str(), model_id_); + GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s value:%s in op:%s(%s), model_id:%u, check invalid", + TVM_ATTR_NAME_MAGIC.c_str(), json_string.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), model_id_); + return PARAM_INVALID; + } + binary.magic = iter->second; + return SUCCESS; +} + +Status DavinciModel::InitMetaData(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, void *bin_handle) { + string meta_data; + const string &tvm_metadata = is_ffts ? TVM_ATTR_NAME_THREAD_METADATA : TVM_ATTR_NAME_METADATA; + if (is_ffts) { + vector meta_data_list; + (void)AttrUtils::GetListStr(op_desc, tvm_metadata, meta_data_list); + if (meta_data_list.size() != kFftsTbeHandleElementSize) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed, attr is %s, thread index is %zu, meta data list size is %zu.", + tvm_metadata.c_str(), thread_index, meta_data_list.size()); + return INTERNAL_ERROR; + } + meta_data = meta_data_list[thread_index]; + } else { + (void)AttrUtils::GetStr(op_desc, tvm_metadata, meta_data); + } + GELOGD("TBE: meta data: %s", meta_data.empty() ? 
"null" : meta_data.c_str()); + if (!meta_data.empty()) { + GE_CHK_RT_RET(rtMetadataRegister(bin_handle, meta_data.c_str())); + } + return SUCCESS; +} + +Status DavinciModel::InitKernelName(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, string &kernel_name) { + if (is_ffts) { + // delete prefix, eg: *sgt_graph_nodes*/loss_scale/gradient/fp32_vals/Mean_grad/Tile + vector kernel_name_list; + auto pos = op_desc->GetName().find("/"); + if (pos == std::string::npos) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed, subgraph node name: %s.", op_desc->GetName().c_str()); + return INTERNAL_ERROR; + } + string attr_kernel_name = op_desc->GetName().substr(pos + 1) + "_thread_kernelname"; + (void)AttrUtils::GetListStr(op_desc, attr_kernel_name, kernel_name_list); + if (kernel_name_list.size() != kFftsTbeHandleElementSize) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed, attr is %s, thread index is %zu, kernel name list size is %zu.", + attr_kernel_name.c_str(), thread_index, kernel_name_list.size()); + return INTERNAL_ERROR; + } + kernel_name = kernel_name_list[thread_index]; + } else { + string attr_kernel_name = op_desc->GetName() + "_kernelname"; + (void)AttrUtils::GetStr(op_desc, attr_kernel_name, kernel_name); + } + return SUCCESS; +} + void DavinciModel::StoreTbeHandle(const std::string &handle_key) { // Online mode FE may call rtFunctionRegister. TBEHandleStore &kernel_store = TBEHandleStore::GetInstance(); diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 819a2ea2..4c06ad98 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -771,6 +771,12 @@ class DavinciModel { /// @return Status /// Status InitTbeHandle(const OpDescPtr &op_desc); + Status InitTbeHandleWithFfts(const OpDescPtr &op_desc); + Status FunctionRegister(const OpDescPtr &op_desc, string &bin_file, OpKernelBinPtr &tbe_kernel, bool is_ffts, + size_t thread_index = 0); + Status InitBinaryMagic(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, rtDevBinary_t &binary); + Status InitMetaData(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, void *bin_handle); + Status InitKernelName(const OpDescPtr &op_desc, bool is_ffts, size_t thread_index, string &kernel_name); void StoreTbeHandle(const string &handle_key); void CleanTbeHandle(); diff --git a/ge/graph/load/model_manager/task_info/ffts_task_info.cc b/ge/graph/load/model_manager/task_info/ffts_task_info.cc new file mode 100644 index 00000000..e311ccac --- /dev/null +++ b/ge/graph/load/model_manager/task_info/ffts_task_info.cc @@ -0,0 +1,393 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "graph/load/model_manager/task_info/ffts_task_info.h" + +#include + +#include "graph/load/model_manager/davinci_model.h" + +namespace { +constexpr uint32_t kAddrLen = sizeof(void *); +} +namespace ge { +FftsTaskInfo::~FftsTaskInfo() { + GE_FREE_RT_LOG(args_); +} + +Status FftsTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) { + GELOGI("FftsTaskInfo Init Start."); + GE_CHECK_NOTNULL(davinci_model); + davinci_model_ = davinci_model; + GE_CHK_STATUS_RET_NOLOG(SetStream(task_def.stream_id(), davinci_model_->GetStreamList())); + + const domi::FftsTaskDef &ffts_task_def = task_def.ffts_task(); + OpDescPtr op_desc = davinci_model_->GetOpByIndex(ffts_task_def.op_index()); + GE_CHECK_NOTNULL(op_desc); + + if ((ffts_task_def.sub_task_size() > static_cast(RT_FFTS_MAX_SUB_TASK_NUM)) || + (ffts_task_def.ticket_cache_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_NUM))) { + GELOGE(INTERNAL_ERROR, "[Check][Param] failed. Node: %s, sub task desc size: %d, ticket cache size: %d", + op_desc->GetName().c_str(), ffts_task_def.sub_task_size(), ffts_task_def.ticket_cache_size()); + return INTERNAL_ERROR; + } + args_size_ = kAddrLen * ffts_task_def.addr_size(); + GE_CHK_RT_RET(rtMalloc(&args_, args_size_, RT_MEMORY_HBM)); + InitFftsDescInfo(ffts_task_def.ffts_desc(), sub_task_info_.fftsDesc); + + sub_task_info_.fftsType = static_cast(ffts_task_def.ffts_type()); + sub_task_info_.subTaskNum = ffts_task_def.sub_task_size(); + for (int idx = 0; idx < ffts_task_def.sub_task_size(); ++idx) { + GE_CHK_STATUS_RET_NOLOG(InitSubTaskInfo(ffts_task_def.sub_task(idx), sub_task_info_.subTask[idx])); + } + + sub_task_info_.tickCacheNum = ffts_task_def.ticket_cache_size(); + for (int idx = 0; idx < ffts_task_def.ticket_cache_size(); ++idx) { + GE_CHK_STATUS_RET_NOLOG(InitTicketCache(ffts_task_def.ticket_cache(idx), sub_task_info_.ticketCache[idx])); + } + + size_t data_size = kAddrLen * io_addrs_.size(); + GE_CHK_RT_RET(rtMemcpy(args_, args_size_, io_addrs_.data(), data_size, RT_MEMCPY_HOST_TO_DEVICE)); + GELOGI("FftsTaskInfo::Init Success. 
Node: %s, input/output size: %zu", op_desc->GetName().c_str(), io_addrs_.size()); + return SUCCESS; +} + +void FftsTaskInfo::InitFftsDescInfo(const domi::FftsDescInfoDef &ffts_desc_def, rtFftsDescInfo_t &ffts_desc) { + ffts_desc.tm = static_cast(ffts_desc_def.tm()); + ffts_desc.di = static_cast(ffts_desc_def.di()); + ffts_desc.dw = static_cast(ffts_desc_def.dw()); + ffts_desc.df = static_cast(ffts_desc_def.df()); + ffts_desc.dataSplitUnit = static_cast(ffts_desc_def.data_split_unit()); + ffts_desc.prefetchOstNum = static_cast(ffts_desc_def.prefetch_ost_num()); + ffts_desc.cacheMaintainOstNum = static_cast(ffts_desc_def.cache_maintain_ost_num()); + ffts_desc.aicPrefetchUpper = static_cast(ffts_desc_def.aic_prefetch_upper()); + ffts_desc.aicPrefetchLower = static_cast(ffts_desc_def.aic_prefetch_lower()); + ffts_desc.aivPrefetchUpper = static_cast(ffts_desc_def.aiv_prefetch_upper()); + ffts_desc.aivPrefetchLower = static_cast(ffts_desc_def.aiv_prefetch_lower()); +} + +Status FftsTaskInfo::InitSubTaskInfo(const domi::FftsSubTaskDef &sub_task_def, rtFftsSubTaskInfo_t &sub_task_desc) { + if ((sub_task_def.dst_tick_cache_id_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK)) || + (sub_task_def.src_tick_cache_id_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK))) { + GELOGE(FAILED, "[Check][Param] Invalid FftsSubTaskInfo, dst tick cache id size: %d, src tick cache id size: %d", + sub_task_def.dst_tick_cache_id_size(), sub_task_def.src_tick_cache_id_size()); + return FAILED; + } + + if (sub_task_def.has_auto_thread_aic_aiv() == sub_task_def.has_manual_thread_aic_aiv()) { + GELOGE(FAILED, "[Check][Param] Invalid FftsSubTaskInfo, auto thread aic/aiv: %d, manual thread aic/aiv: %d", + sub_task_def.has_auto_thread_aic_aiv(), sub_task_def.has_manual_thread_aic_aiv()); + return FAILED; + } + + thread_dim_ = sub_task_def.thread_dim(); + GE_CHK_BOOL_RET_STATUS(thread_dim_ != 0, FAILED, "[Get][thread_dim] failed, Invalid thread dim: %u!", thread_dim_); + + sub_task_desc.subTaskType = static_cast(sub_task_def.sub_task_type()); + sub_task_desc.threadDim = sub_task_def.thread_dim(); + + sub_task_desc.dstTickCacheVldBitmap = sub_task_def.dst_tick_cache_vld_bitmap(); + sub_task_desc.srcTickCacheVldBitmap = sub_task_def.src_tick_cache_vld_bitmap(); + sub_task_desc.srcDataOutOfSubGraphBitmap = sub_task_def.src_data_out_of_subgraph_bitmap(); + + for (int idx = 0; idx < sub_task_def.dst_tick_cache_id_size(); ++idx) { + sub_task_desc.dstTickCacheID[idx] = sub_task_def.dst_tick_cache_id(idx); + } + + for (int idx = 0; idx < sub_task_def.src_tick_cache_id_size(); ++idx) { + sub_task_desc.srcTickCacheID[idx] = sub_task_def.src_tick_cache_id(idx); + } + + if (sub_task_def.has_auto_thread_aic_aiv()) { + GE_CHK_STATUS_RET_NOLOG(InitAutoAicAiv(sub_task_def.auto_thread_aic_aiv(), sub_task_desc.custom.autoThreadAicAiv)); + } + + if (sub_task_def.has_manual_thread_aic_aiv()) { + GE_CHK_STATUS_RET_NOLOG( + InitManualAicAiv(sub_task_def.manual_thread_aic_aiv(), sub_task_desc.custom.manualThreadAicAiv)); + } + + if (sub_task_def.has_manual_thread_nop()) { + GE_CHK_STATUS_RET_NOLOG(InitManualNop(sub_task_def.manual_thread_nop(), sub_task_desc.custom.manualThreadNop)); + } + + return SUCCESS; +} + +Status FftsTaskInfo::InitTicketCache(const domi::TicketCacheDef &ticket_cache_def, rtTicketCache_t &ticket_cache) { + if (ticket_cache_def.has_auto_thread_cache() == ticket_cache_def.has_manual_thread_cache()) { + GELOGE(FAILED, "[Check][Param] Invalid TicketCacheDef, has auto thread cache: %d, has manual thread cache: 
%d", + ticket_cache_def.has_auto_thread_cache(), ticket_cache_def.has_manual_thread_cache()); + return FAILED; + } + + ticket_cache.cacheOption = static_cast(ticket_cache_def.cache_option()); + ticket_cache.ticketCacheWindow = ticket_cache_def.ticket_cache_window(); + + if (ticket_cache_def.has_auto_thread_cache()) { + InitAutoCacheInfo(ticket_cache_def.auto_thread_cache(), ticket_cache.custom.autoThreadCache); + } + if (ticket_cache_def.has_manual_thread_cache()) { + GE_CHK_STATUS_RET_NOLOG( + InitManualCacheInfo(ticket_cache_def.manual_thread_cache(), ticket_cache.custom.manualThreadCache)); + } + + return SUCCESS; +} + +// task_addr = {0,200,700,1000,2000, 3500} +// task_addr_offset = {20,40,2,100,200} +template +Status FftsTaskInfo::InitIoAddrs(const RuntimeParam &rts_param, const T &aic_aiv_def, uint32_t thread_dim, + uint32_t addr_count) { + for (uint32_t i = 0; i < addr_count; ++i) { + uintptr_t logic_addr = aic_aiv_def.task_addr(i) + thread_dim * aic_aiv_def.task_addr_offset(i); + uint8_t *io_addr = nullptr; + if (ModelUtils::GetRtAddress(rts_param, logic_addr, io_addr) != SUCCESS) { + GELOGE(INTERNAL_ERROR, "[Check][GetRtAddress]GetRtAddress failed."); + return INTERNAL_ERROR; + } + GELOGD("aic_aiv_def task base addr is %ld, offset is %ld, thread is %d, logic addrs is 0x%lx, io addr is %p", + aic_aiv_def.task_addr(i), aic_aiv_def.task_addr_offset(i), thread_dim, logic_addr, io_addr); + io_addrs_.emplace_back(io_addr); + } + return SUCCESS; +} + +Status FftsTaskInfo::InitAutoAicAiv(const domi::AutoThreadAicAivDef &aic_aiv_def, rtAutoThreadAicAivInfo_t &aic_aiv) { + if (aic_aiv_def.src_prefetch_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK)) { + GELOGE(FAILED, "[Check][Param] Invalid AutoThreadAicAivInfo, prefetch size: %d", aic_aiv_def.src_prefetch_size()); + return FAILED; + } + + aic_aiv.taskParamAddr = reinterpret_cast(args_) + kAddrLen * io_addrs_.size(); + GELOGD("AutoThreadAicAivDef: task param addr is %lu.", aic_aiv.taskParamAddr); + const auto &rts_param = davinci_model_->GetRuntimeParam(); + for (uint32_t i = 0; i < thread_dim_ - 1; ++i) { + GE_CHK_STATUS_RET_NOLOG(InitIoAddrs(rts_param, aic_aiv_def, i, + static_cast(aic_aiv_def.task_addr_offset_size()))); + } + GE_CHK_STATUS_RET_NOLOG(InitIoAddrs(rts_param, aic_aiv_def, thread_dim_ - 1, aic_aiv_def.input_output_count())); + int last_thread_workspace_size = aic_aiv_def.task_addr_size() - aic_aiv_def.task_addr_offset_size(); + for (int k = 0; k < last_thread_workspace_size; ++k) { + uintptr_t logic_addr = aic_aiv_def.task_addr(aic_aiv_def.task_addr_offset_size() + k); + uint8_t *io_addr = nullptr; + GE_CHK_STATUS_RET_NOLOG(ModelUtils::GetRtAddress(rts_param, logic_addr, io_addr)); + GELOGD("logic addr is 0x%lx, io addr is %p.", logic_addr, io_addr); + io_addrs_.emplace_back(io_addr); + } + + aic_aiv.taskParamOffset = aic_aiv_def.task_param_offset(); + GELOGD("args_: %p, io_addrs size: %zu, task param offset: %u.", args_, io_addrs_.size(), aic_aiv.taskParamOffset); + aic_aiv.satMode = aic_aiv_def.sat_mode(); + aic_aiv.scheduleMode = aic_aiv_def.schedule_mode(); + aic_aiv.iCachePrefetchCnt = aic_aiv_def.cache_prefetch_cnt(); + + aic_aiv.prefetchEnableBitmap = aic_aiv_def.prefetch_enable_bitmap(); + aic_aiv.prefetchOnceBitmap = aic_aiv_def.prefetch_once_bitmap(); + + aic_aiv.tailBlkDim = aic_aiv_def.tail_blk_dim(); + aic_aiv.nonTailBlkDim = aic_aiv_def.non_tail_blk_dim(); + + aic_aiv.nonTailTaskFuncStub = davinci_model_->GetRegisterStub(aic_aiv_def.non_tail_task_func_stub(), ""); + aic_aiv.tailTaskFuncStub = 
davinci_model_->GetRegisterStub(aic_aiv_def.tail_task_func_stub(), ""); + + GELOGI("Set func name[%s][%s] succ.", aic_aiv.nonTailTaskFuncStub, aic_aiv.tailTaskFuncStub); + for (int idx = 0; idx < aic_aiv_def.src_prefetch_size(); ++idx) { + InitAutoPrefetch(aic_aiv_def.src_prefetch(idx), aic_aiv.srcPrefetch[idx]); + } + + return SUCCESS; +} + +void FftsTaskInfo::InitAutoCacheInfo(const domi::AutoThreadCacheDef &cache_def, rtAutoThreadCacheInfo_t &cache) { + cache.dataAddr = cache_def.data_addr(); + cache.dataAddrOffset = cache_def.data_addr_offset(); + cache.nonTailDataLen = cache_def.non_tail_data_len(); + cache.tailDataLen = cache_def.tail_data_len(); + cache.ticketCacheRefCnt = cache_def.ticket_cache_ref_cnt(); +} + +void FftsTaskInfo::InitAutoPrefetch(const domi::AutoThreadPrefetchDef &prefetch_def, rtAutoThreadPrefetch_t &prefetch) { + prefetch.dataAddr = prefetch_def.data_addr(); + prefetch.dataAddrOffset = prefetch_def.data_addr_offset(); + prefetch.nonTailDataLen = prefetch_def.non_tail_data_len(); + prefetch.tailDataLen = prefetch_def.tail_data_len(); +} + +Status FftsTaskInfo::InitManualAicAiv(const domi::ManualThreadAicAivDef &aic_aiv_def, + rtManualThreadAicAivInfo_t &aic_aiv) { + if ((aic_aiv_def.thread_prefetch_dmu_idx_size() > static_cast(RT_FFTS_MAX_MANUAL_THREAD_NUM)) || + (aic_aiv_def.thread_blk_dim_size() > static_cast(RT_FFTS_MAX_MANUAL_THREAD_NUM)) || + (aic_aiv_def.thread_task_func_stub_size() > static_cast(RT_FFTS_MAX_MANUAL_THREAD_NUM)) || + (aic_aiv_def.src_dep_tbl_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK))) { + GELOGE(FAILED, "[Check][Param] Invalid ManualThreadAicAivInfo, thread prefetch dmu desc size: %d, " + "thread blk dim size: %d, thread task func stub size: %d, src dep tbl size: %d", + aic_aiv_def.thread_prefetch_dmu_idx_size(), aic_aiv_def.thread_blk_dim_size(), + aic_aiv_def.thread_task_func_stub_size(), aic_aiv_def.src_dep_tbl_size()); + return FAILED; + } + aic_aiv.taskParamAddr = reinterpret_cast(args_) + kAddrLen * io_addrs_.size(); + GELOGD("ManualThreadAicAivDef: task param addr is %lu.", aic_aiv.taskParamAddr); + const auto &rts_param = davinci_model_->GetRuntimeParam(); + for (uint32_t i = 0; i < thread_dim_ - 1; ++i) { + GE_CHK_STATUS_RET_NOLOG(InitIoAddrs(rts_param, aic_aiv_def, i, + static_cast(aic_aiv_def.task_addr_offset_size()))); + } + GE_CHK_STATUS_RET_NOLOG(InitIoAddrs(rts_param, aic_aiv_def, thread_dim_ - 1, aic_aiv_def.input_output_count())); + int last_thread_workspace_size = aic_aiv_def.task_addr_size() - aic_aiv_def.task_addr_offset_size(); + for (int k = 0; k < last_thread_workspace_size; ++k) { + uintptr_t logic_addr = aic_aiv_def.task_addr(aic_aiv_def.task_addr_offset_size() + k); + uint8_t *io_addr = nullptr; + GE_CHK_STATUS_RET_NOLOG(ModelUtils::GetRtAddress(rts_param, logic_addr, io_addr)); + io_addrs_.emplace_back(io_addr); + } + aic_aiv.taskParamOffset = aic_aiv_def.task_param_offset(); + + aic_aiv.satMode = aic_aiv_def.sat_mode(); + aic_aiv.scheduleMode = aic_aiv_def.schedule_mode(); + aic_aiv.iCachePrefetchCnt = aic_aiv_def.cache_prefetch_cnt(); + + aic_aiv.prefetchEnableBitmap = aic_aiv_def.prefetch_enable_bitmap(); // 8 bit bitmap 1 0 1 0 + aic_aiv.prefetchOnceBitmap = aic_aiv_def.prefetch_once_bitmap(); // 8 bit bitmap 1 0 1 0 + aic_aiv.prefetchOnceDmuNum = aic_aiv_def.prefetch_once_dmu_num(); + + for (int idx = 0; idx < aic_aiv_def.thread_prefetch_dmu_idx_size(); ++idx) { + aic_aiv.threadPrefetchDmuIdx[idx] = aic_aiv_def.thread_prefetch_dmu_idx(idx); + } + for (int idx = 0; idx < 
aic_aiv_def.thread_blk_dim_size(); ++idx) { + aic_aiv.threadBlkDim[idx] = aic_aiv_def.thread_blk_dim(idx); + } + for (int idx = 0; idx < aic_aiv_def.thread_task_func_stub_size(); ++idx) { + aic_aiv.threadTaskFuncStub[idx] = aic_aiv_def.thread_task_func_stub(idx).c_str(); + } + + InitManualDmuInfo(aic_aiv_def, aic_aiv.prefetchList); + for (int idx = 0; idx < aic_aiv_def.src_dep_tbl_size(); ++idx) { + GE_CHK_STATUS_RET_NOLOG(InitManualDependency(aic_aiv_def.src_dep_tbl(idx), aic_aiv.srcDepTbl[idx])); + } + + return SUCCESS; +} + +Status FftsTaskInfo::InitManualCacheInfo(const domi::ManualThreadCacheDef &cache_def, + rtManualThreadCacheInfo_t &cache_info) { + if ((cache_def.slice_dmu_idx_size() > static_cast(RT_FFTS_MAX_MANUAL_THREAD_NUM)) || + (cache_def.ticket_cache_ref_cnt_tbl_size() > static_cast(RT_FFTS_MAX_MANUAL_THREAD_NUM))) { + GELOGE(FAILED, "[Check][Param] Invalid ManualThreadCacheInfo slice dum desc index %d, ticket cache ref cnt %d", + cache_def.slice_dmu_idx_size(), cache_def.ticket_cache_ref_cnt_tbl_size()); + return FAILED; + } + + InitManualDmuInfo(cache_def, cache_info.dmuList); + for (int idx = 0; idx < cache_def.slice_dmu_idx_size(); ++idx) { + cache_info.sliceDmuIdx[idx] = cache_def.slice_dmu_idx(idx); + } + + for (int idx = 0; idx < cache_def.ticket_cache_ref_cnt_tbl_size(); ++idx) { + cache_info.ticketCacheRefCntTbl[idx] = cache_def.ticket_cache_ref_cnt_tbl(idx); + } + + return SUCCESS; +} + +Status FftsTaskInfo::InitManualDependency(const domi::ManualThreadDependencyDef &dependency_def, + rtManualThreadDependency_t &dependency) { + if (dependency_def.dependency_size() > static_cast(RT_FFTS_MANUAL_SRC_DEPEND_TBL_LEN)) { + GELOGE(FAILED, "[Check][Param] Invalid ManualThreadDependency size: %d", dependency_def.dependency_size()); + return FAILED; + } + + for (int idx = 0; idx < dependency_def.dependency_size(); ++idx) { + dependency.dependency[idx] = dependency_def.dependency(idx); + } + + return SUCCESS; +} + +Status FftsTaskInfo::InitManualNop(const domi::ManualThreadNopDef &nop_def, rtManualThreadNopInfo_t &nop_info) { + if (nop_def.src_dep_tbl_size() > static_cast(RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK)) { + GELOGE(FAILED, "[Check][Param] Invalid ManualThreadNopInfo, src dep tbl size: %d", nop_def.src_dep_tbl_size()); + return FAILED; + } + + for (int idx = 0; idx < nop_def.src_dep_tbl_size(); ++idx) { + GE_CHK_STATUS_RET_NOLOG(InitManualDependency(nop_def.src_dep_tbl(idx), nop_info.srcDepTbl[idx])); + } + + return SUCCESS; +} + +void FftsTaskInfo::InitManualDmuInfo(const domi::ManualThreadAicAivDef &aic_aiv_def, rtManualThreadDmuInfo_t *&dmu) { + if (aic_aiv_def.prefetch_list().empty()) { + return; + } + + std::vector buffer(sizeof(rtManualThreadDmuInfo_t) * aic_aiv_def.prefetch_list_size()); + dmu = reinterpret_cast(buffer.data()); + for (int idx = 0; idx < aic_aiv_def.prefetch_list_size(); ++idx) { + InitManualDmuInfo(aic_aiv_def.prefetch_list(idx), dmu[idx]); + } +} + +void FftsTaskInfo::InitManualDmuInfo(const domi::ManualThreadCacheDef &cache_def, rtManualThreadDmuInfo_t *&dmu) { + if (cache_def.dmu_list().empty()) { + return; + } + + std::vector buffer(sizeof(rtManualThreadDmuInfo_t) * cache_def.dmu_list_size()); + dmu = reinterpret_cast(buffer.data()); + for (int idx = 0; idx < cache_def.dmu_list_size(); ++idx) { + InitManualDmuInfo(cache_def.dmu_list(idx), dmu[idx]); + } +} + +void FftsTaskInfo::InitManualDmuInfo(const domi::ManualThreadDmuDef &dmu_def, rtManualThreadDmuInfo_t &dmu) { + dmu.dataAddr = dmu_def.data_addr(); + dmu.numOuter = dmu_def.num_outer(); + 
dmu.numInner = dmu_def.num_inner(); + dmu.strideOuter = dmu_def.stride_outer(); + dmu.lenInner = dmu_def.len_inner(); + dmu.strideInner = dmu_def.stride_inner(); +} + +Status FftsTaskInfo::CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) { + return SUCCESS; +} + +Status FftsTaskInfo::UpdateArgs() { + GE_CHECK_NOTNULL(davinci_model_); + std::vector io_addrs = io_addrs_; + davinci_model_->UpdateKnownZeroCopyAddr(io_addrs); + auto addr_size = kAddrLen * io_addrs.size(); + GE_CHK_RT_RET(rtMemcpy(args_, args_size_, io_addrs.data(), addr_size, RT_MEMCPY_HOST_TO_DEVICE)); + return SUCCESS; +} + +Status FftsTaskInfo::Distribute() { + GELOGI("FftsTaskInfo Distribute Start."); + rtError_t rt_ret = rtFftsTaskLaunch(&sub_task_info_, stream_); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "[Check][RT_ret] Call rtFftsTaskLaunch failed, ret: 0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + + GELOGI("FftsTaskInfo Distribute Success."); + return SUCCESS; +} + +REGISTER_TASK_INFO(RT_MODEL_TASK_FFTS_TASK, FftsTaskInfo); +} // namespace ge diff --git a/ge/graph/load/model_manager/task_info/ffts_task_info.h b/ge/graph/load/model_manager/task_info/ffts_task_info.h new file mode 100644 index 00000000..ffc286f9 --- /dev/null +++ b/ge/graph/load/model_manager/task_info/ffts_task_info.h @@ -0,0 +1,66 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_FFTS_TASK_INFO_H_ +#define GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_FFTS_TASK_INFO_H_ + +#include "graph/load/model_manager/task_info/task_info.h" +#include "graph/op_desc.h" + +namespace ge { +class FftsTaskInfo : public TaskInfo { + public: + FftsTaskInfo() = default; + ~FftsTaskInfo() override; + + Status Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) override; + + Status Distribute() override; + + Status UpdateArgs() override; + + Status CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) override; + + private: + void InitFftsDescInfo(const domi::FftsDescInfoDef &ffts_desc_def, rtFftsDescInfo_t &ffts_desc); + Status InitSubTaskInfo(const domi::FftsSubTaskDef &task_def, rtFftsSubTaskInfo_t &task); + Status InitTicketCache(const domi::TicketCacheDef &cache_def, rtTicketCache_t &cache); + + Status InitAutoAicAiv(const domi::AutoThreadAicAivDef &aic_aiv_def, rtAutoThreadAicAivInfo_t &aic_aiv); + void InitAutoCacheInfo(const domi::AutoThreadCacheDef &cache_def, rtAutoThreadCacheInfo_t &cache); + void InitAutoPrefetch(const domi::AutoThreadPrefetchDef &prefetch_def, rtAutoThreadPrefetch_t &prefetch); + + Status InitManualAicAiv(const domi::ManualThreadAicAivDef &aic_aiv_def, rtManualThreadAicAivInfo_t &aic_aiv); + Status InitManualCacheInfo(const domi::ManualThreadCacheDef &cache_def, rtManualThreadCacheInfo_t &cache); + Status InitManualDependency(const domi::ManualThreadDependencyDef &depend_def, rtManualThreadDependency_t &depend); + Status InitManualNop(const domi::ManualThreadNopDef &nop_def, rtManualThreadNopInfo_t &nop); + + void InitManualDmuInfo(const domi::ManualThreadDmuDef &dmu_def, rtManualThreadDmuInfo_t &dmu); + void InitManualDmuInfo(const domi::ManualThreadCacheDef &cache_def, rtManualThreadDmuInfo_t *&dmu); + void InitManualDmuInfo(const domi::ManualThreadAicAivDef &aic_aiv_def, rtManualThreadDmuInfo_t *&dmu); + + template + Status InitIoAddrs(const RuntimeParam &rts_param, const T &aic_aiv_def, uint32_t thread_dim, uint32_t addr_count); + + DavinciModel *davinci_model_{nullptr}; + rtFftsTaskInfo_t sub_task_info_; + std::vector io_addrs_; + uint32_t thread_dim_{0}; + void *args_{nullptr}; // runtime args memory + uint32_t args_size_{0}; // runtime args memory length +}; +} // namespace ge +#endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_FFTS_TASK_INFO_H_ diff --git a/ge/graph/partition/graph_partition.cc b/ge/graph/partition/graph_partition.cc index c3f9480d..a810aab0 100755 --- a/ge/graph/partition/graph_partition.cc +++ b/ge/graph/partition/graph_partition.cc @@ -179,6 +179,7 @@ Status ge::GraphPartitioner::MergeAfterSubGraphOptimization(ge::ComputeGraphPtr GELOGE(ret, "[Merge][SubGraph] Failed, ret:%d", ret); } GE_CHECK_NOTNULL(original_compute_graph); + output_merged_compute_graph->SetName(original_compute_graph->GetName()); // partition sub graph for (const auto &sub_graph : original_compute_graph->GetAllSubgraphs()) { ComputeGraphPtr merged_sub_graph = nullptr; @@ -188,8 +189,16 @@ Status ge::GraphPartitioner::MergeAfterSubGraphOptimization(ge::ComputeGraphPtr GELOGE(ret, "[Merge][SubGraph] Failed, ret:%d", ret); continue; } + // this means subgraph added in optimize subgraph and without partitions, so just add to root graph + if (merged_sub_graph == sub_graph) { + GELOGI("Just add subgraph %s (parent node is %s) to root graph %s.", sub_graph->GetName().c_str(), + sub_graph->GetParentNode()->GetName().c_str(), output_merged_compute_graph->GetName().c_str()); + 
sub_graph->SetParentGraph(sub_graph->GetParentNode()->GetOwnerComputeGraph()); + GE_IF_BOOL_EXEC(output_merged_compute_graph->AddSubgraph(sub_graph->GetName(), merged_sub_graph) != SUCCESS, + return FAILED;) + continue; + } // add sub graph - output_merged_compute_graph->SetName(original_compute_graph->GetName()); merged_sub_graph->SetName(sub_graph->GetName()); merged_sub_graph->SetInputSize(sub_graph->GetInputSize()); merged_sub_graph->SetOutputSize(sub_graph->GetOutputSize()); @@ -245,12 +254,9 @@ Status ge::GraphPartitioner::MergeSubGraph(ge::ComputeGraphPtr &output_merged_co } if ((graph_2_graph_partition_info_.find(original_compute_graph) == graph_2_graph_partition_info_.end()) || (graph_2_subgraph_list_.find(original_compute_graph) == graph_2_subgraph_list_.end())) { - REPORT_INNER_ERROR("E19999", "original_compute_graph:%s is not find in graph_2_graph_partition_info_.", - original_compute_graph->GetName().c_str()); - GELOGE(GE_GRAPH_NULL_INPUT, - "[Check][Param] original_compute_graph:%s is not find in graph_2_graph_partition_info_.", - original_compute_graph->GetName().c_str()); - return FAILED; + GELOGW("[GraphPartition]: compute_graph has not found, just return original."); + output_merged_compute_graph = original_compute_graph; + return SUCCESS; } GraphPartitionInfo &subgraph_info = graph_2_graph_partition_info_[original_compute_graph]; const auto &sub_graph_list = graph_2_subgraph_list_[original_compute_graph]; @@ -708,6 +714,7 @@ Status ge::GraphPartitioner::AddPartitionsToGraphNode(vectorGetName()); + (void)sub_graph->SetExtAttr("part_src_graph", compute_graph); GELOGD("set attr success. subgraph(%s) with parent graph(%s)", sub_graph->GetName().c_str(), compute_graph->GetName().c_str()); GE_DUMP(sub_graph, sub_graph->GetName() + "_" + mode_2_str_[graph_info_.mode_]); diff --git a/metadef b/metadef index c6030152..00c0c12e 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit c6030152c6dc05515115765babb5d64fde649df4 +Subproject commit 00c0c12eede6c7bce93a1eda5f0bb437ae80a7ec diff --git a/parser b/parser index 155d3262..3073129b 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit 155d3262ba17f800094abb58b6a809b041cf0a74 +Subproject commit 3073129b68c0fae12a8b7531d60782e39128a28c diff --git a/tests/depends/runtime/src/runtime_stub.cc b/tests/depends/runtime/src/runtime_stub.cc index 2b1af23c..0c9e2c27 100644 --- a/tests/depends/runtime/src/runtime_stub.cc +++ b/tests/depends/runtime/src/runtime_stub.cc @@ -456,6 +456,10 @@ rtError_t rtDebugRegisterForStream(rtStream_t stream, uint32_t flag, const void rtError_t rtDebugUnRegisterForStream(rtStream_t stream) { return RT_ERROR_NONE; } + +rtError_t rtFftsTaskLaunch(rtFftsTaskInfo_t *fftsTaskInfo, rtStream_t stream) { + return RT_ERROR_NONE; +} #ifdef __cplusplus } #endif diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 0d1ae079..8b024820 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -437,6 +437,7 @@ set(DISTINCT_GRAPH_LOAD_SRC_FILES "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/stream_active_task_info.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/end_graph_task_info.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/model_exit_task_info.cc" + "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/ffts_task_info.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" "${GE_CODE_DIR}/ge/model/ge_model.cc" @@ 
-649,6 +650,7 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES "graph/load/hccl_task_info_unittest.cc" "graph/load/kernel_ex_task_info_unittest.cc" "graph/load/kernel_task_info_unittest.cc" + "graph/load/ffts_task_info_unittest.cc" "graph/load/memcpy_addr_async_task_info_unittest.cc" "graph/load/memcpy_async_task_info_unittest.cc" "graph/load/cpu_queue_schedule_unittest.cc" diff --git a/tests/ut/ge/graph/load/davinci_model_unittest.cc b/tests/ut/ge/graph/load/davinci_model_unittest.cc index 3f9cc850..ddf241ff 100644 --- a/tests/ut/ge/graph/load/davinci_model_unittest.cc +++ b/tests/ut/ge/graph/load/davinci_model_unittest.cc @@ -1059,4 +1059,144 @@ TEST_F(UtestDavinciModel, get_total_memsize_exclude_zero_copy) { EXPECT_EQ(model.GetTotalMemSizeExcludeZeroCopy(total_useful_size), SUCCESS); EXPECT_EQ(total_useful_size, 512); } + +// test InitTbeHandle +TEST_F(UtestDavinciModel, init_tbe_handle) { + DavinciModel model(0, nullptr); + OpDescPtr op_desc = CreateOpDesc("data", DATA); + model.ge_model_ = make_shared(); + // without kernel + EXPECT_EQ(model.InitTbeHandle(op_desc), INTERNAL_ERROR); + vector buffer; + string key = op_desc->GetName(); + TBEKernelPtr tbe_kernel_ptr = std::make_shared(key, std::move(buffer)); + op_desc->SetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel_ptr); + string attr_kernel_name = op_desc->GetName() + "_kernelname"; + string kernel_name = "kernel_name"; + AttrUtils::SetStr(op_desc, attr_kernel_name, kernel_name); + EXPECT_EQ(model.InitTbeHandle(op_desc), SUCCESS); + // rtQueryFunctionRegistered(bin_file_key) failed + EXPECT_EQ(model.used_tbe_handle_map_.size(), 0); +} + +// test InitTbeHandleWithFfts +TEST_F(UtestDavinciModel, init_tbe_handle_with_ffts) { + DavinciModel model(0, nullptr); + OpDescPtr op_desc = CreateOpDesc("data", DATA); + model.ge_model_ = make_shared(); + // without tbe_kernel + EXPECT_EQ(model.InitTbeHandleWithFfts(op_desc), INTERNAL_ERROR); + + std::vector tbe_kernel; + vector buffer; + string key = op_desc->GetName(); + OpKernelBinPtr tbe_kernel_ptr0 = std::make_shared(key, std::move(buffer)); + OpKernelBinPtr tbe_kernel_ptr1 = std::make_shared(key, std::move(buffer)); + tbe_kernel.push_back(tbe_kernel_ptr0); + tbe_kernel.push_back(tbe_kernel_ptr1); + op_desc->SetExtAttr(OP_EXTATTR_NAME_THREAD_TBE_KERNEL, tbe_kernel); + // without _register_stub_func + EXPECT_EQ(model.InitTbeHandleWithFfts(op_desc), INTERNAL_ERROR); + + vector bin_file_keys; + bin_file_keys.emplace_back(op_desc->GetName() + "_0"); + bin_file_keys.emplace_back(op_desc->GetName() + "_1"); + AttrUtils::SetListStr(op_desc, "_register_stub_func", bin_file_keys); + + EXPECT_EQ(model.InitTbeHandleWithFfts(op_desc), SUCCESS); + // rtQueryFunctionRegistered(bin_file_key) failed + EXPECT_EQ(model.used_tbe_handle_map_.size(), 0); +} + +// test InitBinaryMagic +TEST_F(UtestDavinciModel, init_binary_magic) { + DavinciModel model(0, nullptr); + rtDevBinary_t binary; + OpDescPtr op_desc = CreateOpDesc("data", DATA); + bool is_ffts = true; + vector json_list; + AttrUtils::SetListStr(op_desc, TVM_ATTR_NAME_THREAD_MAGIC, json_list); + // without tvm_magic + EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 0, binary), INTERNAL_ERROR); + json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF_AICPU"); + json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF"); + op_desc->DelAttr(TVM_ATTR_NAME_THREAD_MAGIC); + AttrUtils::SetListStr(op_desc, TVM_ATTR_NAME_THREAD_MAGIC, json_list); + EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 0, binary), SUCCESS); + EXPECT_EQ(binary.magic, RT_DEV_BINARY_MAGIC_ELF_AICPU); + 
+  EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 1, binary), SUCCESS);
+  EXPECT_EQ(binary.magic, RT_DEV_BINARY_MAGIC_ELF);
+
+  json_list.clear();
+  json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF_AIVEC");
+  json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF_AICUBE");
+  op_desc->DelAttr(TVM_ATTR_NAME_THREAD_MAGIC);
+  AttrUtils::SetListStr(op_desc, TVM_ATTR_NAME_THREAD_MAGIC, json_list);
+  EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 0, binary), SUCCESS);
+  EXPECT_EQ(binary.magic, RT_DEV_BINARY_MAGIC_ELF_AIVEC);
+  EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 1, binary), SUCCESS);
+  EXPECT_EQ(binary.magic, RT_DEV_BINARY_MAGIC_ELF_AICUBE);
+
+  // with invalid json type
+  json_list.clear();
+  json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF_INVALID");
+  json_list.emplace_back("RT_DEV_BINARY_MAGIC_ELF_INVALID");
+  op_desc->DelAttr(TVM_ATTR_NAME_THREAD_MAGIC);
+  AttrUtils::SetListStr(op_desc, TVM_ATTR_NAME_THREAD_MAGIC, json_list);
+  EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 0, binary), PARAM_INVALID);
+
+  // test the non-ffts branch
+  is_ffts = false;
+  string json_string = "RT_DEV_BINARY_MAGIC_ELF_AIVEC";
+  AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, json_string);
+  EXPECT_EQ(model.InitBinaryMagic(op_desc, is_ffts, 0, binary), SUCCESS);
+  EXPECT_EQ(binary.magic, RT_DEV_BINARY_MAGIC_ELF_AIVEC);
+}
+
+// test InitMetaData
+TEST_F(UtestDavinciModel, init_meta_data) {
+  DavinciModel model(0, nullptr);
+  void *bin_handle;
+  OpDescPtr op_desc = CreateOpDesc("data", DATA);
+  bool is_ffts = true;
+  vector<string> meta_data_list;
+  // with empty meta_data
+  EXPECT_EQ(model.InitMetaData(op_desc, is_ffts, 0, bin_handle), INTERNAL_ERROR);
+  meta_data_list.emplace_back("meta_data_0");
+  meta_data_list.emplace_back("meta_data_1");
+  AttrUtils::SetListStr(op_desc, TVM_ATTR_NAME_THREAD_METADATA, meta_data_list);
+  EXPECT_EQ(model.InitMetaData(op_desc, is_ffts, 0, bin_handle), SUCCESS);
+
+  is_ffts = false;
+  string meta_data = "meta_data";
+  AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_METADATA, meta_data);
+  EXPECT_EQ(model.InitMetaData(op_desc, is_ffts, 0, bin_handle), SUCCESS);
+}
+
+// test InitKernelName
+TEST_F(UtestDavinciModel, init_kernel_name) {
+  DavinciModel model(0, nullptr);
+  string kernel_name;
+  OpDescPtr op_desc = CreateOpDesc("data", DATA);
+  bool is_ffts = true;
+  // failed when name is invalid
+  EXPECT_EQ(model.InitKernelName(op_desc, is_ffts, 0, kernel_name), INTERNAL_ERROR);
+  OpDescPtr op_desc1 = CreateOpDesc("sgt_graph_nodes/loss_scale", SCALE);
+  string attr_kernel_name = "loss_scale_thread_kernelname";
+  vector<string> kernel_name_list;
+  AttrUtils::SetListStr(op_desc, attr_kernel_name, kernel_name_list);
+  // failed without kernel_name
+  EXPECT_EQ(model.InitKernelName(op_desc, is_ffts, 0, kernel_name), INTERNAL_ERROR);
+  kernel_name_list.emplace_back("kernel_name_0");
+  kernel_name_list.emplace_back("kernel_name_1");
+  AttrUtils::SetListStr(op_desc1, attr_kernel_name, kernel_name_list);
+  EXPECT_EQ(model.InitKernelName(op_desc1, is_ffts, 0, kernel_name), SUCCESS);
+
+  // without ffts
+  is_ffts = false;
+  attr_kernel_name = "data_kernelname";
+  kernel_name = "kernel_name";
+  AttrUtils::SetStr(op_desc, attr_kernel_name, kernel_name);
+  EXPECT_EQ(model.InitKernelName(op_desc, is_ffts, 0, kernel_name), SUCCESS);
+}
 } // namespace ge
diff --git a/tests/ut/ge/graph/load/ffts_task_info_unittest.cc b/tests/ut/ge/graph/load/ffts_task_info_unittest.cc
new file mode 100644
index 00000000..25838f7e
--- /dev/null
+++ b/tests/ut/ge/graph/load/ffts_task_info_unittest.cc
@@ -0,0 +1,212 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define private public
+#define protected public
+
+#include "graph/load/model_manager/task_info/ffts_task_info.h"
+#include "cce/aicpu_engine_struct.h"
+#include "common/ge/ge_util.h"
+#include "common/properties_manager.h"
+#include "framework/common/debug/ge_log.h"
+#include "framework/common/fmk_error_codes.h"
+#include "graph/attr_value.h"
+#include "graph/load/model_manager/davinci_model.h"
+#include "graph/load/model_manager/model_manager.h"
+#include "runtime/rt_ffts.h"
+
+namespace ge {
+extern OpDescPtr CreateOpDesc(string name, string type);
+
+class UtestFftsTaskInfo : public testing::Test {
+ protected:
+  void SetUp() {}
+
+  void TearDown() {}
+
+ public:
+  void CreateFftsTaskInfo(DavinciModel &davinci_model, domi::TaskDef &task_def, FftsTaskInfo &ffts_task_info) {
+    rtStream_t stream = nullptr;
+    rtStreamCreate(&stream, 0);
+    davinci_model.stream_list_ = { stream };
+    task_def.set_stream_id(0);
+
+    domi::FftsTaskDef *ffts_task_def = task_def.mutable_ffts_task();
+    davinci_model.op_list_[0] = CreateOpDesc("test", PARTITIONEDCALL);
+    ffts_task_def->set_op_index(0);
+    ffts_task_def->set_addr_size(2);
+    domi::FftsDescInfoDef *ffts_desc = ffts_task_def->mutable_ffts_desc();
+    ffts_desc->set_tm(0);
+    rtFftsTaskInfo_t sub_task_info;
+    ffts_task_info.sub_task_info_ = sub_task_info;
+    ffts_task_def->set_ffts_type(RT_FFTS_TYPE_AUTO_THREAD);
+  }
+};
+
+// test FftsTaskInfo Init with no subtask and no ticket cache
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_without_subtask) {
+  DavinciModel davinci_model(0, nullptr);
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  davinci_model.stream_list_ = { stream };
+  domi::TaskDef task_def;
+  task_def.set_stream_id(0);
+
+  domi::FftsTaskDef *ffts_task_def = task_def.mutable_ffts_task();
+  FftsTaskInfo ffts_task_info;
+  // Init fails when the model holds no op_desc
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), PARAM_INVALID);
+
+  davinci_model.op_list_[0] = CreateOpDesc("test", PARTITIONEDCALL);
+  ffts_task_def->set_op_index(0);
+  ffts_task_def->set_addr_size(2);
+  domi::FftsDescInfoDef *ffts_desc = ffts_task_def->mutable_ffts_desc();
+  ffts_desc->set_tm(0);
+  rtFftsTaskInfo_t sub_task_info;
+  ffts_task_info.sub_task_info_ = sub_task_info;
+  ffts_task_def->set_ffts_type(RT_FFTS_TYPE_AUTO_THREAD);
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Init with subtask and no ticket cache: AutoThreadAicAivDef
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_with_auto_thread_subgraph) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  CreateFftsTaskInfo(davinci_model, task_def, ffts_task_info);
+  domi::FftsSubTaskDef *ffts_sub_task_def = task_def.mutable_ffts_task()->add_sub_task();
+  ffts_sub_task_def->set_thread_dim(static_cast<uint32_t>(1));
+  // Init fails while sub_task_def.has_auto_thread_aic_aiv() == sub_task_def.has_manual_thread_aic_aiv()
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), FAILED);
+
+  domi::AutoThreadAicAivDef *auto_thread_aic_aiv_def = ffts_sub_task_def->mutable_auto_thread_aic_aiv();
+  domi::AutoThreadPrefetchDef *src_prefetch = auto_thread_aic_aiv_def->add_src_prefetch();
+  // without InitIoAddrs
+  ffts_task_info.thread_dim_ = 0;
+  RuntimeParam runtime_param;
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Init with subtask and no ticket cache: ManualThreadAicAivDef
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_with_manual_thread_subgraph) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  CreateFftsTaskInfo(davinci_model, task_def, ffts_task_info);
+  domi::FftsSubTaskDef *ffts_sub_task_def = task_def.mutable_ffts_task()->add_sub_task();
+  ffts_sub_task_def->set_thread_dim(static_cast<uint32_t>(1));
+  // sub_task_def.has_auto_thread_aic_aiv() == sub_task_def.has_manual_thread_aic_aiv()
+
+  domi::ManualThreadAicAivDef *manual_thread_aic_aiv_def = ffts_sub_task_def->mutable_manual_thread_aic_aiv();
+  manual_thread_aic_aiv_def->add_thread_prefetch_dmu_idx(static_cast<uint32_t>(0));
+  manual_thread_aic_aiv_def->add_thread_blk_dim(static_cast<uint32_t>(0));
+  manual_thread_aic_aiv_def->add_thread_task_func_stub("ffts");
+  domi::ManualThreadDmuDef *prefetch_list = manual_thread_aic_aiv_def->add_prefetch_list();
+  prefetch_list->set_data_addr(static_cast<uint64_t>(0));
+  // without InitIoAddrs
+  ffts_task_info.thread_dim_ = 0;
+  RuntimeParam runtime_param;
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Init with subtask and no ticket cache: ManualThreadNopDef
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_with_manual_thread_nop_subgraph) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  CreateFftsTaskInfo(davinci_model, task_def, ffts_task_info);
+
+  domi::FftsSubTaskDef *ffts_sub_task_def = task_def.mutable_ffts_task()->add_sub_task();
+  ffts_sub_task_def->set_thread_dim(static_cast<uint32_t>(1));
+  domi::AutoThreadAicAivDef *auto_thread_aic_aiv_def = ffts_sub_task_def->mutable_auto_thread_aic_aiv();
+  domi::ManualThreadNopDef *manual_thread_nop = ffts_sub_task_def->mutable_manual_thread_nop();
+  domi::ManualThreadDependencyDef *src_dep_tbl = manual_thread_nop->add_src_dep_tbl();
+  src_dep_tbl->add_dependency(static_cast<uint32_t>(0));
+
+  // without InitIoAddrs
+  ffts_task_info.thread_dim_ = 0;
+  RuntimeParam runtime_param;
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Init with no subtask and ticket cache: AutoThreadCacheDef
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_with_auto_thread_ticket_cache) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  CreateFftsTaskInfo(davinci_model, task_def, ffts_task_info);
+
+  domi::TicketCacheDef *ticket_cache_def = task_def.mutable_ffts_task()->add_ticket_cache();
+  // Init fails while ticket_cache_def.has_auto_thread_cache() == ticket_cache_def.has_manual_thread_cache()
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), FAILED);
+  domi::AutoThreadCacheDef *auto_thread_cache = ticket_cache_def->mutable_auto_thread_cache();
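+  // With auto_thread_cache set, the has_auto_thread_cache() == has_manual_thread_cache() check above passes.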
+
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Init with no subtask and ticket cache: ManualThreadCacheDef
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_with_manual_thread_ticket_cache) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  CreateFftsTaskInfo(davinci_model, task_def, ffts_task_info);
+
+  domi::TicketCacheDef *ticket_cache_def = task_def.mutable_ffts_task()->add_ticket_cache();
+  domi::ManualThreadCacheDef *manual_thread_cache = ticket_cache_def->mutable_manual_thread_cache();
+  manual_thread_cache->add_slice_dmu_idx(static_cast<uint32_t>(0));
+  manual_thread_cache->add_ticket_cache_ref_cnt_tbl(static_cast<uint32_t>(0));
+  domi::ManualThreadDmuDef *dmu_list = manual_thread_cache->add_dmu_list();
+
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.Init(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo UpdateArgs
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_update_args) {
+  DavinciModel davinci_model(0, nullptr);
+  FftsTaskInfo ffts_task_info;
+  ffts_task_info.davinci_model_ = &davinci_model;
+  ffts_task_info.io_addrs_ = { (void*)0x12345678, (void*)0x22345678 };
+  EXPECT_EQ(ffts_task_info.UpdateArgs(), SUCCESS);
+}
+
+// test FftsTaskInfo CalculateArgs
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_calculate_args) {
+  DavinciModel davinci_model(0, nullptr);
+  domi::TaskDef task_def;
+  FftsTaskInfo ffts_task_info;
+  EXPECT_EQ(ffts_task_info.CalculateArgs(task_def, &davinci_model), SUCCESS);
+}
+
+// test FftsTaskInfo Distribute
+TEST_F(UtestFftsTaskInfo, success_ffts_task_info_distribute) {
+  DavinciModel davinci_model(0, nullptr);
+  FftsTaskInfo ffts_task_info;
+  rtFftsTaskInfo_t sub_task_info;
+  ffts_task_info.sub_task_info_ = sub_task_info;
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  ffts_task_info.stream_ = stream;
+  EXPECT_EQ(ffts_task_info.Distribute(), SUCCESS);
+}
+} // namespace ge
\ No newline at end of file
diff --git a/third_party/fwkacllib/inc/runtime/rt.h b/third_party/fwkacllib/inc/runtime/rt.h
index 83cafa3c..aa394eea 100644
--- a/third_party/fwkacllib/inc/runtime/rt.h
+++ b/third_party/fwkacllib/inc/runtime/rt.h
@@ -27,5 +27,6 @@
 #include "mem.h"
 #include "rt_model.h"
 #include "stream.h"
+#include "rt_ffts.h"
 #endif  // __CCE_RUNTIME_RT_H__
diff --git a/third_party/fwkacllib/inc/runtime/rt_ffts.h b/third_party/fwkacllib/inc/runtime/rt_ffts.h
new file mode 100755
index 00000000..720da7cd
--- /dev/null
+++ b/third_party/fwkacllib/inc/runtime/rt_ffts.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ * Description: ffts interface
+ */
+
+#ifndef __CCE_RUNTIME_FFTS_H
+#define __CCE_RUNTIME_FFTS_H
+
+#include "base.h"
+
+#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+extern "C" {
+#endif
+
+#define RT_FFTS_MAX_SUB_TASK_NUM 32U
+#define RT_FFTS_MAX_TICKET_CACHE_NUM 64U
+#define RT_FFTS_MAX_MANUAL_THREAD_NUM 16U
+#define RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK 8U
+#define RT_FFTS_MANUAL_SRC_DEPEND_TBL_LEN 32U
+
+typedef enum tagFftsType {
+    RT_FFTS_TYPE_AUTO_THREAD = 2,    // ffts auto thread mode, same as ffts define
+    RT_FFTS_TYPE_MANUAL_THREAD = 3,  // ffts manual thread mode, same as ffts define
+} rtFftsType_t;
+
+typedef enum tagFftsSubTaskType {
+    RT_FFTS_SUB_TASK_TYPE_AIC = 0,
+    RT_FFTS_SUB_TASK_TYPE_AIV = 1,
+    RT_FFTS_SUB_TASK_TYPE_NOP = 2,
+    RT_FFTS_SUB_TASK_TYPE_NOTIFY_WAIT = 3,
+    RT_FFTS_SUB_TASK_TYPE_NOTIFY_RECORD = 4,
+    RT_FFTS_SUB_TASK_TYPE_WRITE_VALUE = 5,
+    RT_FFTS_SUB_TASK_TYPE_MIX_AIC = 6,
+    RT_FFTS_SUB_TASK_TYPE_MIX_AIV = 7,
+    RT_FFTS_SUB_TASK_TYPE_SDMA = 8,
+    RT_FFTS_SUB_TASK_TYPE_RESERVED,
+} rtFftsSubTaskType_t;
+
+typedef struct tagManualThreadDmuInfo {
+    uint64_t dataAddr;  // device mem
+    uint16_t numOuter;
+    uint16_t numInner;
+    uint32_t strideOuter;
+    uint32_t lenInner;
+    uint32_t strideInner;
+} rtManualThreadDmuInfo_t;
+
+typedef struct tagManualThreadDependency {
+    uint8_t dependency[RT_FFTS_MANUAL_SRC_DEPEND_TBL_LEN];
+} rtManualThreadDependency_t;
+
+typedef struct tagManualThreadAicAivInfo {
+    uint64_t taskParamAddr;  // device mem
+    uint16_t taskParamOffset;
+    // when satMode = 1 and an FP16 computation with non-INF inputs overflows/underflows, results will be +/-INF of FP16
+    // when satMode = 0 and an FP16 computation with non-INF inputs overflows/underflows,
+    // results will be saturated to +/- MAX of FP16
+    uint8_t satMode;
+    uint8_t scheduleMode;         // 0: normal mode, 1: batch mode, 2: sync mode, 3: reserved
+    uint8_t iCachePrefetchCnt;    // unit is 2 KB
+    uint8_t prefetchEnableBitmap; // 8-bit bitmap, e.g. 1 0 1 0
+    uint8_t prefetchOnceBitmap;   // 8-bit bitmap, e.g. 1 0 1 0
+    uint16_t prefetchOnceDmuNum;  // prefetch_once_dmu_descriptor_index in ffts
+    // num: thread0_prefetch_dmu_descriptor_index - prefetch_once_dmu_descriptor_index
+    uint16_t threadPrefetchDmuIdx[RT_FFTS_MAX_MANUAL_THREAD_NUM];  // max valid count is threadDim
+    uint16_t threadBlkDim[RT_FFTS_MAX_MANUAL_THREAD_NUM];
+    const char *threadTaskFuncStub[RT_FFTS_MAX_MANUAL_THREAD_NUM];
+
+    rtManualThreadDmuInfo_t *prefetchList;  // dmu desc 0-64K, length is the last threadPrefetchDmuIdx[threadDim - 1]
+    rtManualThreadDependency_t srcDepTbl[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
+} rtManualThreadAicAivInfo_t;
+
+typedef struct tagAutoThreadPrefetch {
+    uint64_t dataAddr;  // device mem
+    uint32_t dataAddrOffset;
+    uint32_t nonTailDataLen;
+    uint32_t tailDataLen;
+} rtAutoThreadPrefetch_t;
+
+typedef struct tagAutoThreadAicAivInfo {
+    uint64_t taskParamAddr;  // device mem
+    uint16_t taskParamOffset;
+    // when satMode = 1 and an FP16 computation with non-INF inputs overflows/underflows, results will be +/-INF of FP16
+    // when satMode = 0 and an FP16 computation with non-INF inputs overflows/underflows,
+    // results will be saturated to +/- MAX of FP16
+    uint8_t satMode;
+    uint8_t scheduleMode;         // 0: normal mode, 1: batch mode, 2: sync mode, 3: reserved
+    uint8_t iCachePrefetchCnt;    // unit is 2 KB
+    uint8_t prefetchEnableBitmap; // 8-bit bitmap
+    uint8_t prefetchOnceBitmap;   // 8-bit bitmap
+
+    uint16_t tailBlkDim;
+    uint16_t nonTailBlkDim;
+
+    const char *nonTailTaskFuncStub;
+    const char *tailTaskFuncStub;
+
+    // for prefetch, the number of valid entries equals the bit count of prefetchEnableBitmap
+    // e.g. prefetchEnableBitmap = '00010011' means three prefetches are needed; only srcPrefetch[0], [1], [2] are valid
+    rtAutoThreadPrefetch_t srcPrefetch[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
+} rtAutoThreadAicAivInfo_t;
+
+typedef struct tagAutoThreadCacheInfo {
+    uint64_t dataAddr;  // device mem
+    uint32_t dataAddrOffset;
+    uint32_t nonTailDataLen;
+    uint32_t tailDataLen;
+    uint16_t ticketCacheRefCnt;
+} rtAutoThreadCacheInfo_t;
+
+typedef struct tagManualThreadCacheInfo {
+    rtManualThreadDmuInfo_t *dmuList;  // 0-64K
+    uint16_t dmuNum;
+    uint16_t sliceDmuIdx[RT_FFTS_MAX_MANUAL_THREAD_NUM];
+    uint16_t ticketCacheRefCntTbl[RT_FFTS_MAX_MANUAL_THREAD_NUM];
+} rtManualThreadCacheInfo_t;
+
+typedef enum tagCacheOp {
+    RT_CACHE_OP_NONE = 0,
+    RT_CACHE_OP_FLUSH = 1,
+    RT_CACHE_OP_INVALIDATE = 2,
+    RT_CACHE_OP_WRITE_BACK = 3,
+} rtCacheOp_t;
+
+typedef struct tagTicketCache {
+    rtCacheOp_t cacheOption;
+    uint8_t ticketCacheWindow;
+    union {
+        rtAutoThreadCacheInfo_t autoThreadCache;
+        rtManualThreadCacheInfo_t manualThreadCache;
+    } custom;
+} rtTicketCache_t;
+
+typedef struct tagManualThreadNopInfo {
+    // depends on srcTickCacheVldBitmap in rtFftsSubTaskInfo_t
+    rtManualThreadDependency_t srcDepTbl[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
+} rtManualThreadNopInfo_t;
+
+typedef struct tagFftsSubTaskInfo {
+    rtFftsSubTaskType_t subTaskType;
+    uint16_t threadDim;
+    uint8_t dstTickCacheVldBitmap;
+    uint8_t srcTickCacheVldBitmap;
+    uint8_t srcDataOutOfSubGraphBitmap;
+    uint8_t dstTickCacheID[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
+    uint8_t srcTickCacheID[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
+    union {
+        rtAutoThreadAicAivInfo_t autoThreadAicAiv;
+        rtManualThreadAicAivInfo_t manualThreadAicAiv;
+        rtManualThreadNopInfo_t manualThreadNop;
+    } custom;
+} rtFftsSubTaskInfo_t;
+
+typedef struct tagFftsDescInfo {
+    uint8_t tm;  // thread subtask kickstart mode, 0: in order, 1: out of order
+    uint8_t di;  // discard invalidate
+    uint8_t dw;  // discard write back
+    uint8_t df;  // discard flush
+    uint8_t dataSplitUnit;  // split source or ticket cache by 2~dataSplitUnit MB
+    uint8_t prefetchOstNum;
+    uint8_t cacheMaintainOstNum;
+    uint8_t aicPrefetchUpper;
+    uint8_t aicPrefetchLower;
+    uint8_t aivPrefetchUpper;
+    uint8_t aivPrefetchLower;
+} rtFftsDescInfo_t;
+
+typedef struct tagFftsTaskInfo {
+    rtFftsType_t fftsType;
+    uint16_t subTaskNum;
+    uint16_t tickCacheNum;
+    rtFftsDescInfo_t fftsDesc;
+    // sub-task descriptors; the real number is subTaskNum
+    rtFftsSubTaskInfo_t subTask[RT_FFTS_MAX_SUB_TASK_NUM];
+
+    // ticket caches; the real number is tickCacheNum
+    rtTicketCache_t ticketCache[RT_FFTS_MAX_TICKET_CACHE_NUM];
+} rtFftsTaskInfo_t;
+
+RTS_API rtError_t rtFftsTaskLaunch(rtFftsTaskInfo_t *fftsTaskInfo, rtStream_t stream);
+
+#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+}
+#endif
+#endif  // __CCE_RUNTIME_FFTS_H
diff --git a/third_party/fwkacllib/inc/runtime/rt_model.h b/third_party/fwkacllib/inc/runtime/rt_model.h
index 6481f655..74539222 100644
--- a/third_party/fwkacllib/inc/runtime/rt_model.h
+++ b/third_party/fwkacllib/inc/runtime/rt_model.h
@@ -50,6 +50,7 @@ typedef enum tagModelTaskType {
     RT_MODEL_TASK_STREAM_LABEL_SWITCH_BY_INDEX,
     RT_MODEL_TASK_STREAM_LABEL_GOTO,
     RT_MODEL_TASK_MODEL_EXIT,
+    RT_MODEL_TASK_FFTS_TASK,
     RT_MODEL_TASK_ALL_KERNEL,
 } rtModelTaskType_t;

From 6927a8eef3663007b74d3cc6905ff7cf60633d91 Mon Sep 17 00:00:00 2001
From: zhou_chao1993
Date: Wed, 16 Jun 2021 11:15:37 +0800
Subject: [PATCH 037/226] modify dump config

---
 ge/common/dump/dump_manager.cc | 4 ++--
tests/ut/ge/common/dump_manager_unittest.cc | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/ge/common/dump/dump_manager.cc b/ge/common/dump/dump_manager.cc index a6944fc6..ebe16fed 100644 --- a/ge/common/dump/dump_manager.cc +++ b/ge/common/dump/dump_manager.cc @@ -33,7 +33,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpManager &DumpManager::GetIn bool DumpManager::NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump_properties) { if (dump_config.dump_status.empty() && dump_config.dump_debug.empty()) { - dump_properties_map_.emplace(kInferSessionId, dump_properties); + dump_properties_map_[kInferSessionId] = dump_properties; GELOGI("Dump does not open"); return false; } @@ -41,7 +41,7 @@ bool DumpManager::NeedDoDump(const DumpConfig &dump_config, DumpProperties &dump if ((dump_config.dump_status == kDumpoff || dump_config.dump_status == kDumpOFF) && dump_config.dump_debug == kDumpoff) { dump_properties.ClearDumpPropertyValue(); - dump_properties_map_.emplace(kInferSessionId, dump_properties); + dump_properties_map_[kInferSessionId] = dump_properties; return false; } if (dump_config.dump_status == kDumpOn && dump_config.dump_debug == kDumpOn) { diff --git a/tests/ut/ge/common/dump_manager_unittest.cc b/tests/ut/ge/common/dump_manager_unittest.cc index 50eabc4a..7a242997 100644 --- a/tests/ut/ge/common/dump_manager_unittest.cc +++ b/tests/ut/ge/common/dump_manager_unittest.cc @@ -16,6 +16,8 @@ #include +#define protected public +#define private public #include "common/dump/dump_manager.h" #include "common/debug/log.h" #include "common/ge_inner_error_codes.h" @@ -102,4 +104,13 @@ TEST_F(UTEST_dump_manager, is_dump_single_op_close_success) { auto dump = DumpManager::GetInstance().GetDumpProperties(0); DumpManager::GetInstance().RemoveDumpProperties(0); } + + TEST_F(UTEST_dump_manager, not_need_do_dump) { + DumpConfig dump_config; + dump_config.dump_status = "off"; + dump_config.dump_debug = "off"; + DumpProperties dump_properties; + bool ret = DumpManager::GetInstance().NeedDoDump(dump_config, dump_properties); + EXPECT_EQ(ret, false); + } } // namespace ge \ No newline at end of file From 23c8a0d5811f5e808c610ca1f7efa1f9e75d4cd9 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 16 Jun 2021 20:13:41 +0800 Subject: [PATCH 038/226] Fix ut. 
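
Note on the dump_manager.cc change above: std::map::emplace is a no-op when the key is
already present, so a second dump configuration could never overwrite the properties
recorded under kInferSessionId; operator[] assigns unconditionally. A minimal standalone
sketch of the difference (plain std::map, not GE code):

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      std::map<int, std::string> m;
      m.emplace(0, "first");
      m.emplace(0, "second");  // no-op: key 0 already exists
      assert(m[0] == "first");
      m[0] = "second";         // operator[] overwrites unconditionally
      assert(m[0] == "second");
      return 0;
    }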
---
 ge/hybrid/executor/hybrid_model_executor.cc         |  2 +-
 ge/hybrid/executor/node_state.h                     |  5 +++++
 ge/hybrid/executor/subgraph_executor.cc             |  1 +
 ge/hybrid/executor/subgraph_executor.h              |  2 +-
 ge/hybrid/executor/worker/shape_inference_engine.cc |  2 +-
 ge/single_op/single_op_model.cc                     |  8 ++------
 tests/ut/ge/single_op/single_op_model_unittest.cc   | 13 +++++++------
 7 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc
index 2abd9cd6..9bf70d26 100755
--- a/ge/hybrid/executor/hybrid_model_executor.cc
+++ b/ge/hybrid/executor/hybrid_model_executor.cc
@@ -70,7 +70,7 @@ Status HybridModelExecutor::Execute(HybridModelExecutor::ExecuteArgs &args) {
     context_.profiler->Dump(std::cout);
     context_.profiler->Reset();
   }
-  root_graph_executor_->ResetContext();
+  root_graph_executor_->ReleaseContext();
   context_.iteration += 1;

   if (ret == END_OF_SEQUENCE) {
diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h
index 85f9e4c3..e8ccd416 100644
--- a/ge/hybrid/executor/node_state.h
+++ b/ge/hybrid/executor/node_state.h
@@ -177,6 +177,10 @@ struct NodeState {
   void SetTaskContext(std::shared_ptr<TaskContext> &task_context);
   std::shared_ptr<TaskContext> GetTaskContext();

+  void SetSkipInferShape(bool skip_infershape) { skip_infershape_ = skip_infershape; }
+
+  bool GetSkipInferShape() const { return skip_infershape_; }
+
  private:
   bool IsScheduleReady() const;
   void SetDataSchedule(const NodeState &node_state, const std::function<void(const NodeItem *)> &ready);
@@ -204,6 +208,7 @@ struct NodeState {
   int merge_index_ = -1;   // Use for Execute (Reset after Executed).
   int switch_index_ = -1;  // Use for Schedule (Reset after Prepared).
   int group_ = -1;
+  bool skip_infershape_ = false;
 };
 }  // namespace hybrid
 }  // namespace ge
diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc
index c26eac9b..6979d05f 100644
--- a/ge/hybrid/executor/subgraph_executor.cc
+++ b/ge/hybrid/executor/subgraph_executor.cc
@@ -110,6 +110,7 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue
       output_desc->SetShape(tensor_desc->GetShape());
       output_desc->SetOriginShape(tensor_desc->GetOriginShape());
       output_desc->SetDataType(tensor_desc->GetDataType());
+      node_state->SetSkipInferShape(true);
     }
   }

diff --git a/ge/hybrid/executor/subgraph_executor.h b/ge/hybrid/executor/subgraph_executor.h
index 35f6e67e..76732c37 100644
--- a/ge/hybrid/executor/subgraph_executor.h
+++ b/ge/hybrid/executor/subgraph_executor.h
@@ -41,7 +41,7 @@ class SubgraphExecutor {

   Status PartialExecuteAsync(int task_group);

-  void ResetContext() { subgraph_context_.reset(nullptr); }
+  void ReleaseContext() { subgraph_context_.reset(nullptr); }

   /**
    * Execute subgraph async, output tensor address(not data) and output tensor descriptions are
diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc
index 18fed710..96959b80 100755
--- a/ge/hybrid/executor/worker/shape_inference_engine.cc
+++ b/ge/hybrid/executor/worker/shape_inference_engine.cc
@@ -70,7 +70,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) {
   // Do shape inference
   // Skipping infer shape of input node.
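   // (Nodes whose output shapes were pre-filled by SubgraphExecutor::InitInputsForUnknownShape
   //  above are flagged via SetSkipInferShape, so inference can be bypassed for them.)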
GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str()); - if (node_state.GetType() != DATA_TYPE && node_state.GetType() != AIPP_DATA_TYPE) { + if (node_state.GetSkipInferShape()) { RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start"); GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true), "[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str()); diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 182d1466..90a6362c 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -49,8 +49,8 @@ const uint32_t kOutputIndexOfData = 0; constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; Status CheckHostMem(const std::vector &dependencies, const NodePtr &node, bool &is_host_mem) { + auto op_desc = node->GetOpDesc(); for (const auto &input_name : dependencies) { - auto op_desc = node->GetOpDesc(); int input_index = op_desc->GetInputIndexByName(input_name); if (input_index < 0) { GELOGE(INTERNAL_ERROR, "[Get][InputIndex]failed, node:[%s] inputname: %s.", @@ -60,11 +60,7 @@ Status CheckHostMem(const std::vector &dependencies, const NodePtr &node return INTERNAL_ERROR; } - const auto &in_anchor = node->GetInDataAnchor(input_index); - GE_CHECK_NOTNULL(in_anchor); - const auto &peer_out_anchor = in_anchor->GetPeerOutAnchor(); - GE_CHECK_NOTNULL(peer_out_anchor); - const auto &src_node = peer_out_anchor->GetOwnerNode(); + const auto &src_node = NodeUtils::GetInDataNodeByIndex(*node, input_index); GE_CHECK_NOTNULL(src_node); auto src_op_desc = src_node->GetOpDesc(); GE_CHECK_NOTNULL(src_op_desc); diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index 63a3eafe..1975f9f4 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -213,7 +213,7 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { // make graph ut::GraphBuilder builder = ut::GraphBuilder("graph"); - auto data = builder.AddNode("Data", "Data", 0, 1); + auto data = builder.AddNode("Data", "Data", 1, 1); auto transdata = builder.AddNode("Transdata", "Transdata", 1, 1); auto netoutput = builder.AddNode("Netoutput", "NetOutput", 1, 0); builder.AddDataEdge(data, 0, transdata, 0); @@ -228,11 +228,6 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { op_desc->SetOpInferDepends(depend_names); (void)AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true); - auto tensor = std::make_shared(); - auto data_desc = data->GetOpDesc(); - auto tensor_desc = data_desc->MutableInputDesc(0); - AttrUtils::SetTensor(tensor_desc, "_value", tensor); - // set task_def auto model_task_def = make_shared(); domi::TaskDef *task_def = model_task_def->add_task(); @@ -249,6 +244,12 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { op_desc->impl_->input_name_idx_["Data"] = 0; model.BuildDynamicOp(res, dynamic_single_op); + + auto tensor = std::make_shared(); + auto data_desc = data->GetOpDesc(); + auto tensor_desc = data_desc->MutableInputDesc(0); + AttrUtils::SetTensor(tensor_desc, "_value", tensor); + model.BuildDynamicOp(res, dynamic_single_op); } TEST_F(UtestSingleOpModel, test_host_mem) { From b17eafe3dbf2dbf0a0f921d8941445425d2fae26 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 16 Jun 2021 21:52:13 +0800 Subject: [PATCH 039/226] Fix bug. 
--- ge/hybrid/executor/node_state.h | 2 +- ge/hybrid/executor/worker/shape_inference_engine.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h index e8ccd416..002e07ab 100644 --- a/ge/hybrid/executor/node_state.h +++ b/ge/hybrid/executor/node_state.h @@ -179,7 +179,7 @@ struct NodeState { void SetSkipInferShape(bool skip_infershape) { skip_infershape_ = skip_infershape; } - bool GetSkipInferShape() const { return skip_infershape_; } + bool SkipInferShape() const { return skip_infershape_; } private: bool IsScheduleReady() const; diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc index 96959b80..753818bc 100755 --- a/ge/hybrid/executor/worker/shape_inference_engine.cc +++ b/ge/hybrid/executor/worker/shape_inference_engine.cc @@ -70,7 +70,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) { // Do shape inference // Skipping infer shape of input node. GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str()); - if (node_state.GetSkipInferShape()) { + if (!node_state.SkipInferShape()) { RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start"); GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true), "[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str()); From 4bc0f6f2af291635ec162cea483c1974b8976d35 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 16 Jun 2021 22:00:50 +0800 Subject: [PATCH 040/226] Fix bug. --- ge/hybrid/executor/node_state.h | 2 +- ge/hybrid/executor/worker/shape_inference_engine.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h index 002e07ab..b80b60b0 100644 --- a/ge/hybrid/executor/node_state.h +++ b/ge/hybrid/executor/node_state.h @@ -179,7 +179,7 @@ struct NodeState { void SetSkipInferShape(bool skip_infershape) { skip_infershape_ = skip_infershape; } - bool SkipInferShape() const { return skip_infershape_; } + bool MaySkipShapeInference() const { return skip_infershape_; } private: bool IsScheduleReady() const; diff --git a/ge/hybrid/executor/worker/shape_inference_engine.cc b/ge/hybrid/executor/worker/shape_inference_engine.cc index 753818bc..50dc389c 100755 --- a/ge/hybrid/executor/worker/shape_inference_engine.cc +++ b/ge/hybrid/executor/worker/shape_inference_engine.cc @@ -70,7 +70,7 @@ Status ShapeInferenceEngine::InferShape(NodeState &node_state) { // Do shape inference // Skipping infer shape of input node. 
GELOGD("[%s] Start to invoke InferShapeAndType", node_item.NodeName().c_str()); - if (!node_state.SkipInferShape()) { + if (!node_state.MaySkipShapeInference()) { RECORD_SHAPE_INFERENCE_EVENT(execution_context_, node_item.NodeName().c_str(), "[InferShapeAndType] Start"); GE_CHK_STATUS_RET(ShapeRefiner::InferShapeAndTypeForRunning(node_item.node, true), "[Invoke][InferShapeAndType] for %s failed.", node_item.NodeName().c_str()); From 5bcb04dfb797158bc460fd43a2dc6c4058c41b6b Mon Sep 17 00:00:00 2001 From: wq160 Date: Thu, 17 Jun 2021 09:43:45 +0800 Subject: [PATCH 041/226] update submodule --- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index 00c0c12e..8c5fd448 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 00c0c12eede6c7bce93a1eda5f0bb437ae80a7ec +Subproject commit 8c5fd4486f870d8b63213565aa39fdf1ba1e497a From 246d7e4fd8455f5ed5332b434b82d85b67f15358 Mon Sep 17 00:00:00 2001 From: y00500818 Date: Thu, 17 Jun 2021 10:36:12 +0800 Subject: [PATCH 042/226] bugfix for restore context --- ge/generator/ge_generator.cc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 575afb35..58047c89 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -674,6 +674,12 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr GELOGD("Current ctx is null."); ctx = nullptr; } + std::function callback = [&]() { + if (ctx != nullptr) { + (void)rtCtxSetCurrent(ctx); + } + }; + GE_MAKE_GUARD(restore, callback); GeRootModelPtr ge_root_model = nullptr; GE_CHECK_NOTNULL_EXEC(impl_, return PARAM_INVALID); @@ -712,11 +718,6 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr } return ret; } - - if (ctx != nullptr) { - (void)rtCtxSetCurrent(ctx); - } - return SUCCESS; } From 1bed26c72e9b8a7386703b8d698a4d55c379bb3f Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 17 Jun 2021 11:28:14 +0800 Subject: [PATCH 043/226] Remove reduplicated useless proto --- ge/client/proto/ge_api.proto | 1 - ge/client/proto/ge_ir.proto | 193 -- ge/client/proto/insert_op.proto | 140 -- ge/client/proto/om.proto | 396 ---- ge/client/proto/task.proto | 179 -- ge/common/proto/ge_ir.proto | 193 -- ge/common/proto/insert_op.proto | 140 -- ge/common/proto/om.proto | 396 ---- ge/common/proto/op_mapping.proto | 75 - ge/common/proto/task.proto | 179 -- ge/common/proto/tensorflow/attr_value.proto | 70 - ge/common/proto/tensorflow/function.proto | 108 - ge/common/proto/tensorflow/graph.proto | 64 - .../proto/tensorflow/graph_library.proto | 22 - ge/common/proto/tensorflow/node_def.proto | 71 - ge/common/proto/tensorflow/op_def.proto | 172 -- .../proto/tensorflow/resource_handle.proto | 37 - ge/common/proto/tensorflow/tensor.proto | 102 - ge/common/proto/tensorflow/tensor_shape.proto | 53 - ge/common/proto/tensorflow/types.proto | 82 - ge/common/proto/tensorflow/versions.proto | 39 - ge/executor/proto/dump_task.proto | 113 - ge/executor/proto/ge_ir.proto | 193 -- ge/executor/proto/insert_op.proto | 140 -- ge/executor/proto/om.proto | 396 ---- ge/executor/proto/op_mapping.proto | 75 - ge/executor/proto/task.proto | 179 -- ge/ge_local_engine/proto/task.proto | 179 -- ge/offline/proto/ge_ir.proto | 193 -- ge/offline/proto/insert_op.proto | 140 -- ge/offline/proto/om.proto | 396 ---- ge/offline/proto/task.proto | 179 -- ge/proto/caffe/caffe.proto | 1829 ----------------- ge/proto/dump_task.proto | 113 - 
ge/proto/fusion_model.proto | 21 - ge/proto/fwk_adapter.proto | 37 - ge/proto/ge_api.proto | 88 - ge/proto/ge_ir.proto | 193 -- ge/proto/insert_op.proto | 140 -- ge/proto/om.proto | 396 ---- ge/proto/op_mapping.proto | 75 - ge/proto/optimizer_priority.proto | 7 - ge/proto/task.proto | 179 -- ge/proto/tensorflow/attr_value.proto | 70 - ge/proto/tensorflow/function.proto | 108 - ge/proto/tensorflow/graph.proto | 64 - ge/proto/tensorflow/graph_library.proto | 22 - ge/proto/tensorflow/node_def.proto | 71 - ge/proto/tensorflow/op_def.proto | 172 -- ge/proto/tensorflow/resource_handle.proto | 37 - ge/proto/tensorflow/tensor.proto | 102 - ge/proto/tensorflow/tensor_shape.proto | 53 - ge/proto/tensorflow/types.proto | 82 - ge/proto/tensorflow/versions.proto | 39 - 54 files changed, 8793 deletions(-) delete mode 100644 ge/client/proto/ge_api.proto delete mode 100644 ge/client/proto/ge_ir.proto delete mode 100644 ge/client/proto/insert_op.proto delete mode 100755 ge/client/proto/om.proto delete mode 100644 ge/client/proto/task.proto delete mode 100644 ge/common/proto/ge_ir.proto delete mode 100644 ge/common/proto/insert_op.proto delete mode 100644 ge/common/proto/om.proto delete mode 100644 ge/common/proto/op_mapping.proto delete mode 100644 ge/common/proto/task.proto delete mode 100644 ge/common/proto/tensorflow/attr_value.proto delete mode 100644 ge/common/proto/tensorflow/function.proto delete mode 100644 ge/common/proto/tensorflow/graph.proto delete mode 100644 ge/common/proto/tensorflow/graph_library.proto delete mode 100644 ge/common/proto/tensorflow/node_def.proto delete mode 100644 ge/common/proto/tensorflow/op_def.proto delete mode 100644 ge/common/proto/tensorflow/resource_handle.proto delete mode 100644 ge/common/proto/tensorflow/tensor.proto delete mode 100644 ge/common/proto/tensorflow/tensor_shape.proto delete mode 100644 ge/common/proto/tensorflow/types.proto delete mode 100644 ge/common/proto/tensorflow/versions.proto delete mode 100644 ge/executor/proto/dump_task.proto delete mode 100644 ge/executor/proto/ge_ir.proto delete mode 100644 ge/executor/proto/insert_op.proto delete mode 100644 ge/executor/proto/om.proto delete mode 100644 ge/executor/proto/op_mapping.proto delete mode 100644 ge/executor/proto/task.proto delete mode 100644 ge/ge_local_engine/proto/task.proto delete mode 100644 ge/offline/proto/ge_ir.proto delete mode 100644 ge/offline/proto/insert_op.proto delete mode 100644 ge/offline/proto/om.proto delete mode 100644 ge/offline/proto/task.proto delete mode 100644 ge/proto/caffe/caffe.proto delete mode 100644 ge/proto/dump_task.proto delete mode 100755 ge/proto/fusion_model.proto delete mode 100644 ge/proto/fwk_adapter.proto delete mode 100755 ge/proto/ge_api.proto delete mode 100644 ge/proto/ge_ir.proto delete mode 100644 ge/proto/insert_op.proto delete mode 100644 ge/proto/om.proto delete mode 100644 ge/proto/op_mapping.proto delete mode 100644 ge/proto/optimizer_priority.proto delete mode 100644 ge/proto/task.proto delete mode 100644 ge/proto/tensorflow/attr_value.proto delete mode 100644 ge/proto/tensorflow/function.proto delete mode 100644 ge/proto/tensorflow/graph.proto delete mode 100644 ge/proto/tensorflow/graph_library.proto delete mode 100644 ge/proto/tensorflow/node_def.proto delete mode 100644 ge/proto/tensorflow/op_def.proto delete mode 100644 ge/proto/tensorflow/resource_handle.proto delete mode 100644 ge/proto/tensorflow/tensor.proto delete mode 100644 ge/proto/tensorflow/tensor_shape.proto delete mode 100644 ge/proto/tensorflow/types.proto delete mode 
100644 ge/proto/tensorflow/versions.proto diff --git a/ge/client/proto/ge_api.proto b/ge/client/proto/ge_api.proto deleted file mode 100644 index 26d705fe..00000000 --- a/ge/client/proto/ge_api.proto +++ /dev/null @@ -1 +0,0 @@ -../../proto/ge_api.proto \ No newline at end of file diff --git a/ge/client/proto/ge_ir.proto b/ge/client/proto/ge_ir.proto deleted file mode 100644 index c0ef3071..00000000 --- a/ge/client/proto/ge_ir.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. - DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type - DT_BF16 = 27; // bf16 type - DT_INT4 = 28; // int4 type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
-message NamedAttrs
-{
-    string name = 1;
-    map<string, AttrDef> attr = 2;
-}
-
-// Shape / dimension description, using row-major order
-message ShapeDef
-{
-    repeated int64 dim = 1;  // Size of each dimension
-}
-
-// Multidimensional data description
-message TensorDescriptor
-{
-    string name = 1;    // Optional parameter, tensor name
-
-    DataType dtype = 2;    // tensor datatype
-    ShapeDef shape = 3;    // Shape / dimension
-    string layout = 4;     // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
-
-    bool has_out_attr = 9;
-    int64 size = 10;
-    int64 weight_size = 11;
-    bool reuse_input = 12;
-    bool output_tensor = 13;
-    string device_type = 14;
-    bool input_tensor = 15;
-    int64 real_dim_cnt = 16;
-    int64 reuse_input_index = 17;
-    int64 data_offset = 18;
-    int64 cmps_size = 19;
-    string cmps_tab = 20;
-    int64 cmps_tab_offset = 21;
-
-    map<string, AttrDef> attr = 5;  // Set of extra parameter fields
-}
-
-// GeTensor definition
-message TensorDef
-{
-    TensorDescriptor desc = 1;  // Tensor description
-    bytes data = 2;             // Tensor data
-}
-
-
-// Operator description
-message OpDef
-{
-    string name = 1;  // name
-    string type = 2;  // type
-
-    repeated string input = 5;  // input original op name + outgoing index. op_name:index
-
-    map<string, AttrDef> attr = 10;  // Set of operator parameter fields
-
-    bool has_out_attr = 20;
-    int64 id = 21;
-    int64 stream_id = 22;
-    repeated string input_name = 23;
-    repeated string src_name = 24;
-    repeated int64 src_index = 25;
-    repeated string dst_name = 26;
-    repeated int64 dst_index = 27;
-    repeated int64 input_i = 28;
-    repeated int64 output_i = 29;
-    repeated int64 workspace = 30;
-    repeated int64 workspace_bytes = 31;
-    repeated bool is_input_const = 32;
-    repeated TensorDescriptor input_desc = 33;
-    repeated TensorDescriptor output_desc = 34;
-    repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
-    string name = 1;   // name
-
-    repeated string input = 4;   // Graph input
-    repeated string output = 5;  // Graph output
-
-    repeated OpDef op = 6;  // List of operators
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
-// model definition
-message ModelDef
-{
-    string name = 1;            // name
-    uint32 version = 2;         // IR Proto verion
-    string custom_version = 3;  // User model version number, passed in by user
-
-    repeated GraphDef graph = 7;  // Graph definition, graph[0] represents the main diagram in modeldef
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
diff --git a/ge/client/proto/insert_op.proto b/ge/client/proto/insert_op.proto
deleted file mode 100644
index 7d708865..00000000
--- a/ge/client/proto/insert_op.proto
+++ /dev/null
@@ -1,140 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
-    repeated AippOpParams aipp_op = 1;
-    repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
-    enum InputFormat {
-        UNDEFINED = 0;
-        YUV420SP_U8 = 1;
-        XRGB8888_U8 = 2;
-        RGB888_U8 = 3;
-        YUV400_U8 = 4;
-        NC1HWC0DI_FP16 = 5;
-        NC1HWC0DI_S8 = 6;
-        ARGB8888_U8 = 7;
-        YUYV_U8 = 8;
-        YUV422SP_U8 = 9;
-        AYUV444_U8 = 10;
-        RAW10 = 11;
-        RAW12 = 12;
-        RAW16 = 13;
-        RAW24 = 14;
-        RGB16 = 15;
-        RGB20 = 16;
-        RGB24 = 17;
-        RGB8_IR = 18;
-        RGB16_IR = 19;
-        RGB24_IR = 20;
-    }
-
-    enum AippMode {
-        undefined = 0;
-        static = 1;
-        dynamic = 2;
-    }
-
-    // AIPP mode: distinguishes static AIPP from dynamic AIPP
-    AippMode aipp_mode = 1;
-
-    // related_input_rank is required, of integer type; valid range >= 0 and <= the number of
-    // input Data operators of the model; the default value is 0.
-    // It identifies which model input AIPP processes, e.g. if the model has two inputs and the
-    // second one needs AIPP, set related_input_rank to 1.
-    uint32 related_input_rank = 2;
-
-    // related_input_name is optional and the top name of data node which inserts aipp
-    string related_input_name = 6;
-
-    // input_edge_idx is optional, of integer type; valid range >= 0.
-    // It applies different AIPP processing to different outputs of the Data operator; if the
-    // parameter is not set, AIPP is applied by default to all outputs of the model input
-    // specified by related_input_rank.
-    // The configured values must be <= the number of output edges of the Data operator.
-    repeated uint32 input_edge_idx = 3;
-
-    // [Begin] dynamic AIPP parameters, invalid when static AIPP is configured
-    uint32 max_src_image_size = 4;
-
-    // Whether rotation is supported; not supported by default. Enabling rotation incurs extra
-    // space and performance cost.
-    bool support_rotation = 5;
-
-    // [End] dynamic AIPP parameters
-
-
-    // [Begin] static AIPP parameters, invalid when dynamic AIPP is configured
-    InputFormat input_format = 51;
-    bool csc_switch = 52;
-    float cpadding_value = 53;
-    bool rbuv_swap_switch = 54;
-    bool ax_swap_switch = 55;
-    bool single_line_mode = 56;
-
-    int32 src_image_size_w = 57;
-    int32 src_image_size_h = 58;
-
-    bool crop = 59;
-    int32 load_start_pos_w = 60;
-    int32 load_start_pos_h = 61;
-    int32 crop_size_w = 62;
-    int32 crop_size_h = 63;
-
-    bool resize = 64;
-    int32 resize_output_w = 65;
-    int32 resize_output_h = 66;
-
-    bool padding = 67;
-    int32 left_padding_size = 68;
-    int32 right_padding_size = 69;
-    int32 top_padding_size = 70;
-    int32 bottom_padding_size = 71;
-    float padding_value = 72;
-
-    int32 mean_chn_0 = 10;
-    int32 mean_chn_1 = 11;
-    int32 mean_chn_2 = 12;
-    int32 mean_chn_3 = 19;
-    float min_chn_0 = 13;
-    float min_chn_1 = 14;
-    float min_chn_2 = 15;
-    float min_chn_3 = 20;
-    repeated float var_reci_chn_0 = 16;
-    repeated float var_reci_chn_1 = 17;
-    repeated float var_reci_chn_2 = 18;
-    repeated float var_reci_chn_3 = 21;
-
-    repeated int32 matrix_r0c0 = 30;
-    repeated int32 matrix_r0c1 = 31;
-    repeated int32 matrix_r0c2 = 32;
-    repeated int32 matrix_r1c0 = 33;
-    repeated int32 matrix_r1c1 = 34;
-    repeated int32 matrix_r1c2 = 35;
-    repeated int32 matrix_r2c0 = 36;
-    repeated int32 matrix_r2c1 = 37;
-    repeated int32 matrix_r2c2 = 38;
-    repeated int32 output_bias_0 = 39;
-    repeated int32 output_bias_1 = 40;
-    repeated int32 output_bias_2 = 41;
-    repeated int32 input_bias_0 = 42;
-    repeated int32 input_bias_1 = 43;
-    repeated int32 input_bias_2 = 44;
-
-    // [End] static AIPP parameters
-
-    // The n number that is used for raw/rgbir data into f16 transformation.
-    // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-    uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-    enum MultiShapeMode {
-        batch = 0;       // dynamic batch
-        resolution = 1;  // dynamic resolution, reserved for extension
-    }
-
-    MultiShapeMode mode = 1;        // operator mode
-    uint32 related_input_rank = 2;  // which input the new operator is inserted on
-
-
-    repeated uint32 batch_list = 11;  // batch_list values; the number of entries must be between 2 and 8
-}
diff --git a/ge/client/proto/om.proto b/ge/client/proto/om.proto
deleted file mode 100755
index e15e5f80..00000000
--- a/ge/client/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -enum TargetType -{ - MINI = 0; - TINY = 1; - LITE = 2; -} - -// offline model -message ModelDef { - string name = 1; - uint32 version = 2; - - uint64 memory_size = 10; - uint32 stream_num = 11; - uint32 event_num = 12; - uint64 weight_size = 13; - uint32 label_num = 15; - repeated OpDef op = 20; - TargetType target_type = 23; - - map attr = 30; -}; - -// operator define -message OpDef { - string name = 1; - string type = 2; - - uint32 id = 3; - uint32 stream_id = 4; - - repeated string input_name = 5; - - repeated string src_name = 8; - repeated int32 src_index = 9; - repeated int64 input = 10; - repeated int64 output = 11; - repeated TensorDescriptor input_desc = 12; - repeated TensorDescriptor output_desc = 13; - repeated WeightDef weights = 14; - repeated string dst_name = 15; - repeated int32 dst_index = 16; - - repeated int64 workspace = 20; - repeated uint32 workspace_bytes = 21; - - repeated string weight_name = 22; - repeated bool is_input_const = 23; - - map attr = 30; - - QuantizeFactorParams quantize_factor = 31; - - oneof op_params { - // start at 100 here - SendOpParams sender_param = 100; - RecvOpParams receiver_param = 200; - ConvolutionOpParams convolution_param = 300; - PoolingOpParams pooling_param = 400; - EltwiseOpParams eltwise_param = 500; - BatchNormOpParams batchnorm_param = 600; - ScaleOpParams scale_param = 700; - FullConnectionOpParams full_connection_param = 800; - SoftmaxOpParams softmax_param = 900; - ActivationOpParams activation_param = 1000; - ReshapeOpParams reshape_param = 1100; - } -}; - -message SendOpParams { - uint32 event_id = 1; -}; - -message RecvOpParams { - uint32 event_id = 1; -}; - -enum QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - 
-    bool ceil_mode = 13;
-    int32 data_mode = 14;
-
-    float alpha = 20;
-    float beta = 21;
-    repeated uint32 before_pad = 22;
-};
-
-message EltwiseOpParams {
-    int32 mode = 1;
-    repeated float coeff = 2;
-    float alpha = 3;
-    float beta = 4;
-    repeated WeightDef weight = 5;
-    bool relu_flag = 6;
-};
-
-message ActivationOpParams {
-    int32 mode = 1;
-    float coef = 2;
-    float alpha = 3;
-    float beta = 4;
-};
-
-message BatchNormOpParams {
-    int32 mode = 1;
-
-    float alpha = 2;
-    float beta = 3;
-    double epsilon = 4;  // optional, [default = 1e-5]
-    bool use_global_stats = 5;  // optional, true by default; testing mode
-    float moving_average_fraction = 6;  // optional, [default = .999];
-
-    WeightDef estimated_mean = 7;
-    WeightDef estimated_variance = 8;
-
-    WeightDef scale = 9;
-    WeightDef bias = 10;
-};
-
-message ScaleOpParams {
-    WeightDef scale = 1;
-    WeightDef bias = 2;
-};
-
-message ReshapeOpParams {
-    float alpha = 1;
-    float beta = 2;
-    ShapeDef shape = 3;
-    int32 axis = 4;
-    int32 num_axes = 5;
-    int32 format = 6;
-};
-
-message SoftmaxOpParams {
-    int32 algo = 1;
-    int32 mode = 2;
-    float alpha = 3;
-    float beta = 4;
-};
-
-message FullConnectionOpParams {
-    WeightDef filter = 1;
-    WeightDef bias = 2;
-    uint32 num_output = 3;
-    bool relu_flag = 12;
-};
-
-message FlattenOpParams {
-    float alpha = 1;
-    float beta = 2;
-    int32 start_axis = 3;
-    int32 end_axis = 4;
-}
-
-message AddLimitedOpParams {
-    float alpha = 1;
-    float beta = 2;
-    int32 axis = 3;
-    bool broadcast = 4;
-
-    repeated WeightDef weight = 10;
-};
-
-message MulLimitedOpParams {
-    float alpha = 1;
-    float beta = 2;
-    int32 axis = 3;
-    bool broadcast = 4;
-
-    repeated WeightDef weight = 10;
-};
-
-message AddOpParams {
-    float alpha = 1;
-    float beta = 2;
-
-    repeated WeightDef weight = 10;
-};
-
-message MulOpParams {
-    float alpha = 1;
-    float beta = 2;
-
-    repeated WeightDef weight = 10;
-};
-
-message SubOpParams {
-    float alpha = 1;
-    float beta = 2;
-
-    repeated WeightDef weight = 10;
-};
-
-message BiasAddOpParams {
-    float alpha = 1;
-    float beta = 2;
-
-    WeightDef bias = 10;
-};
-
-message MatMulOpParams {
-    float alpha = 1;
-    float beta = 2;
-    bool transposeX = 3;
-    bool transposeW = 4;
-
-    WeightDef filter = 10;
-    WeightDef bias = 12;
-};
-
-message RsqrtOpParams {
-    float alpha = 1;
-    float beta = 2;
-};
-
-
-message WeightDef {
-    int32 format = 1;
-    int32 data_type = 2;
-    ShapeDef shape = 3;
-    bytes data = 4;
-    int64 data_offset = 5;
-    uint32 cmps_size = 6;
-    bytes cmps_tab = 7;
-    int64 cmps_tab_offset = 10;
-    CompressInfo cmps_info = 8;
-    AllOffsetQuantizeInfo alloffset_quantize_info = 11;
-}
-
-message ShapeDef {
-    repeated int64 dim = 1;
-}
-
-enum DeviceType {
-    NPU = 0;  // By default, NPU is used.
-    CPU = 1;  // CPU
-}
-
-message AllOffsetQuantizeInfo {
-    float scale = 1;
-    int32 offset = 2;
-}
-
-message TensorDescriptor {
-    int32 format = 1;
-    int32 data_type = 2;
-    repeated int64 dim = 3;
-    uint32 size = 4;
-    bool reuse_input = 5;
-    bool output_tensor = 7;
-    DeviceType device_type = 8;
-    bool input_tensor = 9;
-    uint32 real_dim_cnt = 10;
-    uint32 reuse_input_index = 11;
-    AllOffsetQuantizeInfo alloffset_quantize_info = 12;
-}
-
-message CompressInfo {
-    int32 blockRow = 1;      // block row
-    int32 blockCol = 2;      // block col
-    int32 fractalK = 3;      // fractal K
-    int32 fractalN = 4;      // fractal N
-    int32 lastFractalK = 5;  // K of last fractal
-    int32 lastFractalN = 6;  // N of last fractal
-    int32 cubeSize = 7;      // cube's length
-    int32 loadDir = 8;       // data load direction 0: col load 1: row load
-}
-
-message AttrDef {
-    message ListValue {
-        repeated string s = 2;                  // "list(string)"
-        repeated int64 i = 3 [packed = true];   // "list(int)"
-        repeated float f = 4 [packed = true];   // "list(float)"
-        repeated bool b = 5 [packed = true];    // "list(bool)"
-        repeated uint32 u = 6 [packed = true];  // "list(uint)"
-        repeated bytes bt = 7;
-    }
-
-    oneof value {
-        string s = 2;   // "string"
-        int64 i = 3;    // "int"
-        float f = 4;    // "float"
-        bool b = 5;     // "bool"
-        uint32 u = 6;   // "uint32"
-        bytes bt = 7;
-        ListValue list = 1;  // any "list(...)"
-        NamedAttrs func = 10;
-    }
-}
-
-// A list of attr names and their values. The whole list is attached
-// with a string name. E.g., MatMul[T=float].
-message NamedAttrs {
-    string name = 1;
-    map<string, AttrDef> attr = 2;
-}
-
diff --git a/ge/client/proto/task.proto b/ge/client/proto/task.proto
deleted file mode 100644
index 0da5631e..00000000
--- a/ge/client/proto/task.proto
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - 
uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/ge/common/proto/ge_ir.proto b/ge/common/proto/ge_ir.proto deleted file mode 100644 index c0ef3071..00000000 --- a/ge/common/proto/ge_ir.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. - DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type - DT_BF16 = 27; // bf16 type - DT_INT4 = 28; // int4 type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
-message NamedAttrs
-{
-    string name = 1;
-    map<string, AttrDef> attr = 2;
-}
-
-// Shape / dimension description, using row-major order
-message ShapeDef
-{
-    repeated int64 dim = 1; // Size of each dimension
-}
-
-// Multidimensional data description
-message TensorDescriptor
-{
-    string name = 1; // Optional parameter, tensor name
-
-    DataType dtype = 2; // tensor datatype
-    ShapeDef shape = 3; // Shape / dimension
-    string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
-
-    bool has_out_attr = 9;
-    int64 size = 10;
-    int64 weight_size = 11;
-    bool reuse_input = 12;
-    bool output_tensor = 13;
-    string device_type = 14;
-    bool input_tensor = 15;
-    int64 real_dim_cnt = 16;
-    int64 reuse_input_index = 17;
-    int64 data_offset = 18;
-    int64 cmps_size = 19;
-    string cmps_tab = 20;
-    int64 cmps_tab_offset = 21;
-
-    map<string, AttrDef> attr = 5; // Set of extra parameter fields
-}
-
-// GeTensor definition
-message TensorDef
-{
-    TensorDescriptor desc = 1; // Tensor description
-    bytes data = 2; // Tensor data
-}
-
-
-// Operator description
-message OpDef
-{
-    string name = 1; // name
-    string type = 2; // type
-
-    repeated string input = 5; // input original op name + outgoing index. op_name:index
-
-    map<string, AttrDef> attr = 10; // Set of operator parameter fields
-
-    bool has_out_attr = 20;
-    int64 id = 21;
-    int64 stream_id = 22;
-    repeated string input_name = 23;
-    repeated string src_name = 24;
-    repeated int64 src_index = 25;
-    repeated string dst_name = 26;
-    repeated int64 dst_index = 27;
-    repeated int64 input_i = 28;
-    repeated int64 output_i = 29;
-    repeated int64 workspace = 30;
-    repeated int64 workspace_bytes = 31;
-    repeated bool is_input_const = 32;
-    repeated TensorDescriptor input_desc = 33;
-    repeated TensorDescriptor output_desc = 34;
-    repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
-    string name = 1; // name
-
-    repeated string input = 4; // Graph input
-    repeated string output = 5; // Graph output
-
-    repeated OpDef op = 6; // List of operators
-
-    map<string, AttrDef> attr = 11; // Extended field
-}
-
-// model definition
-message ModelDef
-{
-    string name = 1; // name
-    uint32 version = 2; // IR proto version
-    string custom_version = 3; // User model version number, passed in by user
-
-    repeated GraphDef graph = 7; // Graph definition; graph[0] is the main graph of the model
-
-    map<string, AttrDef> attr = 11; // Extended field
-}
-
diff --git a/ge/common/proto/insert_op.proto b/ge/common/proto/insert_op.proto
deleted file mode 100644
index 7d708865..00000000
--- a/ge/common/proto/insert_op.proto
+++ /dev/null
@@ -1,140 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
-  repeated AippOpParams aipp_op = 1;
-  repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
-  enum InputFormat {
-    UNDEFINED = 0;
-    YUV420SP_U8 = 1;
-    XRGB8888_U8 = 2;
-    RGB888_U8 = 3;
-    YUV400_U8 = 4;
-    NC1HWC0DI_FP16 = 5;
-    NC1HWC0DI_S8 = 6;
-    ARGB8888_U8 = 7;
-    YUYV_U8 = 8;
-    YUV422SP_U8 = 9;
-    AYUV444_U8 = 10;
-    RAW10 = 11;
-    RAW12 = 12;
-    RAW16 = 13;
-    RAW24 = 14;
-    RGB16 = 15;
-    RGB20 = 16;
-    RGB24 = 17;
-    RGB8_IR = 18;
-    RGB16_IR = 19;
-    RGB24_IR = 20;
-  }
-
-  enum AippMode {
-    undefined = 0;
-    static = 1;
-    dynamic = 2;
-  }
-
-  // AIPP mode: distinguishes static AIPP from dynamic AIPP
-  AippMode aipp_mode = 1;
-
-  // related_input_rank is required; integer; valid range: >= 0 and <= the number of Data inputs of the model; default 0.
-  // It identifies which model input this AIPP op processes; e.g. to apply AIPP to the model's 2nd input, set related_input_rank to 1.
-  uint32 related_input_rank = 2;
-
-  // related_input_name is optional and the top name of data node which inserts aipp
-  string related_input_name = 6;
-
-  // input_edge_idx is optional; integer; valid range: >= 0.
-  // It allows different AIPP processing on different output edges of the Data op; if unset, AIPP is applied
-  // by default to all output edges of the model input selected by related_input_rank.
-  // Configured values must be <= the number of output edges of the Data op.
-  repeated uint32 input_edge_idx = 3;
-
-  // [Begin] dynamic AIPP parameters, ineffective when static AIPP is configured
-  uint32 max_src_image_size = 4;
-
-  // Whether rotation is supported. Disabled by default; enabling rotation incurs extra memory and performance cost.
-  bool support_rotation = 5;
-
-  // [End] dynamic AIPP parameters
-
-
-  // [Begin] static AIPP parameters, ineffective when dynamic AIPP is configured
-  InputFormat input_format = 51;
-  bool csc_switch = 52;
-  float cpadding_value = 53;
-  bool rbuv_swap_switch = 54;
-  bool ax_swap_switch = 55;
-  bool single_line_mode = 56;
-
-  int32 src_image_size_w = 57;
-  int32 src_image_size_h = 58;
-
-  bool crop = 59;
-  int32 load_start_pos_w = 60;
-  int32 load_start_pos_h = 61;
-  int32 crop_size_w = 62;
-  int32 crop_size_h = 63;
-
-  bool resize = 64;
-  int32 resize_output_w = 65;
-  int32 resize_output_h = 66;
-
-  bool padding = 67;
-  int32 left_padding_size = 68;
-  int32 right_padding_size = 69;
-  int32 top_padding_size = 70;
-  int32 bottom_padding_size = 71;
-  float padding_value = 72;
-
-  int32 mean_chn_0 = 10;
-  int32 mean_chn_1 = 11;
-  int32 mean_chn_2 = 12;
-  int32 mean_chn_3 = 19;
-  float min_chn_0 = 13;
-  float min_chn_1 = 14;
-  float min_chn_2 = 15;
-  float min_chn_3 = 20;
-  repeated float var_reci_chn_0 = 16;
-  repeated float var_reci_chn_1 = 17;
-  repeated float var_reci_chn_2 = 18;
-  repeated float var_reci_chn_3 = 21;
-
-  repeated int32 matrix_r0c0 = 30;
-  repeated int32 matrix_r0c1 = 31;
-  repeated int32 matrix_r0c2 = 32;
-  repeated int32 matrix_r1c0 = 33;
-  repeated int32 matrix_r1c1 = 34;
-  repeated int32 matrix_r1c2 = 35;
-  repeated int32 matrix_r2c0 = 36;
-  repeated int32 matrix_r2c1 = 37;
-  repeated int32 matrix_r2c2 = 38;
-  repeated int32 output_bias_0 = 39;
-  repeated int32 output_bias_1 = 40;
-  repeated int32 output_bias_2 = 41;
-  repeated int32 input_bias_0 = 42;
-  repeated int32 input_bias_1 = 43;
-  repeated int32 input_bias_2 = 44;
-
-  // [End] static AIPP parameters
-
-  // The n number that is used for raw/rgbir data into f16 transformation.
-  // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-  uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-  enum MultiShapeMode {
-    batch = 0; // dynamic batch
-    resolution = 1; // dynamic resolution, reserved for extension
-  }
-
-  MultiShapeMode mode = 1; // operator mode
-  uint32 related_input_rank = 2; // which model input the new op is inserted at
-
-
-  repeated uint32 batch_list = 11; // batch_list values; the number of batch_list entries must be between 2 and 8
-}
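To make the AIPP knobs above concrete, here is a hedged sketch of a static AIPP configuration through hypothetical protoc-generated bindings (insert_op_pb2 is the name protoc would emit for this file, assumed rather than shipped). The static block (fields 51-72) only takes effect when aipp_mode is static, and raw/RGBIR inputs are scaled by x / 2^n via raw_rgbir_to_f16_n:

    import insert_op_pb2 as pb  # assumed protoc output for insert_op.proto

    aipp = pb.AippOpParams()
    aipp.aipp_mode = pb.AippOpParams.static        # image geometry fixed at model build time
    aipp.related_input_rank = 0                    # preprocess the model's first Data input
    aipp.input_format = pb.AippOpParams.YUV420SP_U8
    aipp.src_image_size_w = 224
    aipp.src_image_size_h = 224
    aipp.csc_switch = True                         # enable color-space conversion
    aipp.raw_rgbir_to_f16_n = 0                    # 0 disables the x / (2 ** n) scaling

    ops = pb.InsertNewOps()
    ops.aipp_op.append(aipp)                       # append() copies the message in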
diff --git a/ge/common/proto/om.proto b/ge/common/proto/om.proto
deleted file mode 100644
index e15e5f80..00000000
--- a/ge/common/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -enum TargetType -{ - MINI = 0; - TINY = 1; - LITE = 2; -} - -// offline model -message ModelDef { - string name = 1; - uint32 version = 2; - - uint64 memory_size = 10; - uint32 stream_num = 11; - uint32 event_num = 12; - uint64 weight_size = 13; - uint32 label_num = 15; - repeated OpDef op = 20; - TargetType target_type = 23; - - map attr = 30; -}; - -// operator define -message OpDef { - string name = 1; - string type = 2; - - uint32 id = 3; - uint32 stream_id = 4; - - repeated string input_name = 5; - - repeated string src_name = 8; - repeated int32 src_index = 9; - repeated int64 input = 10; - repeated int64 output = 11; - repeated TensorDescriptor input_desc = 12; - repeated TensorDescriptor output_desc = 13; - repeated WeightDef weights = 14; - repeated string dst_name = 15; - repeated int32 dst_index = 16; - - repeated int64 workspace = 20; - repeated uint32 workspace_bytes = 21; - - repeated string weight_name = 22; - repeated bool is_input_const = 23; - - map attr = 30; - - QuantizeFactorParams quantize_factor = 31; - - oneof op_params { - // start at 100 here - SendOpParams sender_param = 100; - RecvOpParams receiver_param = 200; - ConvolutionOpParams convolution_param = 300; - PoolingOpParams pooling_param = 400; - EltwiseOpParams eltwise_param = 500; - BatchNormOpParams batchnorm_param = 600; - ScaleOpParams scale_param = 700; - FullConnectionOpParams full_connection_param = 800; - SoftmaxOpParams softmax_param = 900; - ActivationOpParams activation_param = 1000; - ReshapeOpParams reshape_param = 1100; - } -}; - -message SendOpParams { - uint32 event_id = 1; -}; - -message RecvOpParams { - uint32 event_id = 1; -}; - -enum QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - 
bool ceil_mode = 13;
-  int32 data_mode = 14;
-
-  float alpha = 20;
-  float beta = 21;
-  repeated uint32 before_pad = 22;
-};
-
-message EltwiseOpParams {
-  int32 mode = 1;
-  repeated float coeff = 2;
-  float alpha = 3;
-  float beta = 4;
-  repeated WeightDef weight = 5;
-  bool relu_flag = 6;
-};
-
-message ActivationOpParams {
-  int32 mode = 1;
-  float coef = 2;
-  float alpha = 3;
-  float beta = 4;
-};
-
-message BatchNormOpParams {
-  int32 mode = 1;
-
-  float alpha = 2;
-  float beta = 3;
-  double epsilon = 4; // optional, [default = 1e-5]
-  bool use_global_stats = 5; // optional, true by default (testing mode)
-  float moving_average_fraction = 6; // optional, [default = .999]
-
-  WeightDef estimated_mean = 7;
-  WeightDef estimated_variance = 8;
-
-  WeightDef scale = 9;
-  WeightDef bias = 10;
-};
-
-message ScaleOpParams {
-  WeightDef scale = 1;
-  WeightDef bias = 2;
-};
-
-message ReshapeOpParams {
-  float alpha = 1;
-  float beta = 2;
-  ShapeDef shape = 3;
-  int32 axis = 4;
-  int32 num_axes = 5;
-  int32 format = 6;
-};
-
-message SoftmaxOpParams {
-  int32 algo = 1;
-  int32 mode = 2;
-  float alpha = 3;
-  float beta = 4;
-};
-
-message FullConnectionOpParams {
-  WeightDef filter = 1;
-  WeightDef bias = 2;
-  uint32 num_output = 3;
-  bool relu_flag = 12;
-};
-
-message FlattenOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 start_axis = 3;
-  int32 end_axis = 4;
-}
-
-message AddLimitedOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 axis = 3;
-  bool broadcast = 4;
-
-  repeated WeightDef weight = 10;
-};
-
-message MulLimitedOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 axis = 3;
-  bool broadcast = 4;
-
-  repeated WeightDef weight = 10;
-};
-
-message AddOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message MulOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message SubOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message BiasAddOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  WeightDef bias = 10;
-};
-
-message MatMulOpParams {
-  float alpha = 1;
-  float beta = 2;
-  bool transposeX = 3;
-  bool transposeW = 4;
-
-  WeightDef filter = 10;
-  WeightDef bias = 12;
-};
-
-message RsqrtOpParams {
-  float alpha = 1;
-  float beta = 2;
-};
-
-
-message WeightDef {
-  int32 format = 1;
-  int32 data_type = 2;
-  ShapeDef shape = 3;
-  bytes data = 4;
-  int64 data_offset = 5;
-  uint32 cmps_size = 6;
-  bytes cmps_tab = 7;
-  int64 cmps_tab_offset = 10;
-  CompressInfo cmps_info = 8;
-  AllOffsetQuantizeInfo alloffset_quantize_info = 11;
-}
-
-message ShapeDef {
-  repeated int64 dim = 1;
-}
-
-enum DeviceType {
-  NPU = 0; // By default, we will use NPU.
-  CPU = 1; // CPU
-}
-
-message AllOffsetQuantizeInfo {
-  float scale = 1;
-  int32 offset = 2;
-}
-
-message TensorDescriptor {
-  int32 format = 1;
-  int32 data_type = 2;
-  repeated int64 dim = 3;
-  uint32 size = 4;
-  bool reuse_input = 5;
-  bool output_tensor = 7;
-  DeviceType device_type = 8;
-  bool input_tensor = 9;
-  uint32 real_dim_cnt = 10;
-  uint32 reuse_input_index = 11;
-  AllOffsetQuantizeInfo alloffset_quantize_info = 12;
-}
-
-message CompressInfo {
-  int32 blockRow = 1; // block row
-  int32 blockCol = 2; // block col
-  int32 fractalK = 3; // fractal K
-  int32 fractalN = 4; // fractal N
-  int32 lastFractalK = 5; // K of last fractal
-  int32 lastFractalN = 6; // N of last fractal
-  int32 cubeSize = 7; // cube's length
-  int32 loadDir = 8; // data load direction: 0 = col load, 1 = row load
-}
-
-message AttrDef {
-  message ListValue {
-    repeated string s = 2; // "list(string)"
-    repeated int64 i = 3 [packed = true]; // "list(int)"
-    repeated float f = 4 [packed = true]; // "list(float)"
-    repeated bool b = 5 [packed = true]; // "list(bool)"
-    repeated uint32 u = 6 [packed = true]; // "list(uint)"
-    repeated bytes bt = 7;
-  }
-
-  oneof value {
-    string s = 2; // "string"
-    int64 i = 3; // "int"
-    float f = 4; // "float"
-    bool b = 5; // "bool"
-    uint32 u = 6; // "uint32"
-    bytes bt = 7;
-    ListValue list = 1; // any "list(...)"
-    NamedAttrs func = 10;
-  }
-}
-
-// A list of attr names and their values. The whole list is attached
-// with a string name. E.g., MatMul[T=float].
-message NamedAttrs {
-  string name = 1;
-  map<string, AttrDef> attr = 2;
-}
-
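AllOffsetQuantizeInfo above carries only a (scale, offset) pair; the deleted proto does not spell out how they are applied. Purely as an illustration of the arithmetic such a pair conventionally parameterizes (an assumption, not documented GraphEngine behavior):

    def quantize(x: float, scale: float, offset: int) -> int:
        # conventional affine quantization: q = round(x / scale) + offset
        return round(x / scale) + offset

    def dequantize(q: int, scale: float, offset: int) -> float:
        # inverse mapping; reconstruction error is bounded by scale / 2
        return (q - offset) * scale

    scale, offset = 0.05, 128
    q = quantize(0.7, scale, offset)                    # round(14.0) + 128 == 142
    assert abs(dequantize(q, scale, offset) - 0.7) <= scale / 2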
diff --git a/ge/common/proto/op_mapping.proto b/ge/common/proto/op_mapping.proto
deleted file mode 100644
index d626eb49..00000000
--- a/ge/common/proto/op_mapping.proto
+++ /dev/null
@@ -1,75 +0,0 @@
-syntax = "proto3";
-package toolkit.aicpu.dump;
-
-message Shape {
-  repeated uint64 dim = 1;
-}
-
-message Output {
-  int32 data_type = 1;
-  int32 format = 2;
-  Shape shape = 3;
-  uint64 address = 4;
-  string original_name = 5;
-  int32 original_output_index = 6;
-  int32 original_output_data_type = 7;
-  int32 original_output_format = 8;
-  uint64 size = 9;
-  Shape origin_shape = 10;
-}
-
-message Input {
-  int32 data_type = 1;
-  int32 format = 2;
-  Shape shape = 3;
-  uint64 address = 4;
-  uint64 size = 5;
-  Shape origin_shape = 6;
-}
-
-enum BufferType {
-  L1 = 0;
-}
-
-message OpBuffer {
-  BufferType buffer_type = 1;
-  uint64 address = 2;
-  uint64 size = 3;
-}
-
-message Op {
-  string op_name = 1;
-  string op_type = 2;
-}
-
-message Task {
-  uint32 task_id = 1;
-  uint32 stream_id = 2;
-  Op op = 3;
-  repeated Output output = 4;
-  bool end_graph = 5;
-  repeated Input input = 6;
-  repeated OpBuffer buffer = 7;
-}
-
-message OpMappingInfo {
-  string dump_path = 1;
-  oneof model_name_param {
-    string model_name = 2;
-  }
-  oneof model_id_param {
-    uint32 model_id = 3;
-  }
-  oneof step_id {
-    uint64 step_id_addr = 4;
-  }
-  oneof iterations_per_loop {
-    uint64 iterations_per_loop_addr = 5;
-  }
-  oneof loop_cond {
-    uint64 loop_cond_addr = 6;
-  }
-  uint32 flag = 7; // 0x01 load, 0x00 unload
-  repeated Task task = 8;
-  string dump_step = 9;
-}
\ No newline at end of file
diff --git a/ge/common/proto/task.proto b/ge/common/proto/task.proto
deleted file mode 100644
index 0da5631e..00000000
--- a/ge/common/proto/task.proto
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 
op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/ge/common/proto/tensorflow/attr_value.proto b/ge/common/proto/tensorflow/attr_value.proto deleted file mode 100644 index 438d7163..00000000 --- a/ge/common/proto/tensorflow/attr_value.proto +++ /dev/null @@ -1,70 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "AttrValueProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing the value for an attr used to configure an Op. -// Comment indicates the corresponding attr type. Only the field matching the -// attr type may be filled. -message AttrValue { - // LINT.IfChange - message ListValue { - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated DataType type = 6 [packed = true]; // "list(type)" - repeated TensorShapeProto shape = 7; // "list(shape)" - repeated TensorProto tensor = 8; // "list(tensor)" - repeated NameAttrList func = 9; // "list(attr)" - } - // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) - - oneof value { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - DataType type = 6; // "type" - TensorShapeProto shape = 7; // "shape" - TensorProto tensor = 8; // "tensor" - ListValue list = 1; // any "list(...)" - - // "func" represents a function. func.name is a function's name or - // a primitive op's name. func.attr.first is the name of an attr - // defined for that function. func.attr.second is the value for - // that attr in the instantiation. - NameAttrList func = 10; - - // This is a placeholder only used in nodes defined inside a - // function. It indicates the attr value will be supplied when - // the function is instantiated. For example, let us suppose a - // node "N" in function "FN". "N" has an attr "A" with value - // placeholder = "foo". When FN is instantiated with attr "foo" - // set to "bar", the instantiated node N's attr A will have been - // given the value "bar". - string placeholder = 9; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
-message NameAttrList { - string name = 1; - map attr = 2; -} diff --git a/ge/common/proto/tensorflow/function.proto b/ge/common/proto/tensorflow/function.proto deleted file mode 100644 index 44681e32..00000000 --- a/ge/common/proto/tensorflow/function.proto +++ /dev/null @@ -1,108 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "FunctionProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "node_def.proto"; -import "op_def.proto"; - -// A library is a set of named functions. -message FunctionDefLibrary { - repeated FunctionDef function = 1; - repeated GradientDef gradient = 2; -} - -// A function can be instantiated when the runtime can bind every attr -// with a value. When a GraphDef has a call to a function, it must -// have binding for every attr defined in the signature. -// * device spec, etc. -message FunctionDef { - // The definition of the function's name, arguments, return values, - // attrs etc. - OpDef signature = 1; - - // Attributes specific to this function definition. - map attr = 5; - - // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21. - reserved 2; - - // In both of the following fields, there is the need to specify an - // output that is used as either the input to another node (in - // `node_def`) or as a return value of the function (in `ret`). - // Unlike the NodeDefs in GraphDef, we need to be able to specify a - // list in some cases (instead of just single outputs). Also, we - // need to be able to deal with lists of unknown length (so the - // output index may not be known at function definition time). So - // we use the following format instead: - // * "fun_in" where "fun_in" is the name of a function input arg in - // the `signature` field above. This represents that input, whether - // it is a single tensor or a list. - // * "fun_in:0" gives the first element of a function input arg (a - // non-list input is considered a list of length 1 for these - // purposes). - // * "node:out" where "node" is the name of a node in `node_def` and - // "out" is the name one of its op's output arguments (the name - // comes from the OpDef of the node's op). This represents that - // node's output, whether it is a single tensor or a list. - // Note: We enforce that an op's output arguments are never - // renamed in the backwards-compatibility test. - // * "node:out:0" gives the first element of a node output arg (a - // non-list output is considered a list of length 1 for these - // purposes). - // - // NOT CURRENTLY SUPPORTED (but may be in the future): - // * "node:out:-1" gives last element in a node output list - // * "node:out:1:" gives a list with all but the first element in a - // node output list - // * "node:out::-1" gives a list with all but the last element in a - // node output list - - // The body of the function. 
Unlike the NodeDefs in a GraphDef, attrs - // may have values of type `placeholder` and the `input` field uses - // the "output" format above. - - // By convention, "op" in node_def is resolved by consulting with a - // user-defined library first. If not resolved, "func" is assumed to - // be a builtin op. - repeated NodeDef node_def = 3; - - // A mapping from the output arg names from `signature` to the - // outputs from `node_def` that should be returned by the function. - map ret = 4; -} - -// GradientDef defines the gradient function of a function defined in -// a function library. -// -// A gradient function g (specified by gradient_func) for a function f -// (specified by function_name) must follow the following: -// -// The function 'f' must be a numerical function which takes N inputs -// and produces M outputs. Its gradient function 'g', which is a -// function taking N + M inputs and produces N outputs. -// -// I.e. if we have -// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), -// then, g is -// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, -// dL/dy1, dL/dy2, ..., dL/dy_M), -// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the -// loss function). dL/dx_i is the partial derivative of L with respect -// to x_i. -message GradientDef { - string function_name = 1; // The function name. - string gradient_func = 2; // The gradient function's name. -} diff --git a/ge/common/proto/tensorflow/graph.proto b/ge/common/proto/tensorflow/graph.proto deleted file mode 100644 index 73bfc6ee..00000000 --- a/ge/common/proto/tensorflow/graph.proto +++ /dev/null @@ -1,64 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "GraphProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "node_def.proto"; -import "function.proto"; -import "versions.proto"; - -// Represents the graph of operations -message GraphDef { - repeated NodeDef node = 1; - - // Compatibility versions of the graph. See core/public/version.h for version - // history. The GraphDef version is distinct from the TensorFlow version, and - // each release of TensorFlow will support a range of GraphDef versions. - VersionDef versions = 4; - - // Deprecated single version field; use versions above instead. Since all - // GraphDef changes before "versions" was introduced were forward - // compatible, this field is entirely ignored. - int32 version = 3 [deprecated = true]; - - // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. - // - // "library" provides user-defined functions. - // - // Naming: - // * library.function.name are in a flat namespace. - // NOTE: We may need to change it to be hierarchical to support - // different orgs. E.g., - // { "/google/nn", { ... }}, - // { "/google/vision", { ... }} - // { "/org_foo/module_bar", { ... }} - // map named_lib; - // * If node[i].op is the name of one function in "library", - // node[i] is deemed as a function call. Otherwise, node[i].op - // must be a primitive operation supported by the runtime. 
- // - // - // Function call semantics: - // - // * The callee may start execution as soon as some of its inputs - // are ready. The caller may want to use Tuple() mechanism to - // ensure all inputs are ready in the same time. - // - // * The consumer of return values may start executing as soon as - // the return values the consumer depends on are ready. The - // consumer may want to use Tuple() mechanism to ensure the - // consumer does not start until all return values of the callee - // function are ready. - FunctionDefLibrary library = 2; -}; diff --git a/ge/common/proto/tensorflow/graph_library.proto b/ge/common/proto/tensorflow/graph_library.proto deleted file mode 100644 index 7bca0838..00000000 --- a/ge/common/proto/tensorflow/graph_library.proto +++ /dev/null @@ -1,22 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; - -import "graph.proto"; - -message GeGraphDef { - string name = 1; - GraphDef graph = 2; -} - -message GraphDefLibrary { - repeated GeGraphDef graph_def = 1; -}; \ No newline at end of file diff --git a/ge/common/proto/tensorflow/node_def.proto b/ge/common/proto/tensorflow/node_def.proto deleted file mode 100644 index 50cf5cac..00000000 --- a/ge/common/proto/tensorflow/node_def.proto +++ /dev/null @@ -1,71 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; - -message NodeDef { - // The name given to this operator. Used for naming inputs, - // logging, visualization, etc. Unique within a single GraphDef. - // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*". - string name = 1; - - // The operation name. There may be custom parameters in attrs. - // Op names starting with an underscore are reserved for internal use. - string op = 2; - - // Each input is "node:src_output" with "node" being a string name and - // "src_output" indicating which output tensor to use from "node". If - // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs - // may optionally be followed by control inputs that have the format - // "^node". - repeated string input = 3; - - // A (possibly partial) specification for the device on which this - // node should be placed. 
- // The expected syntax for this string is as follows: - // - // DEVICE_SPEC ::= PARTIAL_SPEC - // - // PARTIAL_SPEC ::= ("/" CONSTRAINT) * - // CONSTRAINT ::= ("job:" JOB_NAME) - // | ("replica:" [1-9][0-9]*) - // | ("task:" [1-9][0-9]*) - // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) - // - // Valid values for this string include: - // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) - // * "/job:worker/device:GPU:3" (partial specification) - // * "" (no specification) - // - // If the constraints do not resolve to a single device (or if this - // field is empty or not present), the runtime will attempt to - // choose a device automatically. - string device = 4; - - // Operation-specific graph-construction-time configuration. - // Note that this should include all attrs defined in the - // corresponding OpDef, including those with a value matching - // the default -- this allows the default to change and makes - // NodeDefs easier to interpret on their own. However, if - // an attr with a default is not specified in this list, the - // default will be used. - // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and - // one of the names from the corresponding OpDef's attr field). - // The values must have a type matching the corresponding OpDef - // attr's type field. - // Add some examples here showing best practices. - map attr = 5; -}; diff --git a/ge/common/proto/tensorflow/op_def.proto b/ge/common/proto/tensorflow/op_def.proto deleted file mode 100644 index 7f0e8ce2..00000000 --- a/ge/common/proto/tensorflow/op_def.proto +++ /dev/null @@ -1,172 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "OpDefProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "types.proto"; - -// Defines an operation. A NodeDef in a GraphDef specifies an Op by -// using the "op" field which should match the name of a OpDef. -// LINT.IfChange -message OpDef { - // Op names starting with an underscore are reserved for internal use. - // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*". - string name = 1; - - // For describing inputs and outputs. - message ArgDef { - // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". - string name = 1; - - // Human readable description. - string description = 2; - - // Describes the type of one or more tensors that are accepted/produced - // by this input/output arg. The only legal combinations are: - // * For a single tensor: either the "type" field is set or the - // "type_attr" field is set to the name of an attr with type "type". - // * For a sequence of tensors with the same type: the "number_attr" - // field will be set to the name of an attr with type "int", and - // either the "type" or "type_attr" field will be set as for - // single tensors. - // * For a sequence of tensors, the "type_list_attr" field will be set - // to the name of an attr with type "list(type)". 
- DataType type = 3; - string type_attr = 4; // if specified, attr must have type "type" - string number_attr = 5; // if specified, attr must have type "int" - // If specified, attr must have type "list(type)", and none of - // type, type_attr, and number_attr may be specified. - string type_list_attr = 6; - - // For inputs: if true, the inputs are required to be refs. - // By default, inputs can be either refs or non-refs. - // For outputs: if true, outputs are refs, otherwise they are not. - bool is_ref = 16; - }; - - // Description of the input(s). - repeated ArgDef input_arg = 2; - - // Description of the output(s). - repeated ArgDef output_arg = 3; - - // Description of the graph-construction-time configuration of this - // Op. That is to say, this describes the attr fields that will - // be specified in the NodeDef. - message AttrDef { - // A descriptive name for the argument. May be used, e.g. by the - // Python client, as a keyword argument name, and so should match - // the regexp "[a-z][a-z0-9_]+". - string name = 1; - - // One of the type names from attr_value.proto ("string", "list(string)", - // "int", etc.). - string type = 2; - - // A reasonable default for this attribute if the user does not supply - // a value. If not specified, the user must supply a value. - AttrValue default_value = 3; - - // Human-readable description. - string description = 4; - - - // --- Constraints --- - // These constraints are only in effect if specified. Default is no - // constraints. - - // For type == "int", this is a minimum value. For "list(___)" - // types, this is the minimum length. - bool has_minimum = 5; - int64 minimum = 6; - - // The set of allowed values. Has type that is the "list" version - // of the "type" field above (uses the "list" field of AttrValue). - // If type == "type" or "list(type)" above, then the "type" field - // of "allowed_values.list" has the set of allowed DataTypes. - // If type == "string" or "list(string)", then the "s" field of - // "allowed_values.list" has the set of allowed strings. - AttrValue allowed_values = 7; - } - repeated AttrDef attr = 4; - - // Optional deprecation based on GraphDef versions. - OpDeprecation deprecation = 8; - - // One-line human-readable description of what the Op does. - string summary = 5; - - // Additional, longer human-readable description of what the Op does. - string description = 6; - - // ------------------------------------------------------------------------- - // Which optimizations this operation can participate in. - - // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) - bool is_commutative = 18; - - // If is_aggregate is true, then this operation accepts N >= 2 - // inputs and produces 1 output all of the same type. Should be - // associative and commutative, and produce output with the same - // shape as the input. The optimizer may replace an aggregate op - // taking input from multiple devices with a tree of aggregate ops - // that aggregate locally within each device (and possibly within - // groups of nearby devices) before communicating. - bool is_aggregate = 16; // for things like add - - // Other optimizations go here, like - // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc. - - // ------------------------------------------------------------------------- - // Optimization constraints. - - // Ops are marked as stateful if their behavior depends on some state beyond - // their input tensors (e.g. variable reading op) or if they have - // a side-effect (e.g. 
printing or asserting ops). Equivalently, stateless ops - // must always produce the same output for the same input and have - // no side-effects. - // - // By default Ops may be moved between devices. Stateful ops should - // either not be moved, or should only be moved if that state can also - // be moved (e.g. via some sort of save / restore). - // Stateful ops are guaranteed to never be optimized away by Common - // Subexpression Elimination (CSE). - bool is_stateful = 17; // for things like variables, queue - - // ------------------------------------------------------------------------- - // Non-standard options. - - // By default, all inputs to an Op must be initialized Tensors. Ops - // that may initialize tensors for the first time should set this - // field to true, to allow the Op to take an uninitialized Tensor as - // input. - bool allows_uninitialized_input = 19; // for Assign, etc. -}; -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc) - -// Information about version-dependent deprecation of an op -message OpDeprecation { - // First GraphDef version at which the op is disallowed. - int32 version = 1; - - // Explanation of why it was deprecated and what to use instead. - string explanation = 2; -}; - -// A collection of OpDefs -message OpList { - repeated OpDef op = 1; -}; diff --git a/ge/common/proto/tensorflow/resource_handle.proto b/ge/common/proto/tensorflow/resource_handle.proto deleted file mode 100644 index 91c46c9a..00000000 --- a/ge/common/proto/tensorflow/resource_handle.proto +++ /dev/null @@ -1,37 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "ResourceHandle"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Protocol buffer representing a handle to a tensorflow resource. Handles are -// not valid across executions, but can be serialized back and forth from within -// a single run. -message ResourceHandleProto { - // Unique name for the device containing the resource. - string device = 1; - - // Container in which this resource is placed. - string container = 2; - - // Unique name of this resource. - string name = 3; - - // Hash code for the type of the resource. Is only valid in the same device - // and in the same execution. - uint64 hash_code = 4; - - // For debug-only, the name of the type pointed to by this handle, if - // available. - string maybe_type_name = 5; -}; diff --git a/ge/common/proto/tensorflow/tensor.proto b/ge/common/proto/tensorflow/tensor.proto deleted file mode 100644 index 48eeb6c4..00000000 --- a/ge/common/proto/tensorflow/tensor.proto +++ /dev/null @@ -1,102 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). 
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TensorProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "resource_handle.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing a tensor. -message TensorProto { - DataType dtype = 1; - - // Shape of the tensor. - TensorShapeProto tensor_shape = 2; - - // Only one of the representations below is set, one of "tensor_contents" and - // the "xxx_val" attributes. We are not using oneof because as oneofs cannot - // contain repeated fields it would require another extra set of messages. - - // Version number. - // - // In version 0, if the "repeated xxx" representations contain only one - // element, that element is repeated to fill the shape. This makes it easy - // to represent a constant Tensor with a single value. - int32 version_number = 3; - - // Serialized raw tensor content from either Tensor::AsProtoTensorContent or - // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation - // can be used for all tensor types. The purpose of this representation is to - // reduce serialization overhead during RPC call by avoiding serialization of - // many repeated small items. - bytes tensor_content = 4; - - // Type specific representations that make it easy to create tensor protos in - // all languages. Only the representation corresponding to "dtype" can - // be set. The values hold the flattened representation of the tensor in - // row major order. - - // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll - // have some pointless zero padding for each value here. - repeated int32 half_val = 13 [packed = true]; - - // DT_FLOAT. - repeated float float_val = 5 [packed = true]; - - // DT_DOUBLE. - repeated double double_val = 6 [packed = true]; - - // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. - repeated int32 int_val = 7 [packed = true]; - - // DT_STRING - repeated bytes string_val = 8; - - // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real - // and imaginary parts of i-th single precision complex. - repeated float scomplex_val = 9 [packed = true]; - - // DT_INT64 - repeated int64 int64_val = 10 [packed = true]; - - // DT_BOOL - repeated bool bool_val = 11 [packed = true]; - - // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real - // and imaginary parts of i-th double precision complex. - repeated double dcomplex_val = 12 [packed = true]; - - // DT_RESOURCE - repeated ResourceHandleProto resource_handle_val = 14; - - // DT_VARIANT - repeated VariantTensorDataProto variant_val = 15; - - // DT_UINT32 - repeated uint32 uint32_val = 16 [packed = true]; - - // DT_UINT64 - repeated uint64 uint64_val = 17 [packed = true]; -}; - -// Protocol buffer representing the serialization format of DT_VARIANT tensors. -message VariantTensorDataProto { - // Name of the type of objects being serialized. - string type_name = 1; - // Portions of the object that are not Tensors. - bytes metadata = 2; - // Tensors contained within objects being serialized. 
- repeated TensorProto tensors = 3; -} diff --git a/ge/common/proto/tensorflow/tensor_shape.proto b/ge/common/proto/tensorflow/tensor_shape.proto deleted file mode 100644 index 3a6d8c5a..00000000 --- a/ge/common/proto/tensorflow/tensor_shape.proto +++ /dev/null @@ -1,53 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -// Protocol buffer representing the shape of tensors. - -syntax = "proto3"; -option cc_enable_arenas = true; -option java_outer_classname = "TensorShapeProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -package domi.tensorflow; - -// Dimensions of a tensor. -message TensorShapeProto { - // One dimension of the tensor. - message Dim { - // Size of the tensor in that dimension. - // This value must be >= -1, but values of -1 are reserved for "unknown" - // shapes (values of -1 mean "unknown" dimension). Certain wrappers - // that work with TensorShapeProto may fail at runtime when deserializing - // a TensorShapeProto containing a dim value of -1. - int64 size = 1; - - // Optional name of the tensor dimension. - string name = 2; - }; - - // Dimensions of the tensor, such as {"input", 30}, {"output", 40} - // for a 30 x 40 2D tensor. If an entry has size -1, this - // corresponds to a dimension of unknown size. The names are - // optional. - // - // The order of entries in "dim" matters: It indicates the layout of the - // values in the tensor in-memory representation. - // - // The first entry in "dim" is the outermost dimension used to layout the - // values, the last entry is the innermost dimension. This matches the - // in-memory layout of RowMajor Eigen tensors. - // - // If "dim.size()" > 0, "unknown_rank" must be false. - repeated Dim dim = 2; - - // If true, the number of dimensions in the shape is unknown. - // - // If true, "dim.size()" must be 0. - bool unknown_rank = 3; -}; diff --git a/ge/common/proto/tensorflow/types.proto b/ge/common/proto/tensorflow/types.proto deleted file mode 100644 index f40e49cb..00000000 --- a/ge/common/proto/tensorflow/types.proto +++ /dev/null @@ -1,82 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TypesProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// LINT.IfChange -enum DataType { - // Not a legal value for DataType. Used to indicate a DataType field - // has not been set. - DT_INVALID = 0; - - // Data types that all computation devices are expected to be - // capable to support. 
- DT_FLOAT = 1; - DT_DOUBLE = 2; - DT_INT32 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_INT8 = 6; - DT_STRING = 7; - DT_COMPLEX64 = 8; // Single-precision complex - DT_INT64 = 9; - DT_BOOL = 10; - DT_QINT8 = 11; // Quantized int8 - DT_QUINT8 = 12; // Quantized uint8 - DT_QINT32 = 13; // Quantized int32 - DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. - DT_QINT16 = 15; // Quantized int16 - DT_QUINT16 = 16; // Quantized uint16 - DT_UINT16 = 17; - DT_COMPLEX128 = 18; // Double-precision complex - DT_HALF = 19; - DT_RESOURCE = 20; - DT_VARIANT = 21; // Arbitrary C++ data types - DT_UINT32 = 22; - DT_UINT64 = 23; - - // Do not use! These are only for parameters. Every enum above - // should have a corresponding value below (verified by types_test). - DT_FLOAT_REF = 101; - DT_DOUBLE_REF = 102; - DT_INT32_REF = 103; - DT_UINT8_REF = 104; - DT_INT16_REF = 105; - DT_INT8_REF = 106; - DT_STRING_REF = 107; - DT_COMPLEX64_REF = 108; - DT_INT64_REF = 109; - DT_BOOL_REF = 110; - DT_QINT8_REF = 111; - DT_QUINT8_REF = 112; - DT_QINT32_REF = 113; - DT_BFLOAT16_REF = 114; - DT_QINT16_REF = 115; - DT_QUINT16_REF = 116; - DT_UINT16_REF = 117; - DT_COMPLEX128_REF = 118; - DT_HALF_REF = 119; - DT_RESOURCE_REF = 120; - DT_VARIANT_REF = 121; - DT_UINT32_REF = 122; - DT_UINT64_REF = 123; -} -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/c/c_api.h, -// https://www.tensorflow.org/code/tensorflow/go/tensor.go, -// https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.h, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, -// https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, -// https://www.tensorflow.org/code/tensorflow/python/framework/function.py) diff --git a/ge/common/proto/tensorflow/versions.proto b/ge/common/proto/tensorflow/versions.proto deleted file mode 100644 index 4e81548f..00000000 --- a/ge/common/proto/tensorflow/versions.proto +++ /dev/null @@ -1,39 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "VersionsProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Version information for a piece of serialized data -// -// There are different types of versions for each type of data -// (GraphDef, etc.), but they all have the same common shape -// described here. -// -// Each consumer has "consumer" and "min_producer" versions (specified -// elsewhere). A consumer is allowed to consume this data if -// -// producer >= min_producer -// consumer >= min_consumer -// consumer not in bad_consumers -// -message VersionDef { - // The version of the code that produced this data. - int32 producer = 1; - - // Any consumer below this version is not allowed to consume this data. - int32 min_consumer = 2; - - // Specific consumer versions which are disallowed (e.g. due to bugs). 
- repeated int32 bad_consumers = 3; -}; diff --git a/ge/executor/proto/dump_task.proto b/ge/executor/proto/dump_task.proto deleted file mode 100644 index a2411ddb..00000000 --- a/ge/executor/proto/dump_task.proto +++ /dev/null @@ -1,113 +0,0 @@ -syntax = "proto3"; -package toolkit.dump; - -enum OutputDataType { - DT_UNDEFINED = 0; - DT_FLOAT = 1; - DT_FLOAT16 = 2; - DT_INT8 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_UINT16 = 6; - DT_INT32 = 7; - DT_INT64 = 8; - DT_UINT32 = 9; - DT_UINT64 = 10; - DT_BOOL = 11; - DT_DOUBLE = 12; - DT_STRING = 13; - DT_DUAL_SUB_INT8 = 14; - DT_DUAL_SUB_UINT8 = 15; - DT_COMPLEX64 = 16; - DT_COMPLEX128 = 17; - DT_QINT8 = 18; - DT_QINT16 = 19; - DT_QINT32 = 20; - DT_QUINT8 = 21; - DT_QUINT16 = 22; - DT_RESOURCE = 23; - DT_STRING_REF = 24; - DT_DUAL = 25; - DT_VARIANT = 26; -} - -enum OutputFormat { - FORMAT_NCHW = 0; - FORMAT_NHWC = 1; - FORMAT_ND = 2; - FORMAT_NC1HWC0 = 3; - FORMAT_FRACTAL_Z = 4; - FORMAT_NC1C0HWPAD = 5; - FORMAT_NHWC1C0 = 6; - FORMAT_FSR_NCHW = 7; - FORMAT_FRACTAL_DECONV = 8; - FORMAT_C1HWNC0 = 9; - FORMAT_FRACTAL_DECONV_TRANSPOSE = 10; - FORMAT_FRACTAL_DECONV_SP_STRIDE_TRANS = 11; - FORMAT_NC1HWC0_C04 = 12; - FORMAT_FRACTAL_Z_C04 = 13; - FORMAT_CHWN = 14; - FORMAT_FRACTAL_DECONV_SP_STRIDE8_TRANS = 15; - FORMAT_HWCN = 16; - FORMAT_NC1KHKWHWC0 = 17; - FORMAT_BN_WEIGHT = 18; - FORMAT_FILTER_HWCK = 19; - FORMAT_HASHTABLE_LOOKUP_LOOKUPS=20; - FORMAT_HASHTABLE_LOOKUP_KEYS = 21; - FORMAT_HASHTABLE_LOOKUP_VALUE = 22; - FORMAT_HASHTABLE_LOOKUP_OUTPUT = 23; - FORMAT_HASHTABLE_LOOKUP_HITS=24; - FORMAT_C1HWNCoC0 = 25; - FORMAT_MD = 26; - FORMAT_NDHWC = 27; - FORMAT_FRACTAL_ZZ = 28; - FORMAT_FRACTAL_NZ = 29; - FORMAT_RESERVED = 30; -} - -message OriginalOp { - string name = 1; - uint32 output_index = 2; - OutputDataType data_type = 3; - OutputFormat format = 4; -} - -message Shape { - repeated uint64 dim = 1; -} - -message OpOutput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - OriginalOp original_op = 4; // the original op corresponding to the output - bytes data = 5; - uint64 size = 6; -} - -message OpInput { - OutputDataType data_type = 1; - OutputFormat format = 2; - Shape shape = 3; - bytes data = 4; - uint64 size = 5; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - bytes data = 2; - uint64 size = 3; -} - -message DumpData{ - string version = 1; - uint64 dump_time = 2; - repeated OpOutput output = 3; - repeated OpInput input = 4; - repeated OpBuffer buffer = 5; - string op_name = 6; -} diff --git a/ge/executor/proto/ge_ir.proto b/ge/executor/proto/ge_ir.proto deleted file mode 100644 index c0ef3071..00000000 --- a/ge/executor/proto/ge_ir.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. 
- DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type - DT_BF16 = 27; // bf16 type - DT_INT4 = 28; // int4 type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
-message NamedAttrs
-{
-  string name = 1;
-  map<string, AttrDef> attr = 2;
-}
-
-// Shape / dimension description, using row-major order
-message ShapeDef
-{
-  repeated int64 dim = 1;  // Size of each dimension
-}
-
-// Multidimensional data description
-message TensorDescriptor
-{
-  string name = 1;  // Optional parameter, tensor name
-
-  DataType dtype = 2;  // tensor datatype
-  ShapeDef shape = 3;  // Shape / dimension
-  string layout = 4;   // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
-
-  bool has_out_attr = 9;
-  int64 size = 10;
-  int64 weight_size = 11;
-  bool reuse_input = 12;
-  bool output_tensor = 13;
-  string device_type = 14;
-  bool input_tensor = 15;
-  int64 real_dim_cnt = 16;
-  int64 reuse_input_index = 17;
-  int64 data_offset = 18;
-  int64 cmps_size = 19;
-  string cmps_tab = 20;
-  int64 cmps_tab_offset = 21;
-
-  map<string, AttrDef> attr = 5;  // Set of extra parameter fields
-}
-
-// GeTensor definition
-message TensorDef
-{
-  TensorDescriptor desc = 1;  // Tensor description
-  bytes data = 2;             // Tensor data
-}
-
-
-// Operator description
-message OpDef
-{
-  string name = 1;  // name
-  string type = 2;  // type
-
-  repeated string input = 5;  // input original op name + outgoing index. op_name:index
-
-  map<string, AttrDef> attr = 10;  // Set of operator parameter fields
-
-  bool has_out_attr = 20;
-  int64 id = 21;
-  int64 stream_id = 22;
-  repeated string input_name = 23;
-  repeated string src_name = 24;
-  repeated int64 src_index = 25;
-  repeated string dst_name = 26;
-  repeated int64 dst_index = 27;
-  repeated int64 input_i = 28;
-  repeated int64 output_i = 29;
-  repeated int64 workspace = 30;
-  repeated int64 workspace_bytes = 31;
-  repeated bool is_input_const = 32;
-  repeated TensorDescriptor input_desc = 33;
-  repeated TensorDescriptor output_desc = 34;
-  repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
-  string name = 1;  // name
-
-  repeated string input = 4;   // Graph input
-  repeated string output = 5;  // Graph output
-
-  repeated OpDef op = 6;  // List of operators
-
-  map<string, AttrDef> attr = 11;  // Extended field
-}
-
-// model definition
-message ModelDef
-{
-  string name = 1;            // name
-  uint32 version = 2;         // IR Proto version
-  string custom_version = 3;  // User model version number, passed in by user
-
-  repeated GraphDef graph = 7;  // Graph definition; graph[0] represents the main graph of the model
-
-  map<string, AttrDef> attr = 11;  // Extended field
-}
-
diff --git a/ge/executor/proto/insert_op.proto b/ge/executor/proto/insert_op.proto
deleted file mode 100644
index 7d708865..00000000
--- a/ge/executor/proto/insert_op.proto
+++ /dev/null
@@ -1,140 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
-  repeated AippOpParams aipp_op = 1;
-  repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
-  enum InputFormat {
-    UNDEFINED = 0;
-    YUV420SP_U8 = 1;
-    XRGB8888_U8 = 2;
-    RGB888_U8 = 3;
-    YUV400_U8 = 4;
-    NC1HWC0DI_FP16 = 5;
-    NC1HWC0DI_S8 = 6;
-    ARGB8888_U8 = 7;
-    YUYV_U8 = 8;
-    YUV422SP_U8 = 9;
-    AYUV444_U8 = 10;
-    RAW10 = 11;
-    RAW12 = 12;
-    RAW16 = 13;
-    RAW24 = 14;
-    RGB16 = 15;
-    RGB20 = 16;
-    RGB24 = 17;
-    RGB8_IR = 18;
-    RGB16_IR = 19;
-    RGB24_IR = 20;
-  }
-
-  enum AippMode {
-    undefined = 0;
-    static = 1;
-    dynamic = 2;
-  }
-
-  // AIPP mode, distinguishing static AIPP from dynamic AIPP
-  AippMode aipp_mode = 1;
-
-  // related_input_rank is required; integer; valid range: >= 0 and <= the number of input Data ops; default 0.
-  // It identifies which model input is processed by AIPP: e.g. if the model has two inputs and the second one
-  // needs AIPP, set related_input_rank to 1.
-  uint32 related_input_rank = 2;
-
-  // related_input_name is optional and the top name of data node which inserts aipp
-  string related_input_name = 6;
-
-  // input_edge_idx is optional; integer; valid range: >= 0.
-  // It lets different outputs of one Data op receive different AIPP processing; if unset, AIPP is applied by
-  // default to all outputs of the model input selected by related_input_rank.
-  // The value must be <= the number of output edges of the Data op.
-  repeated uint32 input_edge_idx = 3;
-
-  // [Begin] dynamic AIPP parameters, ignored when static AIPP is configured
-  uint32 max_src_image_size = 4;
-
-  // Whether rotation is supported; disabled by default. Enabling it costs extra memory and performance.
-  bool support_rotation = 5;
-
-  // [End] dynamic AIPP parameters
-
-
-  // [Begin] static AIPP parameters, ignored when dynamic AIPP is configured
-  InputFormat input_format = 51;
-  bool csc_switch = 52;
-  float cpadding_value = 53;
-  bool rbuv_swap_switch = 54;
-  bool ax_swap_switch = 55;
-  bool single_line_mode = 56;
-
-  int32 src_image_size_w = 57;
-  int32 src_image_size_h = 58;
-
-  bool crop = 59;
-  int32 load_start_pos_w = 60;
-  int32 load_start_pos_h = 61;
-  int32 crop_size_w = 62;
-  int32 crop_size_h = 63;
-
-  bool resize = 64;
-  int32 resize_output_w = 65;
-  int32 resize_output_h = 66;
-
-  bool padding = 67;
-  int32 left_padding_size = 68;
-  int32 right_padding_size = 69;
-  int32 top_padding_size = 70;
-  int32 bottom_padding_size = 71;
-  float padding_value = 72;
-
-  int32 mean_chn_0 = 10;
-  int32 mean_chn_1 = 11;
-  int32 mean_chn_2 = 12;
-  int32 mean_chn_3 = 19;
-  float min_chn_0 = 13;
-  float min_chn_1 = 14;
-  float min_chn_2 = 15;
-  float min_chn_3 = 20;
-  repeated float var_reci_chn_0 = 16;
-  repeated float var_reci_chn_1 = 17;
-  repeated float var_reci_chn_2 = 18;
-  repeated float var_reci_chn_3 = 21;
-
-  repeated int32 matrix_r0c0 = 30;
-  repeated int32 matrix_r0c1 = 31;
-  repeated int32 matrix_r0c2 = 32;
-  repeated int32 matrix_r1c0 = 33;
-  repeated int32 matrix_r1c1 = 34;
-  repeated int32 matrix_r1c2 = 35;
-  repeated int32 matrix_r2c0 = 36;
-  repeated int32 matrix_r2c1 = 37;
-  repeated int32 matrix_r2c2 = 38;
-  repeated int32 output_bias_0 = 39;
-  repeated int32 output_bias_1 = 40;
-  repeated int32 output_bias_2 = 41;
-  repeated int32 input_bias_0 = 42;
-  repeated int32 input_bias_1 = 43;
-  repeated int32 input_bias_2 = 44;
-
-  // [End] static AIPP parameters
-
-  // The n number that is used for raw/rgbir data into f16 transformation.
-  // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-  uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-  enum MultiShapeMode {
-    batch = 0;       // dynamic batch
-    resolution = 1;  // dynamic resolution, reserved for extension
-  }
-
-  MultiShapeMode mode = 1;        // operating mode
-  uint32 related_input_rank = 2;  // which model input the new op is inserted at
-
-
-  repeated uint32 batch_list = 11;  // batch_list values; the number of entries must be between 2 and 8
-}
diff --git a/ge/executor/proto/om.proto b/ge/executor/proto/om.proto
deleted file mode 100644
index e15e5f80..00000000
--- a/ge/executor/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0. You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
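For reference on the AIPP fields above: the per-channel triples (mean_chn_x, min_chn_x, var_reci_chn_x) combine into one normalization step, and raw_rgbir_to_f16_n applies the x/(2^n) scaling stated in its comment. A minimal C++ sketch, assuming the conventional Ascend AIPP semantics out = (in - mean - min) * var_reci; the function names are illustrative, not part of this proto or repo:

    #include <cstdint>

    // Per-channel AIPP normalization of one input pixel value:
    // out = (in - mean_chn_x - min_chn_x) * var_reci_chn_x
    inline float AippNormalizeChannel(uint8_t in, int32_t mean, float min, float var_reci) {
      return (static_cast<float>(in) - static_cast<float>(mean) - min) * var_reci;
    }

    // raw/rgbir-to-f16 scaling from the comment on raw_rgbir_to_f16_n:
    // x / (2^n); with n == 0 this divides by 1, i.e. no transform.
    inline float RawRgbirToF16(uint32_t x, uint32_t n) {
      return static_cast<float>(x) / static_cast<float>(1u << n);
    }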
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -enum TargetType -{ - MINI = 0; - TINY = 1; - LITE = 2; -} - -// offline model -message ModelDef { - string name = 1; - uint32 version = 2; - - uint64 memory_size = 10; - uint32 stream_num = 11; - uint32 event_num = 12; - uint64 weight_size = 13; - uint32 label_num = 15; - repeated OpDef op = 20; - TargetType target_type = 23; - - map attr = 30; -}; - -// operator define -message OpDef { - string name = 1; - string type = 2; - - uint32 id = 3; - uint32 stream_id = 4; - - repeated string input_name = 5; - - repeated string src_name = 8; - repeated int32 src_index = 9; - repeated int64 input = 10; - repeated int64 output = 11; - repeated TensorDescriptor input_desc = 12; - repeated TensorDescriptor output_desc = 13; - repeated WeightDef weights = 14; - repeated string dst_name = 15; - repeated int32 dst_index = 16; - - repeated int64 workspace = 20; - repeated uint32 workspace_bytes = 21; - - repeated string weight_name = 22; - repeated bool is_input_const = 23; - - map attr = 30; - - QuantizeFactorParams quantize_factor = 31; - - oneof op_params { - // start at 100 here - SendOpParams sender_param = 100; - RecvOpParams receiver_param = 200; - ConvolutionOpParams convolution_param = 300; - PoolingOpParams pooling_param = 400; - EltwiseOpParams eltwise_param = 500; - BatchNormOpParams batchnorm_param = 600; - ScaleOpParams scale_param = 700; - FullConnectionOpParams full_connection_param = 800; - SoftmaxOpParams softmax_param = 900; - ActivationOpParams activation_param = 1000; - ReshapeOpParams reshape_param = 1100; - } -}; - -message SendOpParams { - uint32 event_id = 1; -}; - -message RecvOpParams { - uint32 event_id = 1; -}; - -enum QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - 
bool ceil_mode = 13; - int32 data_mode = 14; - - float alpha = 20; - float beta = 21; - repeated uint32 before_pad = 22; -}; - -message EltwiseOpParams { - int32 mode = 1; - repeated float coeff = 2; - float alpha = 3; - float beta = 4; - repeated WeightDef weight = 5; - bool relu_flag = 6; -}; - -message ActivationOpParams { - int32 mode = 1; - float coef = 2; - float alpha = 3; - float beta = 4; -}; - -message BatchNormOpParams { - int32 mode = 1; - - float alpha = 2; - float beta = 3; - double epsilon = 4;//optinal,[default = 1e-5] - bool use_global_stats = 5; //optinal,by default true,testing mode - float moving_average_fraction = 6; //optinal,[default = .999]; - - WeightDef estimated_mean = 7; - WeightDef estimated_variance = 8; - - WeightDef scale = 9; - WeightDef bias = 10; -}; - -message ScaleOpParams { - WeightDef scale = 1; - WeightDef bias = 2; -}; - -message ReshapeOpParams { - float alpha = 1; - float beta = 2; - ShapeDef shape = 3; - int32 axis = 4; - int32 num_axes = 5; - int32 format = 6; -}; - -message SoftmaxOpParams { - int32 algo = 1; - int32 mode = 2; - float alpha = 3; - float beta = 4; -}; - -message FullConnectionOpParams { - WeightDef filter = 1; - WeightDef bias = 2; - uint32 num_output = 3; - bool relu_flag = 12; -}; - -message FlattenOpParams { - float alpha = 1; - float beta = 2; - int32 start_axis = 3; - int32 end_axis = 4; -} - -message AddLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message MulLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message AddOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message MulOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message SubOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message BiasAddOpParams { - float alpha = 1; - float beta = 2; - - WeightDef bias = 10; -}; - -message MatMulOpParams { - float alpha = 1; - float beta = 2; - bool transposeX = 3; - bool transposeW = 4; - - WeightDef filter = 10; - WeightDef bias = 12; -}; - -message RsqrtOpParams { - float alpha = 1; - float beta = 2; -}; - - -message WeightDef { - int32 format = 1; - int32 data_type = 2; - ShapeDef shape = 3; - bytes data = 4; - int64 data_offset = 5; - uint32 cmps_size = 6; - bytes cmps_tab = 7; - int64 cmps_tab_offset = 10; - CompressInfo cmps_info = 8; - AllOffsetQuantizeInfo alloffset_quantize_info = 11; -} - -message ShapeDef { - repeated int64 dim = 1; -} - -enum DeviceType { - NPU = 0; // In default, we will use NPU. 
- CPU = 1; // CPU -} - -message AllOffsetQuantizeInfo { - float scale = 1; - int32 offset = 2; -} - -message TensorDescriptor { - int32 format = 1; - int32 data_type = 2; - repeated int64 dim = 3; - uint32 size = 4; - bool reuse_input = 5; - bool output_tensor = 7; - DeviceType device_type = 8; - bool input_tensor = 9; - uint32 real_dim_cnt = 10; - uint32 reuse_input_index = 11; - AllOffsetQuantizeInfo alloffset_quantize_info = 12; -} - -message CompressInfo { - int32 blockRow = 1; // block row - int32 blockCol = 2; // block col - int32 fractalK = 3; // fractal K - int32 fractalN = 4; // fractal N - int32 lastFractalK = 5; // K of last fractal - int32 lastFractalN = 6; // N of last fractal - int32 cubeSize = 7; // cube's length - int32 loadDir = 8; // data load directtiono 0:col load 1:row load -} - -message AttrDef { - message ListValue { - repeated string s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated uint32 u = 6 [packed = true]; // "list(uint)" - repeated bytes bt = 7; - } - - oneof value { - string s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - uint32 u = 6; // "uint32" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs { - string name = 1; - map attr = 2; -} - diff --git a/ge/executor/proto/op_mapping.proto b/ge/executor/proto/op_mapping.proto deleted file mode 100644 index d626eb49..00000000 --- a/ge/executor/proto/op_mapping.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; -package toolkit.aicpu.dump; - -message Shape { - repeated uint64 dim = 1; -} - -message Output { - int32 data_type = 1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - string original_name = 5; - int32 original_output_index = 6; - int32 original_output_data_type = 7; - int32 original_output_format = 8; - uint64 size = 9; - Shape origin_shape = 10; -} - -message Input { - int32 data_type =1; - int32 format = 2; - Shape shape = 3; - uint64 address = 4; - uint64 size = 5; - Shape origin_shape = 6; -} - -enum BufferType { - L1 = 0; -} - -message OpBuffer { - BufferType buffer_type = 1; - uint64 address = 2; - uint64 size = 3; -} - -message Op { - string op_name = 1; - string op_type = 2; -} - -message Task { - uint32 task_id = 1; - uint32 stream_id = 2; - Op op = 3; - repeated Output output = 4; - bool end_graph = 5; - repeated Input input = 6; - repeated OpBuffer buffer = 7; -} - -message OpMappingInfo { - string dump_path = 1; - oneof model_name_param { - string model_name = 2; - } - oneof model_id_param { - uint32 model_id = 3; - } - oneof step_id { - uint64 step_id_addr = 4; - } - oneof iterations_per_loop { - uint64 iterations_per_loop_addr = 5; - } - oneof loop_cond { - uint64 loop_cond_addr = 6; - } - uint32 flag = 7; // 0x01 load, 0x00 unload - repeated Task task = 8; - string dump_step = 9; -} \ No newline at end of file diff --git a/ge/executor/proto/task.proto b/ge/executor/proto/task.proto deleted file mode 100644 index 0da5631e..00000000 --- a/ge/executor/proto/task.proto +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. 
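A note on the op_mapping.proto message that just ended: the oneof wrappers in OpMappingInfo let a loader distinguish "field not set" from "field set to 0" for the optional model ids and addresses, and flag carries the load/unload bit described in its comment. A minimal sketch of filling the message for a load, assuming the C++ classes protoc would generate from op_mapping.proto (the dump path and op names are illustrative):

    #include "op_mapping.pb.h"  // assumed protoc output for op_mapping.proto

    toolkit::aicpu::dump::OpMappingInfo BuildLoadInfo(uint32_t model_id) {
      toolkit::aicpu::dump::OpMappingInfo info;
      info.set_dump_path("/tmp/npu_dump/");  // illustrative path
      info.set_model_id(model_id);           // selects the model_id_param branch of its oneof
      info.set_flag(0x01);                   // 0x01 load, 0x00 unload, per the field comment
      auto *task = info.add_task();
      task->set_task_id(0);
      task->set_stream_id(0);
      task->mutable_op()->set_op_name("conv1");  // illustrative
      task->mutable_op()->set_op_type("Conv2D");
      return info;
    }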
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 
op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/ge/ge_local_engine/proto/task.proto b/ge/ge_local_engine/proto/task.proto deleted file mode 100644 index 0da5631e..00000000 --- a/ge/ge_local_engine/proto/task.proto +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes 
kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/ge/offline/proto/ge_ir.proto b/ge/offline/proto/ge_ir.proto deleted file mode 100644 index c0ef3071..00000000 --- a/ge/offline/proto/ge_ir.proto +++ /dev/null @@ -1,193 +0,0 @@ -syntax = "proto3"; - -package ge.proto; - -enum DataType -{ - DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set. - DT_FLOAT = 1; // float type - DT_FLOAT16 = 2; // fp16 type - DT_INT8 = 3; // int8 type - DT_UINT8 = 4; // uint8 type - DT_INT16 = 5; // int16 type - DT_UINT16 = 6; // uint16 type - DT_INT32 = 7; // - DT_INT64 = 8; // int64 type - DT_UINT32 = 9; // unsigned int32 - DT_UINT64 = 10; // unsigned int64 - DT_BOOL = 11; // bool type - DT_DOUBLE = 12; // double type - DT_STRING = 13; // string type - DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */ - DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */ - DT_COMPLEX64 = 16; // complex64 type - DT_COMPLEX128 = 17; // complex128 type - DT_QINT8 = 18; // qint8 type - DT_QINT16 = 19; // qint16 type - DT_QINT32 = 20; // qint32 type - DT_QUINT8 = 21; // quint8 type - DT_QUINT16 = 22; // quint16 type - DT_RESOURCE = 23; // resource type - DT_STRING_REF = 24; // string_ref type - DT_DUAL = 25; /**< dual output type */ - DT_VARIANT = 26; // variant type - DT_BF16 = 27; // bf16 type - DT_INT4 = 28; // int4 type -} - -message AttrDef -{ - message ListValue - { - enum ListValueType{ - VT_LIST_NONE = 0; - VT_LIST_STRING = 1; - VT_LIST_INT = 2; - VT_LIST_FLOAT = 3; - VT_LIST_BOOL = 4; - VT_LIST_BYTES = 5; - VT_LIST_TENSOR_DESC = 6; - VT_LIST_TENSOR = 7; - VT_LIST_GRAPH = 8; - VT_LIST_NAMED_ATTRS = 9; - VT_LIST_DATA_TYPE = 10; - } - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3; // "list(int)" - repeated float f = 4; // "list(float)" - repeated bool b = 5; // "list(bool)" - repeated bytes bt = 7; - repeated TensorDescriptor td = 8; - repeated TensorDef t = 9; - repeated GraphDef g = 10; - repeated NamedAttrs na = 11; - repeated int64 dt = 12; // list ge::DataType - - ListValueType val_type = 20; - } - - message ListListInt{ - message ListInt{ - repeated int64 list_i = 1; // list int - } - repeated ListInt list_list_i = 1; // list list int - } - - oneof value - { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; // Used to support attr 
nesting - TensorDescriptor td = 11; // GeTensorDesc type - TensorDef t = 12; // GeTensor type - GraphDef g = 13; // Graph type - ListListInt list_list_int = 14; // List List Int type - int64 dt = 15; // ge::DataType - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs -{ - string name = 1; - map attr = 2; -} - -// Shape / dimension description, using row-major order -message ShapeDef -{ - repeated int64 dim = 1; // Size of each dimension -} - -// Multidimensional data description -message TensorDescriptor -{ - string name = 1; // Optional parameter, tensor name - - DataType dtype = 2; // tensor datatype - ShapeDef shape = 3; // Shape / dimension - string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND" - - bool has_out_attr = 9; - int64 size = 10; - int64 weight_size = 11; - bool reuse_input = 12; - bool output_tensor = 13; - string device_type = 14; - bool input_tensor =15; - int64 real_dim_cnt = 16; - int64 reuse_input_index = 17; - int64 data_offset = 18; - int64 cmps_size = 19; - string cmps_tab = 20; - int64 cmps_tab_offset = 21; - - map attr = 5; // Set of extra parameter fields -} - -// GeTensor definition -message TensorDef -{ - TensorDescriptor desc = 1; // Tensor description - bytes data = 2; // Tensor data -} - - -// Operator description -message OpDef -{ - string name = 1; // name - string type = 2; // type - - repeated string input = 5; // input original op name + outgoing index. op_name:index - - map attr = 10; // Set of operator parameter fields - - bool has_out_attr = 20; - int64 id = 21; - int64 stream_id =22; - repeated string input_name = 23; - repeated string src_name = 24; - repeated int64 src_index = 25; - repeated string dst_name = 26; - repeated int64 dst_index = 27; - repeated int64 input_i = 28; - repeated int64 output_i = 29; - repeated int64 workspace = 30; - repeated int64 workspace_bytes = 31; - repeated bool is_input_const = 32; - repeated TensorDescriptor input_desc = 33; - repeated TensorDescriptor output_desc = 34; - repeated string subgraph_name = 35; -} - -// Graph definition -message GraphDef -{ - string name = 1; // name - - repeated string input = 4; // Graph input - repeated string output = 5; // Graph output - - repeated OpDef op = 6; // List of operators - - map attr = 11; // Extended field -} - -// model definition -message ModelDef -{ - string name = 1; // name - uint32 version = 2; // IR Proto verion - string custom_version = 3; // User model version number, passed in by user - - repeated GraphDef graph = 7; // Graph definition,graph[0] represents the main diagram in modeldef - - map attr = 11; // Extended field -} - diff --git a/ge/offline/proto/insert_op.proto b/ge/offline/proto/insert_op.proto deleted file mode 100644 index 7d708865..00000000 --- a/ge/offline/proto/insert_op.proto +++ /dev/null @@ -1,140 +0,0 @@ -syntax = "proto3"; - -package domi; - -message InsertNewOps { - repeated AippOpParams aipp_op = 1; - repeated MultiShapeOpParams multi_shape_op = 2; -} - -message AippOpParams { - enum InputFormat { - UNDEFINED = 0; - YUV420SP_U8 = 1; - XRGB8888_U8 = 2; - RGB888_U8 = 3; - YUV400_U8 = 4; - NC1HWC0DI_FP16 = 5; - NC1HWC0DI_S8 = 6; - ARGB8888_U8 = 7; - YUYV_U8 = 8; - YUV422SP_U8 = 9; - AYUV444_U8 = 10; - RAW10 = 11; - RAW12 = 12; - RAW16 = 13; - RAW24 = 14; - RGB16 = 15; - RGB20 = 16; - RGB24 = 17; - RGB8_IR = 18; - RGB16_IR = 19; - RGB24_IR = 20; - } - - enum AippMode { - undefined = 0; - static = 1; - dynamic = 2; - } - - // 
AIPP mode, distinguishing static AIPP from dynamic AIPP
-  AippMode aipp_mode = 1;
-
-  // related_input_rank is required; integer; valid range: >= 0 and <= the number of input Data ops; default 0.
-  // It identifies which model input is processed by AIPP: e.g. if the model has two inputs and the second one
-  // needs AIPP, set related_input_rank to 1.
-  uint32 related_input_rank = 2;
-
-  // related_input_name is optional and the top name of data node which inserts aipp
-  string related_input_name = 6;
-
-  // input_edge_idx is optional; integer; valid range: >= 0.
-  // It lets different outputs of one Data op receive different AIPP processing; if unset, AIPP is applied by
-  // default to all outputs of the model input selected by related_input_rank.
-  // The value must be <= the number of output edges of the Data op.
-  repeated uint32 input_edge_idx = 3;
-
-  // [Begin] dynamic AIPP parameters, ignored when static AIPP is configured
-  uint32 max_src_image_size = 4;
-
-  // Whether rotation is supported; disabled by default. Enabling it costs extra memory and performance.
-  bool support_rotation = 5;
-
-  // [End] dynamic AIPP parameters
-
-
-  // [Begin] static AIPP parameters, ignored when dynamic AIPP is configured
-  InputFormat input_format = 51;
-  bool csc_switch = 52;
-  float cpadding_value = 53;
-  bool rbuv_swap_switch = 54;
-  bool ax_swap_switch = 55;
-  bool single_line_mode = 56;
-
-  int32 src_image_size_w = 57;
-  int32 src_image_size_h = 58;
-
-  bool crop = 59;
-  int32 load_start_pos_w = 60;
-  int32 load_start_pos_h = 61;
-  int32 crop_size_w = 62;
-  int32 crop_size_h = 63;
-
-  bool resize = 64;
-  int32 resize_output_w = 65;
-  int32 resize_output_h = 66;
-
-  bool padding = 67;
-  int32 left_padding_size = 68;
-  int32 right_padding_size = 69;
-  int32 top_padding_size = 70;
-  int32 bottom_padding_size = 71;
-  float padding_value = 72;
-
-  int32 mean_chn_0 = 10;
-  int32 mean_chn_1 = 11;
-  int32 mean_chn_2 = 12;
-  int32 mean_chn_3 = 19;
-  float min_chn_0 = 13;
-  float min_chn_1 = 14;
-  float min_chn_2 = 15;
-  float min_chn_3 = 20;
-  repeated float var_reci_chn_0 = 16;
-  repeated float var_reci_chn_1 = 17;
-  repeated float var_reci_chn_2 = 18;
-  repeated float var_reci_chn_3 = 21;
-
-  repeated int32 matrix_r0c0 = 30;
-  repeated int32 matrix_r0c1 = 31;
-  repeated int32 matrix_r0c2 = 32;
-  repeated int32 matrix_r1c0 = 33;
-  repeated int32 matrix_r1c1 = 34;
-  repeated int32 matrix_r1c2 = 35;
-  repeated int32 matrix_r2c0 = 36;
-  repeated int32 matrix_r2c1 = 37;
-  repeated int32 matrix_r2c2 = 38;
-  repeated int32 output_bias_0 = 39;
-  repeated int32 output_bias_1 = 40;
-  repeated int32 output_bias_2 = 41;
-  repeated int32 input_bias_0 = 42;
-  repeated int32 input_bias_1 = 43;
-  repeated int32 input_bias_2 = 44;
-
-  // [End] static AIPP parameters
-
-  // The n number that is used for raw/rgbir data into f16 transformation.
-  // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-  uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-  enum MultiShapeMode {
-    batch = 0;       // dynamic batch
-    resolution = 1;  // dynamic resolution, reserved for extension
-  }
-
-  MultiShapeMode mode = 1;        // operating mode
-  uint32 related_input_rank = 2;  // which model input the new op is inserted at
-
-
-  repeated uint32 batch_list = 11;  // batch_list values; the number of entries must be between 2 and 8
-}
diff --git a/ge/offline/proto/om.proto b/ge/offline/proto/om.proto
deleted file mode 100644
index e15e5f80..00000000
--- a/ge/offline/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0. You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -enum TargetType -{ - MINI = 0; - TINY = 1; - LITE = 2; -} - -// offline model -message ModelDef { - string name = 1; - uint32 version = 2; - - uint64 memory_size = 10; - uint32 stream_num = 11; - uint32 event_num = 12; - uint64 weight_size = 13; - uint32 label_num = 15; - repeated OpDef op = 20; - TargetType target_type = 23; - - map attr = 30; -}; - -// operator define -message OpDef { - string name = 1; - string type = 2; - - uint32 id = 3; - uint32 stream_id = 4; - - repeated string input_name = 5; - - repeated string src_name = 8; - repeated int32 src_index = 9; - repeated int64 input = 10; - repeated int64 output = 11; - repeated TensorDescriptor input_desc = 12; - repeated TensorDescriptor output_desc = 13; - repeated WeightDef weights = 14; - repeated string dst_name = 15; - repeated int32 dst_index = 16; - - repeated int64 workspace = 20; - repeated uint32 workspace_bytes = 21; - - repeated string weight_name = 22; - repeated bool is_input_const = 23; - - map attr = 30; - - QuantizeFactorParams quantize_factor = 31; - - oneof op_params { - // start at 100 here - SendOpParams sender_param = 100; - RecvOpParams receiver_param = 200; - ConvolutionOpParams convolution_param = 300; - PoolingOpParams pooling_param = 400; - EltwiseOpParams eltwise_param = 500; - BatchNormOpParams batchnorm_param = 600; - ScaleOpParams scale_param = 700; - FullConnectionOpParams full_connection_param = 800; - SoftmaxOpParams softmax_param = 900; - ActivationOpParams activation_param = 1000; - ReshapeOpParams reshape_param = 1100; - } -}; - -message SendOpParams { - uint32 event_id = 1; -}; - -message RecvOpParams { - uint32 event_id = 1; -}; - -enum QuantizeScaleType -{ - VECTOR_SCALE = 0; - SCALAR_SCALE = 1; -} - -enum QuantizeScaleMode -{ - NORMAL_MODE = 0; - SQRT_MODE = 1; -} - -enum QuantizeAlgorithm -{ - NON_OFFSET_ALGO = 0; - HALF_OFFSET_ALGO = 1; - ALL_OFFSET_ALGO = 2; -} -message QuantizeFactor -{ - QuantizeScaleMode scale_mode = 1; - bytes scale_value = 2; - int64 scale_offset = 3; - bytes offset_data_value = 4; - int64 offset_data_offset = 5; - bytes offset_weight_value = 6; - int64 offset_weight_offset = 7; - bytes offset_pad_value = 8; - int64 offset_pad_offset = 9; -}; - -message QuantizeCalcFactor -{ - bytes offsetw = 1; - int64 offsetw_offset = 2; - bytes offsetd = 3; - int64 offsetd_offset = 4; - bytes scalereq = 5; - int64 scaledreq_offset = 6; - bytes offsetdnext = 7; - int64 offsetdnext_offset = 8; -} - -message QuantizeFactorParams -{ - QuantizeAlgorithm quantize_algo = 1; - QuantizeScaleType scale_type = 2; - QuantizeFactor quantize_param = 3; - QuantizeFactor dequantize_param = 4; - QuantizeFactor requantize_param = 5; - QuantizeCalcFactor quantizecalc_param = 6; -}; - -message ConvolutionOpParams { - int32 mode = 1; - int32 algo = 2; - int32 pad_mode = 3; - uint32 group = 4; - uint32 num_output = 5; - - repeated uint32 pad = 10; - repeated uint32 stride = 11; - repeated uint32 dilation = 12; - repeated uint32 kernel = 13; - - float alpha = 20; - float beta = 21; - - WeightDef filter = 40; - WeightDef bias = 41; - - bool relu_flag = 62; - repeated uint32 adj = 70; - repeated uint32 target_shape = 71; - repeated uint32 before_pad = 72; -}; - -message PoolingOpParams { - int32 mode = 1; - int32 nan_opt = 2; - int32 pad_mode = 3; - bool global_pooling = 4; - - repeated uint32 window = 10; - repeated uint32 pad = 11; - repeated uint32 stride = 12; - 
bool ceil_mode = 13; - int32 data_mode = 14; - - float alpha = 20; - float beta = 21; - repeated uint32 before_pad = 22; -}; - -message EltwiseOpParams { - int32 mode = 1; - repeated float coeff = 2; - float alpha = 3; - float beta = 4; - repeated WeightDef weight = 5; - bool relu_flag = 6; -}; - -message ActivationOpParams { - int32 mode = 1; - float coef = 2; - float alpha = 3; - float beta = 4; -}; - -message BatchNormOpParams { - int32 mode = 1; - - float alpha = 2; - float beta = 3; - double epsilon = 4;//optinal,[default = 1e-5] - bool use_global_stats = 5; //optinal,by default true,testing mode - float moving_average_fraction = 6; //optinal,[default = .999]; - - WeightDef estimated_mean = 7; - WeightDef estimated_variance = 8; - - WeightDef scale = 9; - WeightDef bias = 10; -}; - -message ScaleOpParams { - WeightDef scale = 1; - WeightDef bias = 2; -}; - -message ReshapeOpParams { - float alpha = 1; - float beta = 2; - ShapeDef shape = 3; - int32 axis = 4; - int32 num_axes = 5; - int32 format = 6; -}; - -message SoftmaxOpParams { - int32 algo = 1; - int32 mode = 2; - float alpha = 3; - float beta = 4; -}; - -message FullConnectionOpParams { - WeightDef filter = 1; - WeightDef bias = 2; - uint32 num_output = 3; - bool relu_flag = 12; -}; - -message FlattenOpParams { - float alpha = 1; - float beta = 2; - int32 start_axis = 3; - int32 end_axis = 4; -} - -message AddLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message MulLimitedOpParams { - float alpha = 1; - float beta = 2; - int32 axis = 3; - bool broadcast = 4; - - repeated WeightDef weight = 10; -}; - -message AddOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message MulOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message SubOpParams { - float alpha = 1; - float beta = 2; - - repeated WeightDef weight = 10; -}; - -message BiasAddOpParams { - float alpha = 1; - float beta = 2; - - WeightDef bias = 10; -}; - -message MatMulOpParams { - float alpha = 1; - float beta = 2; - bool transposeX = 3; - bool transposeW = 4; - - WeightDef filter = 10; - WeightDef bias = 12; -}; - -message RsqrtOpParams { - float alpha = 1; - float beta = 2; -}; - - -message WeightDef { - int32 format = 1; - int32 data_type = 2; - ShapeDef shape = 3; - bytes data = 4; - int64 data_offset = 5; - uint32 cmps_size = 6; - bytes cmps_tab = 7; - int64 cmps_tab_offset = 10; - CompressInfo cmps_info = 8; - AllOffsetQuantizeInfo alloffset_quantize_info = 11; -} - -message ShapeDef { - repeated int64 dim = 1; -} - -enum DeviceType { - NPU = 0; // In default, we will use NPU. 
- CPU = 1; // CPU -} - -message AllOffsetQuantizeInfo { - float scale = 1; - int32 offset = 2; -} - -message TensorDescriptor { - int32 format = 1; - int32 data_type = 2; - repeated int64 dim = 3; - uint32 size = 4; - bool reuse_input = 5; - bool output_tensor = 7; - DeviceType device_type = 8; - bool input_tensor = 9; - uint32 real_dim_cnt = 10; - uint32 reuse_input_index = 11; - AllOffsetQuantizeInfo alloffset_quantize_info = 12; -} - -message CompressInfo { - int32 blockRow = 1; // block row - int32 blockCol = 2; // block col - int32 fractalK = 3; // fractal K - int32 fractalN = 4; // fractal N - int32 lastFractalK = 5; // K of last fractal - int32 lastFractalN = 6; // N of last fractal - int32 cubeSize = 7; // cube's length - int32 loadDir = 8; // data load directtiono 0:col load 1:row load -} - -message AttrDef { - message ListValue { - repeated string s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated uint32 u = 6 [packed = true]; // "list(uint)" - repeated bytes bt = 7; - } - - oneof value { - string s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - uint32 u = 6; // "uint32" - bytes bt = 7; - ListValue list = 1; // any "list(...)" - NamedAttrs func = 10; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NamedAttrs { - string name = 1; - map attr = 2; -} - diff --git a/ge/offline/proto/task.proto b/ge/offline/proto/task.proto deleted file mode 100644 index 0da5631e..00000000 --- a/ge/offline/proto/task.proto +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * Apache License for more details at - * http://www.apache.org/licenses/LICENSE-2.0 - */ -syntax = "proto3"; - -package domi; - -message ModelTaskDef { - string version = 1; - - map attr = 9; // Extended field - repeated TaskDef task = 10; - - uint64 memory_size = 11; - uint32 stream_num = 12; - uint32 event_num = 13; - uint64 weight_size = 14; - - repeated bytes op = 15; // input/output opdef in bytes - - uint64 base_addr = 16; // base addr - uint64 weight_addr = 17; // weight addr - uint32 batch_num = 18; -} - - -message TaskDef { - uint32 id = 1; - uint32 type = 2; - - uint32 stream_id = 10; - uint32 event_id = 11; - - KernelDef kernel = 20; - KernelExDef kernel_ex = 21; - KernelHcclDef kernel_hccl = 25; - EventExDef event_ex = 26; - LogTimeStampDef log_timestamp = 28; - - uint32 label_id = 30; - - MemcpyAsyncDef memcpy_async = 31; - StreamSwitchDef stream_switch = 32; - StreamActiveDef stream_active = 33; - bytes private_def = 34; - uint64 ops_kernel_store_ptr = 35; // adjustments to other fields in the future - StreamSwitchNDef stream_switch_n = 36; - - LabelSetDef label_set = 37; - LabelGotoExDef label_goto_ex = 38; - LabelSwitchByIndexDef label_switch_by_index = 39; - KernelDefWithHandle kernel_with_handle = 40; -} - -message KernelDef { - KernelContext context = 1; - - string stub_func = 10; - uint32 block_dim = 11; - uint32 args_size = 12; - bytes args = 13; - bytes sm_desc = 14; - bytes flowtable = 15; - string so_name = 16; - string kernel_name = 17; - bytes kernel_ext_info = 18; - uint32 kernel_ext_info_size = 19; -} - -message KernelDefWithHandle { - KernelContext context = 1; - - uint64 handle = 10; - string dev_func = 11; - uint32 block_dim = 12; - uint32 args_size = 13; - bytes args = 14; - bytes sm_desc = 15; - string original_kernel_key = 16; - string node_info = 17; -} - -message KernelContext { - uint32 kernel_type = 1; - uint32 op_id = 2; // OP type in CCE - uint32 kernel_func_id = 3; - uint32 op_index = 4; // TE/Custom operator - bool is_flowtable = 5; // Identify whether args is a flowtable structure - bytes args_offset = 6; // args offset information - uint32 args_count = 7; // args count - repeated uint32 origin_op_index = 8; -} - - -message KernelExDef { - uint32 flags = 1; - - uint32 op_index = 4; - uint32 args_size = 12; - bytes args = 13; - bytes task_info = 14; // serialized nodeDef, funcDef, inputoutput - uint32 task_info_size = 15; - bytes kernel_ext_info = 16; - uint32 kernel_ext_info_size = 17; -} - - -message KernelHcclDef { - uint32 op_index = 8; - string hccl_type = 9; -} - - -message EventExDef { - uint32 op_index = 1; - uint32 event_type = 2; -} - -message LogTimeStampDef { - uint64 logid = 1; - bool notify = 2; - uint32 flat = 3; -} - -message MemcpyAsyncDef { - uint64 dst = 1; - uint64 dst_max = 2; - uint64 src = 3; - uint64 count = 4; - uint32 kind = 5; - uint32 op_index = 6; -} - -message StreamSwitchDef { - uint32 op_index = 1; - uint32 true_stream_id = 2; - int64 value = 3; - uint64 value_ptr = 4; - uint32 data_type = 5; -} - -message StreamActiveDef { - uint32 op_index = 1; - uint32 active_stream_id = 2; -} - -message StreamSwitchNDef { - uint32 op_index = 1; - uint32 size = 2; - repeated int64 target_value = 3; - repeated uint32 true_stream_id = 4; - uint32 element_size = 5; - uint32 data_type = 6; -} - -message LabelSetDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelGotoExDef { - uint32 op_index = 1; - uint32 label_id = 2; - uint32 model_id = 3; -} - -message LabelSwitchByIndexDef { - 
uint32 op_index = 1; - uint32 label_max = 2; -} diff --git a/ge/proto/caffe/caffe.proto b/ge/proto/caffe/caffe.proto deleted file mode 100644 index 20615fed..00000000 --- a/ge/proto/caffe/caffe.proto +++ /dev/null @@ -1,1829 +0,0 @@ -/** - * This file is part of Open Source Software caffe, version 1.0 https://github.com/BVLC/caffe - * - * This file is included by GraphEngine so as to support model format conversion from caffe model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto2"; - -package domi.caffe; - -// Specifies the shape (dimensions) of a Blob. -message BlobShape { - repeated int64 dim = 1 [packed = true]; -} - -message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; - optional bytes int8_data = 10; - repeated int32 int32_data = 11 [packed = true]; - repeated uint64 uint64_data = 12 [packed = true]; - // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. -message BlobProtoVector { - repeated BlobProto blobs = 1; -} - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. - repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [default = false]; -} - -message FillerParameter { - // The filler type. - optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. 
- repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [default = false]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 42 (last added: layer_wise_reduce) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. - ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. 
-  optional int32 display = 6;
-  // Display the loss averaged over the last average_loss iterations
-  optional int32 average_loss = 33 [default = 1];
-  optional int32 max_iter = 7; // the maximum number of iterations
-  // accumulate gradients over `iter_size` x `batch_size` instances
-  optional int32 iter_size = 36 [default = 1];
-
-  // The learning rate decay policy. The currently implemented learning rate
-  // policies are as follows:
-  //    - fixed: always return base_lr.
-  //    - step: return base_lr * gamma ^ (floor(iter / step))
-  //    - exp: return base_lr * gamma ^ iter
-  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
-  //    - multistep: similar to step but it allows non-uniform steps defined by
-  //      stepvalue
-  //    - poly: the effective learning rate follows a polynomial decay, to be
-  //      zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power)
-  //    - sigmoid: the effective learning rate follows a sigmoid decay
-  //      return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize))))
-  //
-  // where base_lr, max_iter, gamma, step, stepvalue and power are defined
-  // in the solver parameter protocol buffer, and iter is the current iteration.
-  optional string lr_policy = 8;
-  optional float gamma = 9; // The parameter to compute the learning rate.
-  optional float power = 10; // The parameter to compute the learning rate.
-  optional float momentum = 11; // The momentum value.
-  optional float weight_decay = 12; // The weight decay.
-  // regularization types supported: L1 and L2
-  // controlled by weight_decay
-  optional string regularization_type = 29 [default = "L2"];
-  // the stepsize for learning rate policy "step"
-  optional int32 stepsize = 13;
-  // the stepsize for learning rate policy "multistep"
-  repeated int32 stepvalue = 34;
-
-  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
-  // whenever their actual L2 norm is larger.
-  optional float clip_gradients = 35 [default = -1];
-
-  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
-  optional string snapshot_prefix = 15; // The prefix for the snapshot.
-  // whether to snapshot diff in the results or not. Snapshotting diff will help
-  // debugging but the final protocol buffer size will be much larger.
-  optional bool snapshot_diff = 16 [default = false];
-  enum SnapshotFormat {
-    HDF5 = 0;
-    BINARYPROTO = 1;
-  }
-  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
-  // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU by default.
-  enum SolverMode {
-    CPU = 0;
-    GPU = 1;
-  }
-  optional SolverMode solver_mode = 17 [default = GPU];
-  // the device_id that will be used in GPU mode. Use device_id = 0 by default.
-  optional int32 device_id = 18 [default = 0];
-  // If non-negative, the seed with which the Solver will initialize the Caffe
-  // random number generator -- useful for reproducible results. Otherwise,
-  // (and by default) initialize using a seed derived from the system clock.
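As an illustrative aside (values are examples, not defaults), the "step" policy above combines base_lr, gamma and stepsize as base_lr * gamma ^ floor(iter / stepsize), so a solver fragment such as:

    base_lr: 0.01
    lr_policy: "step"
    gamma: 0.1
    stepsize: 10000
    max_iter: 30000

would train at 0.01 until iteration 10000, at 0.001 until 20000, and at 0.0001 thereafter.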
- optional int64 random_seed = 20 [default = -1]; - - // type of the solver - optional string type = 40 [default = "SGD"]; - - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38 [default = 0.99]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. - optional bool snapshot_after_train = 28 [default = true]; - - // DEPRECATED: old solver enum types, use string instead - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - // DEPRECATED: use type instead of solver_type - optional SolverType solver_type = 30 [default = SGD]; - - // Overlap compute and communication for data parallel training - optional bool layer_wise_reduce = 41 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) - repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. 
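To make the NetStateRule semantics above concrete, a minimal prototxt sketch (layer and blob names are hypothetical; the layer {...} syntax is the LayerParameter message defined next) that includes a data layer only while training:

    layer {
      name: "train_data"
      type: "Data"
      top: "data"
      top: "label"
      include { phase: TRAIN }   # rule is met only when the NetState phase is TRAIN
    }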
-// -// LayerParameter next available layer-specific ID: 151 (last added: smooth_l1_loss_param) -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies whether to backpropagate to each bottom. If unspecified, - // Caffe will automatically infer whether each input needs backpropagation - // to compute parameter gradients. If set to true for some inputs, - // backpropagation to those inputs is forced; if set false for some inputs, - // backpropagation to those inputs is skipped. - // - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional BatchNormParameter batch_norm_param = 139; - optional BiasParameter bias_param = 141; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional CropParameter crop_param = 144; - optional DataParameter data_param = 107; - optional DetectionOutputParameter detection_output_param = 150; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ELUParameter elu_param = 140; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional InputParameter input_param = 143; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional ParameterParameter parameter_param = 145; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional RecurrentParameter recurrent_param = 146; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional ScaleParameter scale_param = 142; - optional SigmoidParameter sigmoid_param = 124; - optional SmoothL1LossParameter smooth_l1_loss_param = 148; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; - optional PermuteParameter permute_param = 202; - optional PriorBoxParameter prior_box_param = 203; - optional NormalizeParameter norm_param = 206; - optional PSROIPoolingParameter psroi_pooling_param = 207; - optional FreespaceExtractParameter freespace_extract_param = 151; - optional PostprocessParameter postprocess_param = 152; - optional SpatialTransformParameter spatial_transform_param = 153; - optional ROIAlignParameter roi_align_param = 154; - optional ReorgParameter reorg_param = 155; - optional RegionParameter region_param = 156; - optional ReverseParameter reverse_param = 157; - optional InterpParameter interp_param = 158; - optional ShuffleChannelParameter shuffle_channel_param = 159; - optional UpsampleParameter upsample_param = 160; - optional ROIPoolingParameter roi_pooling_param = 161; - optional YoloParameter yolo_param = 199; - optional YoloV3DetectionOutputParameter yolov3_detection_output_param = 200; - optional ProposalParameter proposal_param = 201; - optional FSRDetectionOutputParameter fsrdetectionoutput_param = 222; - optional SSDDetectionOutputParameter ssddetectionoutput_param = 232; - optional YoloV2DetectionOutputParameter yolov2_detection_output_param = 204; - optional 
QuantParameter quant_param = 208; - optional CondTakeParameter condtake_param = 233; - optional MatrixInverseParameter matrix_inverse_param = 210; - optional WarpPerspectiveParameter warp_perspective_param = 234; - optional BatchMatMulParameter batch_matmul_param = 235; - optional SpatialTransformerParameter st_param = 5000; - optional YoloV3DetectionOutputV2Parameter yolov3_detection_output_v2_param = 5001; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; - // How to normalize the loss for loss layers that aggregate across batches, - // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. - enum NormalizationMode { - // Divide by the number of examples in the batch times spatial dimensions. - // Outputs that receive the ignore label will NOT be ignored in computing - // the normalization factor. - FULL = 0; - // Divide by the total number of output locations that do not take the - // ignore_label. If ignore_label is not set, this behaves like FULL. - VALID = 1; - // Divide by the batch size. - BATCH_SIZE = 2; - // Do not normalize the loss. - NONE = 3; - } - // For historical reasons, the default normalization for - // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. - optional NormalizationMode normalization = 3 [default = VALID]; - // Deprecated. Ignored if normalization is specified. If normalization - // is not specified, then setting this to false will be equivalent to - // normalization = BATCH_SIZE to be consistent with previous behavior. - optional bool normalize = 2; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. 
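As a sketch of the TransformationParameter fields above (source path and values are placeholders), note that the mean subtraction is applied before the scaling, per the comment:

    layer {
      name: "data"
      type: "Data"
      top: "data"
      top: "label"
      transform_param {
        mirror: true        # random horizontal flips
        crop_size: 227      # random 227x227 crops
        mean_value: 104     # per-channel means, subtracted first
        mean_value: 117
        mean_value: 123
        scale: 0.0078125    # then multiply by 1/128
      }
      data_param { source: "train_lmdb" batch_size: 32 backend: LMDB }
    }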
- optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; - // The axis along which to maximise -- may be negative to index from the - // end (e.g., -1 for the last axis). - // By default ArgMaxLayer maximizes over the flattened trailing dimensions - // for each index of the first / num dimension. - optional int32 axis = 3; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message BatchNormParameter { - // If false, normalization is performed over the current mini-batch - // and global statistics are accumulated (but not yet used) by a moving - // average. - // If true, those accumulated mean and variance values are used for the - // normalization. - // By default, it is set to false when the network is in the training - // phase and true when the network is in the testing phase. - optional bool use_global_stats = 1; - // What fraction of the moving average remains each iteration? - // Smaller values make the moving average decay faster, giving more - // weight to the recent values. - // Each iteration updates the moving average @f$S_{t-1}@f$ with the - // current mean @f$ Y_t @f$ by - // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ - // is the moving_average_fraction parameter. - optional float moving_average_fraction = 2 [default = .999]; - // Small value to add to the variance estimate so that we don't divide by - // zero. - optional float eps = 3 [default = 1e-5]; -} - -message BiasParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar bias. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the bias - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer.) - // The initialization for the learned bias parameter. - // Default is the zero (0) initialization, resulting in the BiasLayer - // initially performing the identity operation. 
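A minimal sketch of the use_global_stats switch described above, assuming Caffe's registered "BatchNorm" layer type; at test time the accumulated moving averages are used instead of mini-batch statistics:

    layer {
      name: "bn1"
      type: "BatchNorm"
      bottom: "conv1"
      top: "conv1"                  # in-place normalization
      batch_norm_param {
        use_global_stats: true      # test phase: use the moving averages
        moving_average_fraction: 0.999
        eps: 1e-5
      }
    }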
- optional FillerParameter filler = 3; - optional bool bias_from_blob = 4 [default = true]; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - // Factor used to dilate the kernel, (implicitly) zero-filling the resulting - // holes. (Kernel dilation is sometimes referred to by its use in the - // algorithme à trous from Holschneider et al. 1987.) - repeated uint32 dilation = 18; // The dilation; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. - optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - - optional uint32 group = 5 [default = 1]; // The group size for group conv - - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [default = 1]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [default = false]; -} - -message CropParameter { - // To crop, elements of the first bottom are selected to fit the dimensions - // of the second, reference bottom. 
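For the convolution fields above, a hedged example of a 3x3, 64-output convolution with unit stride and symmetric padding (all values illustrative):

    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      convolution_param {
        num_output: 64
        kernel_size: 3    # same extent in H and W
        pad: 1            # preserves spatial dims at stride 1
        stride: 1
        weight_filler { type: "xavier" }
        bias_filler { type: "constant" value: 0 }
      }
    }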
The crop is configured by - // - the crop `axis` to pick the dimensions for cropping - // - the crop `offset` to set the shift for all/each dimension - // to align the cropped bottom with the reference bottom. - // All dimensions up to but excluding `axis` are preserved, while - // the dimensions including and trailing `axis` are cropped. - // If only one `offset` is set, then all dimensions are offset by this amount. - // Otherwise, the number of offsets must equal the number of cropped axes to - // shift the crop in each dimension accordingly. - // Note: standard dimensions are N,C,H,W so the default is a spatial crop, - // and `axis` may be negative to index from the end (e.g., -1 for the last - // axis). - optional int32 axis = 1 [default = 2]; - repeated uint32 offset = 2; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Increase if data feeding bandwidth varies, within the - // limit of device memory for GPU training) - optional uint32 prefetch = 10 [default = 4]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio - optional bool scale_train = 2 [default = true]; // scale train or test phase -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. 
- repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -// Message that stores parameters used by ELULayer -message ELUParameter { - // Described in: - // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate - // Deep Network Learning by Exponential Linear Units (ELUs). arXiv - optional float alpha = 1 [default = 1]; -} - -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -// Message that stores parameters used by ExpLayer -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). 
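To illustrate EltwiseParameter's SUM operation with per-blob coefficients (blob names are hypothetical), computing top = 1.0*a + 0.5*b:

    layer {
      name: "fuse"
      type: "Eltwise"
      bottom: "a"
      bottom: "b"
      top: "fuse"
      eltwise_param {
        operation: SUM
        coeff: 1.0    # one coefficient per bottom blob, in order
        coeff: 0.5
      }
    }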
Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. - optional string source = 1; - optional int32 axis = 2 [default = 1]; // axis of prob -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; - // Specify whether to transpose the weight matrix or not. - // If transpose == true, any operations will be performed on the transpose - // of the weight matrix. The weight matrix itself is not going to be transposed - // but rather the transfer flag of operations will be toggled accordingly. - optional bool transpose = 6 [default = false]; -} - -message InputParameter { - // This layer produces N >= 1 top blob(s) to be assigned manually. - // Define N shapes to set a shape for each top. - // Define 1 shape to set the same shape for every top. - // Define no shape to defer to reshaping manually. - repeated BlobShape shape = 1; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
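A sketch combining the InputParameter and InnerProductParameter messages above, as in a deploy-time net (the shape and num_output are placeholders):

    layer {
      name: "data"
      type: "Input"
      top: "data"
      input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
    }
    layer {
      name: "fc"
      type: "InnerProduct"
      bottom: "data"
      top: "fc"
      inner_product_param { num_output: 10 }   # axis defaults to 1
    }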
- // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message ParameterParameter { - optional BlobShape shape = 1; -} - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; - optional bool ceil_mode = 13 [default = true]; - // How to calculate the output size - using ceil (default) or floor rounding. - enum RoundMode { - CEIL = 0; - FLOOR = 1; - } - optional RoundMode round_mode = 14 [default = CEIL]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. 
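The global_pooling flag in PoolingParameter above replaces the kernel extents with the full input size; a hedged global-average-pooling sketch (bottom name hypothetical):

    layer {
      name: "gap"
      type: "Pooling"
      bottom: "conv5"
      top: "gap"
      pooling_param {
        pool: AVE
        global_pooling: true   # kernel becomes bottom height x bottom width
      }
    }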
- // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by RecurrentLayer -message RecurrentParameter { - // The dimension of the output (and usually hidden state) representation -- - // must be explicitly set to non-zero. - optional uint32 num_output = 1 [default = 0]; - - optional FillerParameter weight_filler = 2; // The filler for the weight - optional FillerParameter bias_filler = 3; // The filler for the bias - - // Whether to enable displaying debug_info in the unrolled recurrent net. - optional bool debug_info = 4 [default = false]; - - // Whether to add as additional inputs (bottoms) the initial hidden state - // blobs, and add as additional outputs (tops) the final timestep hidden state - // blobs. The number of additional bottom/top blobs required depends on the - // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. - optional bool expose_hidden = 5 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... 
} - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - - -message ScaleParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar multiplier. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the scale - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. 
- optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer.) - // The initialization for the learned scale parameter. - // Default is the unit (1) initialization, resulting in the ScaleLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - - // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but - // may be more efficient). Initialized with bias_filler (defaults to 0). - optional bool bias_term = 4 [default = false]; - optional FillerParameter bias_filler = 5; - optional bool scale_from_blob = 6 [default = true]; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; -} - -message SmoothL1LossParameter { - // SmoothL1Loss(x) = - // 0.5 * (sigma * x) ** 2 -- if x < 1.0 / sigma / sigma - // |x| - 0.5 / sigma / sigma -- otherwise - optional float sigma = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. 
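As a sketch of the ScaleParameter fields above, a per-channel learned scale with the extra bias enabled, as commonly stacked after BatchNorm (names illustrative):

    layer {
      name: "scale1"
      type: "Scale"
      bottom: "conv1"
      top: "conv1"
      scale_param {
        axis: 1           # apply along the channel axis
        bias_term: true   # fused ScaleLayer+BiasLayer behaviour
      }
    }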
- optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. -message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - QUANT = 208; - DEQUANT = 209; - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional 
PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. - optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. - repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. 
- optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. - // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope parameters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} - -// Message that stores parameters used by DetectionOutputLayer -//message DetectionOutputParameter { -// optional int32 num_classes = 1 [default = 21]; -// optional float nms_threshold = 2 [default = 0.3]; -// optional int32 top_k = 3; -// optional float confidence_threshold = 4 [default = 0.8]; -//} - -// Message that store parameters used by PriorBoxLayer -message PriorBoxParameter { - // Encode/decode type. - enum CodeType { - CORNER = 1; - CENTER_SIZE = 2; - CORNER_SIZE = 3; - } - // Minimum box size (in pixels). Required! - repeated float min_size = 1; - // Maximum box size (in pixels). Required! - repeated float max_size = 2; - // Various of aspect ratios. Duplicate ratios will be ignored. - // If none is provided, we use default ratio 1. - repeated float aspect_ratio = 3; - // If true, will flip each aspect ratio. - // For example, if there is aspect ratio "r", - // we will generate aspect ratio "1.0/r" as well. - optional bool flip = 4 [default = true]; - // If true, will clip the prior so that it is within [0, 1] - optional bool clip = 5 [default = false]; - // Variance for adjusting the prior bboxes. - repeated float variance = 6; - // By default, we calculate img_height, img_width, step_x, step_y based on - // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely - // provided. - // Explicitly provide the img_size. - optional uint32 img_size = 7; - // Either img_size or img_h/img_w should be specified; not both. 
-  optional uint32 img_h = 8;
-  optional uint32 img_w = 9;
-
-  // Explicitly provide the step size.
-  optional float step = 10;
-  // Either step or step_h/step_w should be specified; not both.
-  optional float step_h = 11;
-  optional float step_w = 12;
-
-  // Offset to the top left corner of each cell.
-  optional float offset = 13 [default = 0.5];
-}
-
-// Message that stores parameters used by PermuteLayer
-message PermuteParameter {
-  // The new orders of the axes of data. Notice it should be within
-  // the same range as the input data, and it starts from 0.
-  // Do not provide repeated order.
-  repeated uint32 order = 1;
-}
-
-message NormalizeParameter {
-  optional bool across_spatial = 1 [default = true];
-  // Initial value of scale. Default is 1.0 for all
-  optional FillerParameter scale_filler = 2;
-  // Whether or not scale parameters are shared across channels.
-  optional bool channel_shared = 3 [default = true];
-  // Epsilon for not dividing by zero while normalizing variance
-  optional float eps = 4 [default = 1e-10];
-}
-
-// Needed by SSD.
-message SaveOutputParameter {
-  // Output directory. If not empty, we will save the results.
-  optional string output_directory = 1;
-  // Output name prefix.
-  optional string output_name_prefix = 2;
-  // Output format.
-  //    VOC - PASCAL VOC output format.
-  //    COCO - MS COCO output format.
-  optional string output_format = 3;
-  // If you want to output results, you must also provide the following two files.
-  // Otherwise, we will ignore saving results.
-  // label map file.
-  optional string label_map_file = 4;
-  // A file which contains a list of names and sizes with the same order
-  // of the input DB. The file is in the following format:
-  //    name height width
-  //    ...
-  optional string name_size_file = 5;
-  // Number of test images. It can be less than the lines specified in
-  // name_size_file. For example, when we only want to evaluate on part
-  // of the test images.
-  optional uint32 num_test_image = 6;
-  // The resize parameter used in saving the data.
-  // optional ResizeParameter resize_param = 7;
-}
-
-message NonMaximumSuppressionParameter {
-  // Threshold to be used in nms.
-  optional float nms_threshold = 1 [default = 0.3];
-  // Maximum number of results to be kept.
-  optional int32 top_k = 2;
-  // Parameter for adaptive nms.
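A minimal PermuteParameter sketch reordering an NCHW blob to NHWC, following the order semantics above (blob names hypothetical):

    layer {
      name: "perm"
      type: "Permute"
      bottom: "conv"
      top: "perm"
      permute_param { order: 0 order: 2 order: 3 order: 1 }
    }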
-  optional float eta = 3 [default = 1.0];
-}
-
-message GeneralNmsParameter {
-  optional int32 post_top_k = 1;
-  optional float nms_threshold = 2 [default = 0];
-  optional float iou_threshold_decay = 3 [default = 1.0];
-  optional float coor_scale_factor = 4 [default = 1.0];
-}
-
-// Message that stores parameters used by DetectionOutputLayer, ssd/fasterRcnn
-message DetectionOutputParameter {
-  optional int32 num_classes = 1;
-  optional bool share_location = 2 [default = true];
-  optional int32 background_label_id = 3 [default = 0];
-  optional NonMaximumSuppressionParameter nms_param = 4;
-  optional SaveOutputParameter save_output_param = 5;
-  optional PriorBoxParameter.CodeType code_type = 6 [default = CENTER_SIZE];
-  optional bool variance_encoded_in_target = 8 [default = true];
-  optional int32 keep_top_k = 7;
-  optional float confidence_threshold = 9;
-  optional float nms_threshold = 13;
-  optional int32 top_k = 14;
-  optional int32 boxes = 15 [default = 1];
-  optional bool relative = 17 [default = true];
-  optional float objectness_threshold = 18 [default = 0.5];
-  optional float class_threshold = 19 [default = 0.5];
-  repeated float biases = 20;
-  optional GeneralNmsParameter general_nms_param = 21;
-  optional float objectness_score = 22;
-}
-message PSROIPoolingParameter {
-  required float spatial_scale = 1;
-  required int32 output_dim = 2;  // output channel number
-  required int32 group_size = 3;  // number of groups to encode position-sensitive score maps
-}
-// Message that stores parameters used by FreespaceExtractLayer
-message FreespaceExtractParameter {
-  optional float org_height = 1;
-}
-
-// Message that stores parameters used by DetectpostprocessLayer
-message PostprocessParameter {
-  optional float nms_thresh = 1 [default = 0.3];
-  optional float conf_thresh = 2 [default = 0.5];
-  optional uint32 post_nms_topn = 3 [default = 100];
-  optional uint32 cls_num = 4 [default = 12];
-  repeated float bbox_reg_weights = 5;
-}
-
-// Message that stores parameters used by SpatialTransformLayer
-message SpatialTransformParameter {
-  optional uint32 output_h = 1 [default = 0];
-  optional uint32 output_w = 2 [default = 0];
-  optional float border_value = 3 [default = 0];
-  repeated float affine_transform = 4;
-  enum Engine {
-    DEFAULT = 0;
-    CAFFE = 1;
-    CUDNN = 2;
-  }
-  optional Engine engine = 15 [default = DEFAULT];
-}
-message ROIAlignParameter {
-  // Pad, kernel size, and stride are all given as a single value for equal
-  // dimensions in height and width or as Y, X pairs.
-  optional uint32 pooled_h = 1 [default = 0];  // The pooled output height
-  optional uint32 pooled_w = 2 [default = 0];  // The pooled output width
-  // Multiplicative spatial scale factor to translate ROI coords from their
-  // input scale to the scale used when pooling
-  optional float spatial_scale = 3 [default = 1];
-  optional int32 sampling_ratio = 4 [default = -1];
-  optional int32 roi_end_mode = 5 [default = 0];
-}
-
-message RegionParameter {
-  optional uint32 classes = 1 [default = 20];  // Category of classification
-  optional uint32 coords = 2 [default = 4];    // Coordinates of box
-  optional uint32 boxes = 3 [default = 1];     // Number of boxes predicted per grid
-  optional uint32 softmax = 4 [default = 0];
-  optional string softmax_tree = 5 [default = ""];
-  optional uint32 background = 6 [default = 0];
-}
-message ReorgParameter {
-  optional uint32 stride = 2 [default = 2];
-  optional bool reverse = 1 [default = false];
-}
-message ReverseParameter {
-  repeated int32 axis = 1;
-}
-message InterpParameter {
-  optional int32 height = 1 [default = 0];         // Height of output
-  optional int32 width = 2 [default = 0];          // Width of output
-  optional int32 zoom_factor = 3 [default = 1];    // zoom factor
-  optional int32 shrink_factor = 4 [default = 1];  // shrink factor
-  optional int32 pad_beg = 5 [default = 0];        // padding at begin of input
-  optional int32 pad_end = 6 [default = 0];        // padding at end of input
-}
-message ShuffleChannelParameter {
-  optional uint32 group = 1 [default = 1];  // The number of group
-}
-message UpsampleParameter {
-  optional float scale = 1 [default = 1];
-  optional int32 stride = 2 [default = 2];
-  optional int32 stride_h = 3 [default = 2];
-  optional int32 stride_w = 4 [default = 2];
-}
-message ROIPoolingParameter {
-  required int32 pooled_h = 1;
-  required int32 pooled_w = 2;
-  optional float spatial_scale = 3 [default = 0.0625];
-  optional float spatial_scale_h = 4;
-  optional float spatial_scale_w = 5;
-}
-
-message YoloParameter {
-  optional int32 boxes = 1 [default = 3];
-  optional int32 coords = 2 [default = 4];
-  optional int32 classes = 3 [default = 80];
-  optional string yolo_version = 4 [default = "V3"];
-  optional bool softmax = 5 [default = false];
-  optional bool background = 6 [default = false];
-  optional bool softmaxtree = 7 [default = false];
-}
-
-message YoloV3DetectionOutputParameter {
-  optional int32 boxes = 1 [default = 3];
-  optional int32 classes = 2 [default = 80];
-  optional bool relative = 3 [default = true];
-  optional float obj_threshold = 4 [default = 0.5];
-  optional float score_threshold = 5 [default = 0.5];
-  optional float iou_threshold = 6 [default = 0.45];
-  optional int32 pre_nms_topn = 7 [default = 512];
-  optional int32 post_nms_topn = 8 [default = 1024];
-  repeated float biases_high = 9;
-  repeated float biases_mid = 10;
-  repeated float biases_low = 11;
-  optional int32 coords = 12 [default = 4];
-  repeated float biases = 13;
-  optional bool resize_origin_img_to_net = 14 [default = false];
-}
-
-message YoloV3DetectionOutputV2Parameter {
-  optional int32 boxes = 1 [default = 3];
-  optional int32 classes = 2 [default = 80];
-  optional bool relative = 3 [default = true];
-  optional float obj_threshold = 4 [default = 0.5];
-  optional float score_threshold = 5 [default = 0.5];
-  optional float iou_threshold = 6 [default = 0.45];
-  optional int32 pre_nms_topn = 7 [default = 512];
-  optional int32 post_nms_topn = 8 [default = 1024];
-  repeated float biases_high = 9;
-  repeated float biases_mid = 10;
-  repeated float biases_low = 11;
-  optional int32 coords = 12 [default = 4];
-  repeated float biases = 13;
-  optional bool resize_origin_img_to_net = 14 [default = false];
-  optional int32 out_box_dim = 15 [default = 3];
-}
-
-message ProposalParameter {
-  optional float feat_stride = 1 [default = 16];
-  optional float base_size = 2 [default = 16];
-  optional float min_size = 3 [default = 16];
-  repeated float ratio = 4;
-  repeated float scale = 5;
-  optional int32 pre_nms_topn = 6 [default = 3000];
-  optional int32 post_nms_topn = 7 [default = 304];
-  optional float iou_threshold = 8 [default = 0.7];
-  optional bool output_actual_rois_num = 9 [default = false];
-}
-
-message FSRDetectionOutputParameter {
-  required int32 num_classes = 1;
-  required float score_threshold = 2;
-  required float iou_threshold = 3;
-  optional int32 batch_rois = 4 [default = 1];
-}
-
-message SSDDetectionOutputParameter {
-  required int32 num_classes = 1 [default = 2];
-  optional bool share_location = 2 [default = true];
-  optional int32 background_label_id = 3 [default = 0];
-  optional float iou_threshold = 4 [default = 0.3];
-  optional int32 top_k = 5 [default = 200];
-  optional float eta = 6 [default = 1.0];
-  optional bool variance_encoded_in_target = 7 [default = false];
-  optional int32 code_type = 8 [default = 1];
-  optional int32 keep_top_k = 9 [default = -1];
-  optional float confidence_threshold = 10 [default = 0.0];
-}
-message YoloV2DetectionOutputParameter {
-  optional int32 boxes = 1 [default = 5];
-  optional int32 classes = 2 [default = 80];
-  optional bool relative = 3 [default = true];
-  optional float obj_threshold = 4 [default = 0.5];
-  optional float score_threshold = 5 [default = 0.5];
-  optional float iou_threshold = 6 [default = 0.45];
-  optional int32 pre_nms_topn = 7 [default = 512];
-  optional int32 post_nms_topn = 8 [default = 1024];
-  repeated float biases = 9;
-  optional int32 coords = 10 [default = 4];
-  optional bool resize_origin_img_to_net = 11 [default = false];
-}
-
-message QuantParameter {
-  optional float scale = 2;
-  optional bytes offset = 3;
-}
-
-message BatchMatMulParameter {
-  optional bool adj_x1 = 1 [default = false];
-  optional bool adj_x2 = 2 [default = false];
-}
-
-message CondTakeParameter {
-  required string mode = 1;
-  required float val = 2;
-  optional float eps = 3 [default = 1e-06];
-}
-
-message MatrixInverseParameter {
-  optional bool adjoint = 1 [default = false];
-}
-
-message WarpPerspectiveParameter {
-  required int32 out_height = 1;
-  required int32 out_width = 2;
-  optional float constant = 3;
-  optional string border_type = 4 [default = 'BORDER_CONSTANT'];
-}
-
-message SpatialTransformerParameter {
-  // How to use the parameter passed by localisation network
-  optional string transform_type = 1 [default = "affine"];
-  // What is the sampling technique
-  optional string sampler_type = 2 [default = "bilinear"];
-
-  // If not set, stays the same as the input dimensions H and W
-  optional int32 output_H = 3;
-  optional int32 output_W = 4;
-  // If false, only compute dTheta, DO NOT compute dU
-  optional bool to_compute_dU = 5 [default = true];
-
-  // The default value for some parameters
-  optional double theta_1_1 = 6;
-  optional double theta_1_2 = 7;
-  optional double theta_1_3 = 8;
-  optional double theta_2_1 = 9;
-  optional double theta_2_2 = 10;
-  optional double theta_2_3 = 11;
-}
diff --git a/ge/proto/dump_task.proto b/ge/proto/dump_task.proto
deleted file mode 100644
index a2411ddb..00000000
--- a/ge/proto/dump_task.proto
+++ /dev/null
@@ -1,113 +0,0 @@
-syntax = "proto3";
-package toolkit.dump;
-
-enum OutputDataType {
-  DT_UNDEFINED = 0;
-  DT_FLOAT = 1;
-  DT_FLOAT16 = 2;
-  DT_INT8 = 3;
-  DT_UINT8 = 4;
-  DT_INT16 = 5;
-  DT_UINT16 = 6;
-  DT_INT32 = 7;
-  DT_INT64 = 8;
-  DT_UINT32 = 9;
-  DT_UINT64 = 10;
-  DT_BOOL = 11;
-  DT_DOUBLE = 12;
-  DT_STRING = 13;
-  DT_DUAL_SUB_INT8 = 14;
-  DT_DUAL_SUB_UINT8 = 15;
-  DT_COMPLEX64 = 16;
-  DT_COMPLEX128 = 17;
-  DT_QINT8 = 18;
-  DT_QINT16 = 19;
-  DT_QINT32 = 20;
-  DT_QUINT8 = 21;
-  DT_QUINT16 = 22;
-  DT_RESOURCE = 23;
-  DT_STRING_REF = 24;
-  DT_DUAL = 25;
-  DT_VARIANT = 26;
-}
-
-enum OutputFormat {
-  FORMAT_NCHW = 0;
-  FORMAT_NHWC = 1;
-  FORMAT_ND = 2;
-  FORMAT_NC1HWC0 = 3;
-  FORMAT_FRACTAL_Z = 4;
-  FORMAT_NC1C0HWPAD = 5;
-  FORMAT_NHWC1C0 = 6;
-  FORMAT_FSR_NCHW = 7;
-  FORMAT_FRACTAL_DECONV = 8;
-  FORMAT_C1HWNC0 = 9;
-  FORMAT_FRACTAL_DECONV_TRANSPOSE = 10;
-  FORMAT_FRACTAL_DECONV_SP_STRIDE_TRANS = 11;
-  FORMAT_NC1HWC0_C04 = 12;
-  FORMAT_FRACTAL_Z_C04 = 13;
-  FORMAT_CHWN = 14;
-  FORMAT_FRACTAL_DECONV_SP_STRIDE8_TRANS = 15;
-  FORMAT_HWCN = 16;
-  FORMAT_NC1KHKWHWC0 = 17;
-  FORMAT_BN_WEIGHT = 18;
-  FORMAT_FILTER_HWCK = 19;
-  FORMAT_HASHTABLE_LOOKUP_LOOKUPS = 20;
-  FORMAT_HASHTABLE_LOOKUP_KEYS = 21;
-  FORMAT_HASHTABLE_LOOKUP_VALUE = 22;
-  FORMAT_HASHTABLE_LOOKUP_OUTPUT = 23;
-  FORMAT_HASHTABLE_LOOKUP_HITS = 24;
-  FORMAT_C1HWNCoC0 = 25;
-  FORMAT_MD = 26;
-  FORMAT_NDHWC = 27;
-  FORMAT_FRACTAL_ZZ = 28;
-  FORMAT_FRACTAL_NZ = 29;
-  FORMAT_RESERVED = 30;
-}
-
-message OriginalOp {
-  string name = 1;
-  uint32 output_index = 2;
-  OutputDataType data_type = 3;
-  OutputFormat format = 4;
-}
-
-message Shape {
-  repeated uint64 dim = 1;
-}
-
-message OpOutput {
-  OutputDataType data_type = 1;
-  OutputFormat format = 2;
-  Shape shape = 3;
-  OriginalOp original_op = 4;  // the original op corresponding to the output
-  bytes data = 5;
-  uint64 size = 6;
-}
-
-message OpInput {
-  OutputDataType data_type = 1;
-  OutputFormat format = 2;
-  Shape shape = 3;
-  bytes data = 4;
-  uint64 size = 5;
-}
-
-enum BufferType {
-  L1 = 0;
-}
-
-message OpBuffer {
-  BufferType buffer_type = 1;
-  bytes data = 2;
-  uint64 size = 3;
-}
-
-message DumpData {
-  string version = 1;
-  uint64 dump_time = 2;
-  repeated OpOutput output = 3;
-  repeated OpInput input = 4;
-  repeated OpBuffer buffer = 5;
-  string op_name = 6;
-}
diff --git a/ge/proto/fusion_model.proto b/ge/proto/fusion_model.proto
deleted file mode 100755
index c92c5581..00000000
--- a/ge/proto/fusion_model.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-import "om.proto";
-
-package domi;
-
-message FusionModelDef {
-  string version = 1;
-  repeated OpDef fusion_op = 2;
-}
\ No newline at end of file
diff --git a/ge/proto/fwk_adapter.proto b/ge/proto/fwk_adapter.proto
deleted file mode 100644
index 9335c926..00000000
--- a/ge/proto/fwk_adapter.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-package aicpu.FWKAdapter;
-option cc_enable_arenas = true;
-
-
-// Defines a struct for input and output.
-message TensorDataInfo {
-
-  // value DataType
-  uint32 dtype = 1;
-
-  // shape dim
-  repeated int64 dim = 2;
-
-  // data point addr
-  int64 data_addr = 3;
-}
-
-message KernelRunParam {
-  // input
-  repeated TensorDataInfo input = 1;
-  // output
-  repeated TensorDataInfo output = 2;
-}
-
diff --git a/ge/proto/ge_api.proto b/ge/proto/ge_api.proto
deleted file mode 100755
index 331c5aea..00000000
--- a/ge/proto/ge_api.proto
+++ /dev/null
@@ -1,88 +0,0 @@
-syntax = "proto3";
-package ge.api_pb;
-
-import "ge_ir.proto";
-
-// GE initialize
-message GEInitialize {
-  map<string, string> options = 1;
-};
-
-// initialize response
-message GEInitializeResponse {
-  uint32 status = 1;
-  uint32 clientId = 2;
-};
-
-// GE finalize
-message GEFinalize {
-  bool final = 1;
-  uint32 clientId = 2;
-};
-
-message GEFinalizeResponse {
-  uint32 status = 1;
-};
-
-// GE Session
-message CreateSession {
-  map<string, string> options = 1;
-};
-
-message CreateSessionResponse {
-  uint32 status = 1;
-  uint64 sessionId = 2;
-};
-
-// GE AddGraph
-// model serialize :: serializegraph
-message SessionAddGraph {
-  uint32 graphId = 1;
-  uint64 sessionId = 2;
-  ge.proto.GraphDef graph = 3;
-};
-
-message SessionAddGraphResponse {
-  uint32 status = 1;
-};
-
-// GE SessionRemoveGraph
-message SessionRemoveGraph {
-  uint32 graphId = 1;
-  uint64 sessionId = 2;
-};
-
-message SessionRemoveGraphResponse {
-  uint32 status = 1;
-};
-
-message SessionRunGraph {
-  uint32 graphId = 1;
-  uint64 sessionId = 2;
-  repeated ge.proto.TensorDef tensor = 3;
-};
-
-message SessionBuildGraph {
-  uint32 graphId = 1;
-  uint64 sessionId = 2;
-  repeated ge.proto.TensorDef tensor = 3;
-  string savePath = 4;
-};
-
-message SessionRunGraphResponse {
-  uint32 status = 1;
-  repeated ge.proto.TensorDef tensor = 2;
-};
-
-message SessionBuildGraphResponse {
-  uint32 status = 1;
-};
-
-message DestroySession {
-  bool final = 1;
-  uint64 sessionId = 2;
-};
-
-message DestroySessionResponse {
-  uint32 status = 1;
-};
diff --git a/ge/proto/ge_ir.proto b/ge/proto/ge_ir.proto
deleted file mode 100644
index c0ef3071..00000000
--- a/ge/proto/ge_ir.proto
+++ /dev/null
@@ -1,193 +0,0 @@
-syntax = "proto3";
-
-package ge.proto;
-
-enum DataType
-{
-    DT_UNDEFINED = 0;  // Used to indicate a DataType field has not been set.
-    DT_FLOAT = 1;            // float type
-    DT_FLOAT16 = 2;          // fp16 type
-    DT_INT8 = 3;             // int8 type
-    DT_UINT8 = 4;            // uint8 type
-    DT_INT16 = 5;            // int16 type
-    DT_UINT16 = 6;           // uint16 type
-    DT_INT32 = 7;            // int32 type
-    DT_INT64 = 8;            // int64 type
-    DT_UINT32 = 9;           // unsigned int32
-    DT_UINT64 = 10;          // unsigned int64
-    DT_BOOL = 11;            // bool type
-    DT_DOUBLE = 12;          // double type
-    DT_STRING = 13;          // string type
-    DT_DUAL_SUB_INT8 = 14;   /**< dual output int8 type */
-    DT_DUAL_SUB_UINT8 = 15;  /**< dual output uint8 type */
-    DT_COMPLEX64 = 16;       // complex64 type
-    DT_COMPLEX128 = 17;      // complex128 type
-    DT_QINT8 = 18;           // qint8 type
-    DT_QINT16 = 19;          // qint16 type
-    DT_QINT32 = 20;          // qint32 type
-    DT_QUINT8 = 21;          // quint8 type
-    DT_QUINT16 = 22;         // quint16 type
-    DT_RESOURCE = 23;        // resource type
-    DT_STRING_REF = 24;      // string_ref type
-    DT_DUAL = 25;            /**< dual output type */
-    DT_VARIANT = 26;         // variant type
-    DT_BF16 = 27;            // bf16 type
-    DT_INT4 = 28;            // int4 type
-}
-
-message AttrDef
-{
-    message ListValue
-    {
-        enum ListValueType {
-            VT_LIST_NONE = 0;
-            VT_LIST_STRING = 1;
-            VT_LIST_INT = 2;
-            VT_LIST_FLOAT = 3;
-            VT_LIST_BOOL = 4;
-            VT_LIST_BYTES = 5;
-            VT_LIST_TENSOR_DESC = 6;
-            VT_LIST_TENSOR = 7;
-            VT_LIST_GRAPH = 8;
-            VT_LIST_NAMED_ATTRS = 9;
-            VT_LIST_DATA_TYPE = 10;
-        }
-        repeated bytes s = 2;              // "list(string)"
-        repeated int64 i = 3;              // "list(int)"
-        repeated float f = 4;              // "list(float)"
-        repeated bool b = 5;               // "list(bool)"
-        repeated bytes bt = 7;
-        repeated TensorDescriptor td = 8;
-        repeated TensorDef t = 9;
-        repeated GraphDef g = 10;
-        repeated NamedAttrs na = 11;
-        repeated int64 dt = 12;            // list ge::DataType
-
-        ListValueType val_type = 20;
-    }
-
-    message ListListInt {
-        message ListInt {
-            repeated int64 list_i = 1;     // list int
-        }
-        repeated ListInt list_list_i = 1;  // list list int
-    }
-
-    oneof value
-    {
-        bytes s = 2;                     // "string"
-        int64 i = 3;                     // "int"
-        float f = 4;                     // "float"
-        bool b = 5;                      // "bool"
-        bytes bt = 7;
-        ListValue list = 1;              // any "list(...)"
-        NamedAttrs func = 10;            // Used to support attr nesting
-        TensorDescriptor td = 11;        // GeTensorDesc type
-        TensorDef t = 12;                // GeTensor type
-        GraphDef g = 13;                 // Graph type
-        ListListInt list_list_int = 14;  // List List Int type
-        int64 dt = 15;                   // ge::DataType
-    }
-}
-
-// A list of attr names and their values. The whole list is attached
-// with a string name. E.g., MatMul[T=float].
-message NamedAttrs
-{
-    string name = 1;
-    map<string, AttrDef> attr = 2;
-}
-
-// Shape / dimension description, using row-major order
-message ShapeDef
-{
-    repeated int64 dim = 1;  // Size of each dimension
-}
-
-// Multidimensional data description
-message TensorDescriptor
-{
-    string name = 1;     // Optional parameter, tensor name
-
-    DataType dtype = 2;  // tensor datatype
-    ShapeDef shape = 3;  // Shape / dimension
-    string layout = 4;   // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
-
-    bool has_out_attr = 9;
-    int64 size = 10;
-    int64 weight_size = 11;
-    bool reuse_input = 12;
-    bool output_tensor = 13;
-    string device_type = 14;
-    bool input_tensor = 15;
-    int64 real_dim_cnt = 16;
-    int64 reuse_input_index = 17;
-    int64 data_offset = 18;
-    int64 cmps_size = 19;
-    string cmps_tab = 20;
-    int64 cmps_tab_offset = 21;
-
-    map<string, AttrDef> attr = 5;  // Set of extra parameter fields
-}
-
-// GeTensor definition
-message TensorDef
-{
-    TensorDescriptor desc = 1;  // Tensor description
-    bytes data = 2;             // Tensor data
-}
-
-
-// Operator description
-message OpDef
-{
-    string name = 1;  // name
-    string type = 2;  // type
-
-    repeated string input = 5;  // input original op name + outgoing index. op_name:index
-
-    map<string, AttrDef> attr = 10;  // Set of operator parameter fields
-
-    bool has_out_attr = 20;
-    int64 id = 21;
-    int64 stream_id = 22;
-    repeated string input_name = 23;
-    repeated string src_name = 24;
-    repeated int64 src_index = 25;
-    repeated string dst_name = 26;
-    repeated int64 dst_index = 27;
-    repeated int64 input_i = 28;
-    repeated int64 output_i = 29;
-    repeated int64 workspace = 30;
-    repeated int64 workspace_bytes = 31;
-    repeated bool is_input_const = 32;
-    repeated TensorDescriptor input_desc = 33;
-    repeated TensorDescriptor output_desc = 34;
-    repeated string subgraph_name = 35;
-}
-
-// Graph definition
-message GraphDef
-{
-    string name = 1;             // name
-
-    repeated string input = 4;   // Graph input
-    repeated string output = 5;  // Graph output
-
-    repeated OpDef op = 6;       // List of operators
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
-// model definition
-message ModelDef
-{
-    string name = 1;            // name
-    uint32 version = 2;         // IR Proto version
-    string custom_version = 3;  // User model version number, passed in by user
-
-    repeated GraphDef graph = 7;  // Graph definition; graph[0] represents the main graph in modeldef
-
-    map<string, AttrDef> attr = 11;  // Extended field
-}
-
diff --git a/ge/proto/insert_op.proto b/ge/proto/insert_op.proto
deleted file mode 100644
index 7d708865..00000000
--- a/ge/proto/insert_op.proto
+++ /dev/null
@@ -1,140 +0,0 @@
-syntax = "proto3";
-
-package domi;
-
-message InsertNewOps {
-  repeated AippOpParams aipp_op = 1;
-  repeated MultiShapeOpParams multi_shape_op = 2;
-}
-
-message AippOpParams {
-  enum InputFormat {
-    UNDEFINED = 0;
-    YUV420SP_U8 = 1;
-    XRGB8888_U8 = 2;
-    RGB888_U8 = 3;
-    YUV400_U8 = 4;
-    NC1HWC0DI_FP16 = 5;
-    NC1HWC0DI_S8 = 6;
-    ARGB8888_U8 = 7;
-    YUYV_U8 = 8;
-    YUV422SP_U8 = 9;
-    AYUV444_U8 = 10;
-    RAW10 = 11;
-    RAW12 = 12;
-    RAW16 = 13;
-    RAW24 = 14;
-    RGB16 = 15;
-    RGB20 = 16;
-    RGB24 = 17;
-    RGB8_IR = 18;
-    RGB16_IR = 19;
-    RGB24_IR = 20;
-  }
-
-  enum AippMode {
-    undefined = 0;
-    static = 1;
-    dynamic = 2;
-  }
-
-  // AIPP mode: distinguishes static AIPP from dynamic AIPP
-  AippMode aipp_mode = 1;
-
-  // related_input_rank is required; integer; valid range >= 0 and <= the number of input Data operators; default 0.
-  // Identifies which model input AIPP is applied to. For example, if the model has two inputs and AIPP should
-  // process the second one, set related_input_rank to 1.
-  uint32 related_input_rank = 2;
-
-  // related_input_name is optional and the top name of data node which inserts aipp
-  string related_input_name = 6;
-
-  // input_edge_idx is optional; integer; valid range >= 0.
-  // It lets different outputs of the Data operator receive different AIPP processing. If not configured,
-  // AIPP is applied by default to all outputs of the model input specified by related_input_rank.
-  // The configured values must be <= the number of output edges of the Data operator.
-  repeated uint32 input_edge_idx = 3;
-
-  // [Begin] Dynamic AIPP parameters; ignored when static AIPP is configured
-  uint32 max_src_image_size = 4;
-
-  // Whether rotation is supported. Disabled by default; enabling it incurs extra memory and performance cost
-  bool support_rotation = 5;
-
-  // [End] Dynamic AIPP parameters
-
-
-  // [Begin] Static AIPP parameters; ignored when dynamic AIPP is configured
-  InputFormat input_format = 51;
-  bool csc_switch = 52;
-  float cpadding_value = 53;
-  bool rbuv_swap_switch = 54;
-  bool ax_swap_switch = 55;
-  bool single_line_mode = 56;
-
-  int32 src_image_size_w = 57;
-  int32 src_image_size_h = 58;
-
-  bool crop = 59;
-  int32 load_start_pos_w = 60;
-  int32 load_start_pos_h = 61;
-  int32 crop_size_w = 62;
-  int32 crop_size_h = 63;
-
-  bool resize = 64;
-  int32 resize_output_w = 65;
-  int32 resize_output_h = 66;
-
-  bool padding = 67;
-  int32 left_padding_size = 68;
-  int32 right_padding_size = 69;
-  int32 top_padding_size = 70;
-  int32 bottom_padding_size = 71;
-  float padding_value = 72;
-
-  int32 mean_chn_0 = 10;
-  int32 mean_chn_1 = 11;
-  int32 mean_chn_2 = 12;
-  int32 mean_chn_3 = 19;
-  float min_chn_0 = 13;
-  float min_chn_1 = 14;
-  float min_chn_2 = 15;
-  float min_chn_3 = 20;
-  repeated float var_reci_chn_0 = 16;
-  repeated float var_reci_chn_1 = 17;
-  repeated float var_reci_chn_2 = 18;
-  repeated float var_reci_chn_3 = 21;
-
-  repeated int32 matrix_r0c0 = 30;
-  repeated int32 matrix_r0c1 = 31;
-  repeated int32 matrix_r0c2 = 32;
-  repeated int32 matrix_r1c0 = 33;
-  repeated int32 matrix_r1c1 = 34;
-  repeated int32 matrix_r1c2 = 35;
-  repeated int32 matrix_r2c0 = 36;
-  repeated int32 matrix_r2c1 = 37;
-  repeated int32 matrix_r2c2 = 38;
-  repeated int32 output_bias_0 = 39;
-  repeated int32 output_bias_1 = 40;
-  repeated int32 output_bias_2 = 41;
-  repeated int32 input_bias_0 = 42;
-  repeated int32 input_bias_1 = 43;
-  repeated int32 input_bias_2 = 44;
-
-  // [End] Static AIPP parameters
-
-  // The n number that is used for raw/rgbir data into f16 transformation.
-  // The transformation equation is x/(2^n). If set to 0, no transform is performed.
-  uint32 raw_rgbir_to_f16_n = 45;
-}
-
-message MultiShapeOpParams {
-  enum MultiShapeMode {
-    batch = 0;       // dynamic batch
-    resolution = 1;  // dynamic resolution, reserved for extension
-  }
-
-  MultiShapeMode mode = 1;        // operator mode
-  uint32 related_input_rank = 2;  // which input the new operator is inserted at
-
-
-  repeated uint32 batch_list = 11;  // batch_list values; the number of entries must be between 2 and 8
-}
diff --git a/ge/proto/om.proto b/ge/proto/om.proto
deleted file mode 100644
index e15e5f80..00000000
--- a/ge/proto/om.proto
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-package domi;
-
-enum TargetType
-{
-  MINI = 0;
-  TINY = 1;
-  LITE = 2;
-}
-
-// offline model
-message ModelDef {
-  string name = 1;
-  uint32 version = 2;
-
-  uint64 memory_size = 10;
-  uint32 stream_num = 11;
-  uint32 event_num = 12;
-  uint64 weight_size = 13;
-  uint32 label_num = 15;
-  repeated OpDef op = 20;
-  TargetType target_type = 23;
-
-  map<string, AttrDef> attr = 30;
-};
-
-// operator define
-message OpDef {
-  string name = 1;
-  string type = 2;
-
-  uint32 id = 3;
-  uint32 stream_id = 4;
-
-  repeated string input_name = 5;
-
-  repeated string src_name = 8;
-  repeated int32 src_index = 9;
-  repeated int64 input = 10;
-  repeated int64 output = 11;
-  repeated TensorDescriptor input_desc = 12;
-  repeated TensorDescriptor output_desc = 13;
-  repeated WeightDef weights = 14;
-  repeated string dst_name = 15;
-  repeated int32 dst_index = 16;
-
-  repeated int64 workspace = 20;
-  repeated uint32 workspace_bytes = 21;
-
-  repeated string weight_name = 22;
-  repeated bool is_input_const = 23;
-
-  map<string, AttrDef> attr = 30;
-
-  QuantizeFactorParams quantize_factor = 31;
-
-  oneof op_params {
-    // start at 100 here
-    SendOpParams sender_param = 100;
-    RecvOpParams receiver_param = 200;
-    ConvolutionOpParams convolution_param = 300;
-    PoolingOpParams pooling_param = 400;
-    EltwiseOpParams eltwise_param = 500;
-    BatchNormOpParams batchnorm_param = 600;
-    ScaleOpParams scale_param = 700;
-    FullConnectionOpParams full_connection_param = 800;
-    SoftmaxOpParams softmax_param = 900;
-    ActivationOpParams activation_param = 1000;
-    ReshapeOpParams reshape_param = 1100;
-  }
-};
-
-message SendOpParams {
-  uint32 event_id = 1;
-};
-
-message RecvOpParams {
-  uint32 event_id = 1;
-};
-
-enum QuantizeScaleType
-{
-  VECTOR_SCALE = 0;
-  SCALAR_SCALE = 1;
-}
-
-enum QuantizeScaleMode
-{
-  NORMAL_MODE = 0;
-  SQRT_MODE = 1;
-}
-
-enum QuantizeAlgorithm
-{
-  NON_OFFSET_ALGO = 0;
-  HALF_OFFSET_ALGO = 1;
-  ALL_OFFSET_ALGO = 2;
-}
-message QuantizeFactor
-{
-  QuantizeScaleMode scale_mode = 1;
-  bytes scale_value = 2;
-  int64 scale_offset = 3;
-  bytes offset_data_value = 4;
-  int64 offset_data_offset = 5;
-  bytes offset_weight_value = 6;
-  int64 offset_weight_offset = 7;
-  bytes offset_pad_value = 8;
-  int64 offset_pad_offset = 9;
-};
-
-message QuantizeCalcFactor
-{
-  bytes offsetw = 1;
-  int64 offsetw_offset = 2;
-  bytes offsetd = 3;
-  int64 offsetd_offset = 4;
-  bytes scalereq = 5;
-  int64 scaledreq_offset = 6;
-  bytes offsetdnext = 7;
-  int64 offsetdnext_offset = 8;
-}
-
-message QuantizeFactorParams
-{
-  QuantizeAlgorithm quantize_algo = 1;
-  QuantizeScaleType scale_type = 2;
-  QuantizeFactor quantize_param = 3;
-  QuantizeFactor dequantize_param = 4;
-  QuantizeFactor requantize_param = 5;
-  QuantizeCalcFactor quantizecalc_param = 6;
-};
-
-message ConvolutionOpParams {
-  int32 mode = 1;
-  int32 algo = 2;
-  int32 pad_mode = 3;
-  uint32 group = 4;
-  uint32 num_output = 5;
-
-  repeated uint32 pad = 10;
-  repeated uint32 stride = 11;
-  repeated uint32 dilation = 12;
-  repeated uint32 kernel = 13;
-
-  float alpha = 20;
-  float beta = 21;
-
-  WeightDef filter = 40;
-  WeightDef bias = 41;
-
-  bool relu_flag = 62;
-  repeated uint32 adj = 70;
-  repeated uint32 target_shape = 71;
-  repeated uint32 before_pad = 72;
-};
-
-message PoolingOpParams {
-  int32 mode = 1;
-  int32 nan_opt = 2;
-  int32 pad_mode = 3;
-  bool global_pooling = 4;
-
-  repeated uint32 window = 10;
-  repeated uint32 pad = 11;
-  repeated uint32 stride = 12;
-  bool ceil_mode = 13;
-  int32 data_mode = 14;
-
-  float alpha = 20;
-  float beta = 21;
-  repeated uint32 before_pad = 22;
-};
-
-message EltwiseOpParams {
-  int32 mode = 1;
-  repeated float coeff = 2;
-  float alpha = 3;
-  float beta = 4;
-  repeated WeightDef weight = 5;
-  bool relu_flag = 6;
-};
-
-message ActivationOpParams {
-  int32 mode = 1;
-  float coef = 2;
-  float alpha = 3;
-  float beta = 4;
-};
-
-message BatchNormOpParams {
-  int32 mode = 1;
-
-  float alpha = 2;
-  float beta = 3;
-  double epsilon = 4;                 // optional, [default = 1e-5]
-  bool use_global_stats = 5;          // optional, by default true, testing mode
-  float moving_average_fraction = 6;  // optional, [default = .999]
-
-  WeightDef estimated_mean = 7;
-  WeightDef estimated_variance = 8;
-
-  WeightDef scale = 9;
-  WeightDef bias = 10;
-};
-
-message ScaleOpParams {
-  WeightDef scale = 1;
-  WeightDef bias = 2;
-};
-
-message ReshapeOpParams {
-  float alpha = 1;
-  float beta = 2;
-  ShapeDef shape = 3;
-  int32 axis = 4;
-  int32 num_axes = 5;
-  int32 format = 6;
-};
-
-message SoftmaxOpParams {
-  int32 algo = 1;
-  int32 mode = 2;
-  float alpha = 3;
-  float beta = 4;
-};
-
-message FullConnectionOpParams {
-  WeightDef filter = 1;
-  WeightDef bias = 2;
-  uint32 num_output = 3;
-  bool relu_flag = 12;
-};
-
-message FlattenOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 start_axis = 3;
-  int32 end_axis = 4;
-}
-
-message AddLimitedOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 axis = 3;
-  bool broadcast = 4;
-
-  repeated WeightDef weight = 10;
-};
-
-message MulLimitedOpParams {
-  float alpha = 1;
-  float beta = 2;
-  int32 axis = 3;
-  bool broadcast = 4;
-
-  repeated WeightDef weight = 10;
-};
-
-message AddOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message MulOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message SubOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  repeated WeightDef weight = 10;
-};
-
-message BiasAddOpParams {
-  float alpha = 1;
-  float beta = 2;
-
-  WeightDef bias = 10;
-};
-
-message MatMulOpParams {
-  float alpha = 1;
-  float beta = 2;
-  bool transposeX = 3;
-  bool transposeW = 4;
-
-  WeightDef filter = 10;
-  WeightDef bias = 12;
-};
-
-message RsqrtOpParams {
-  float alpha = 1;
-  float beta = 2;
-};
-
-
-message WeightDef {
-  int32 format = 1;
-  int32 data_type = 2;
-  ShapeDef shape = 3;
-  bytes data = 4;
-  int64 data_offset = 5;
-  uint32 cmps_size = 6;
-  bytes cmps_tab = 7;
-  int64 cmps_tab_offset = 10;
-  CompressInfo cmps_info = 8;
-  AllOffsetQuantizeInfo alloffset_quantize_info = 11;
-}
-
-message ShapeDef {
-  repeated int64 dim = 1;
-}
-
-enum DeviceType {
-  NPU = 0;  // By default, we will use NPU.
-  CPU = 1;  // CPU
-}
-
-message AllOffsetQuantizeInfo {
-  float scale = 1;
-  int32 offset = 2;
-}
-
-message TensorDescriptor {
-  int32 format = 1;
-  int32 data_type = 2;
-  repeated int64 dim = 3;
-  uint32 size = 4;
-  bool reuse_input = 5;
-  bool output_tensor = 7;
-  DeviceType device_type = 8;
-  bool input_tensor = 9;
-  uint32 real_dim_cnt = 10;
-  uint32 reuse_input_index = 11;
-  AllOffsetQuantizeInfo alloffset_quantize_info = 12;
-}
-
-message CompressInfo {
-  int32 blockRow = 1;      // block row
-  int32 blockCol = 2;      // block col
-  int32 fractalK = 3;      // fractal K
-  int32 fractalN = 4;      // fractal N
-  int32 lastFractalK = 5;  // K of last fractal
-  int32 lastFractalN = 6;  // N of last fractal
-  int32 cubeSize = 7;      // cube's length
-  int32 loadDir = 8;       // data load direction 0:col load 1:row load
-}
-
-message AttrDef {
-  message ListValue {
-    repeated string s = 2;                  // "list(string)"
-    repeated int64 i = 3 [packed = true];   // "list(int)"
-    repeated float f = 4 [packed = true];   // "list(float)"
-    repeated bool b = 5 [packed = true];    // "list(bool)"
-    repeated uint32 u = 6 [packed = true];  // "list(uint)"
-    repeated bytes bt = 7;
-  }
-
-  oneof value {
-    string s = 2;          // "string"
-    int64 i = 3;           // "int"
-    float f = 4;           // "float"
-    bool b = 5;            // "bool"
-    uint32 u = 6;          // "uint32"
-    bytes bt = 7;
-    ListValue list = 1;    // any "list(...)"
-    NamedAttrs func = 10;
-  }
-}
-
-// A list of attr names and their values. The whole list is attached
-// with a string name. E.g., MatMul[T=float].
-message NamedAttrs {
-  string name = 1;
-  map<string, AttrDef> attr = 2;
-}
-
diff --git a/ge/proto/op_mapping.proto b/ge/proto/op_mapping.proto
deleted file mode 100644
index d626eb49..00000000
--- a/ge/proto/op_mapping.proto
+++ /dev/null
@@ -1,75 +0,0 @@
-syntax = "proto3";
-package toolkit.aicpu.dump;
-
-message Shape {
-  repeated uint64 dim = 1;
-}
-
-message Output {
-  int32 data_type = 1;
-  int32 format = 2;
-  Shape shape = 3;
-  uint64 address = 4;
-  string original_name = 5;
-  int32 original_output_index = 6;
-  int32 original_output_data_type = 7;
-  int32 original_output_format = 8;
-  uint64 size = 9;
-  Shape origin_shape = 10;
-}
-
-message Input {
-  int32 data_type = 1;
-  int32 format = 2;
-  Shape shape = 3;
-  uint64 address = 4;
-  uint64 size = 5;
-  Shape origin_shape = 6;
-}
-
-enum BufferType {
-  L1 = 0;
-}
-
-message OpBuffer {
-  BufferType buffer_type = 1;
-  uint64 address = 2;
-  uint64 size = 3;
-}
-
-message Op {
-  string op_name = 1;
-  string op_type = 2;
-}
-
-message Task {
-  uint32 task_id = 1;
-  uint32 stream_id = 2;
-  Op op = 3;
-  repeated Output output = 4;
-  bool end_graph = 5;
-  repeated Input input = 6;
-  repeated OpBuffer buffer = 7;
-}
-
-message OpMappingInfo {
-  string dump_path = 1;
-  oneof model_name_param {
-    string model_name = 2;
-  }
-  oneof model_id_param {
-    uint32 model_id = 3;
-  }
-  oneof step_id {
-    uint64 step_id_addr = 4;
-  }
-  oneof iterations_per_loop {
-    uint64 iterations_per_loop_addr = 5;
-  }
-  oneof loop_cond {
-    uint64 loop_cond_addr = 6;
-  }
-  uint32 flag = 7;  // 0x01 load, 0x00 unload
-  repeated Task task = 8;
-  string dump_step = 9;
-}
\ No newline at end of file
diff --git a/ge/proto/optimizer_priority.proto b/ge/proto/optimizer_priority.proto
deleted file mode 100644
index 769619cf..00000000
--- a/ge/proto/optimizer_priority.proto
+++ /dev/null
@@ -1,7 +0,0 @@
-syntax = "proto3";
-package ge.optimizers;
-
-// Default: GE>FE>AICPU
-message Priority {
-  repeated string optimizer = 1;
-}
\ No newline at end of file
diff --git a/ge/proto/task.proto b/ge/proto/task.proto
deleted file mode 100644
index 0da5631e..00000000
--- a/ge/proto/task.proto
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Apache License for more details at
- * http://www.apache.org/licenses/LICENSE-2.0
- */
-syntax = "proto3";
-
-package domi;
-
-message ModelTaskDef {
-  string version = 1;
-
-  map<string, string> attr = 9;  // Extended field
-  repeated TaskDef task = 10;
-
-  uint64 memory_size = 11;
-  uint32 stream_num = 12;
-  uint32 event_num = 13;
-  uint64 weight_size = 14;
-
-  repeated bytes op = 15;  // input/output opdef in bytes
-
-  uint64 base_addr = 16;    // base addr
-  uint64 weight_addr = 17;  // weight addr
-  uint32 batch_num = 18;
-}
-
-
-message TaskDef {
-  uint32 id = 1;
-  uint32 type = 2;
-
-  uint32 stream_id = 10;
-  uint32 event_id = 11;
-
-  KernelDef kernel = 20;
-  KernelExDef kernel_ex = 21;
-  KernelHcclDef kernel_hccl = 25;
-  EventExDef event_ex = 26;
-  LogTimeStampDef log_timestamp = 28;
-
-  uint32 label_id = 30;
-
-  MemcpyAsyncDef memcpy_async = 31;
-  StreamSwitchDef stream_switch = 32;
-  StreamActiveDef stream_active = 33;
-  bytes private_def = 34;
-  uint64 ops_kernel_store_ptr = 35;  // adjustments to other fields in the future
-  StreamSwitchNDef stream_switch_n = 36;
-
-  LabelSetDef label_set = 37;
-  LabelGotoExDef label_goto_ex = 38;
-  LabelSwitchByIndexDef label_switch_by_index = 39;
-  KernelDefWithHandle kernel_with_handle = 40;
-}
-
-message KernelDef {
-  KernelContext context = 1;
-
-  string stub_func = 10;
-  uint32 block_dim = 11;
-  uint32 args_size = 12;
-  bytes args = 13;
-  bytes sm_desc = 14;
-  bytes flowtable = 15;
-  string so_name = 16;
-  string kernel_name = 17;
-  bytes kernel_ext_info = 18;
-  uint32 kernel_ext_info_size = 19;
-}
-
-message KernelDefWithHandle {
-  KernelContext context = 1;
-
-  uint64 handle = 10;
-  string dev_func = 11;
-  uint32 block_dim = 12;
-  uint32 args_size = 13;
-  bytes args = 14;
-  bytes sm_desc = 15;
-  string original_kernel_key = 16;
-  string node_info = 17;
-}
-
-message KernelContext {
-  uint32 kernel_type = 1;
-  uint32 op_id = 2;                    // OP type in CCE
-  uint32 kernel_func_id = 3;
-  uint32 op_index = 4;                 // TE/Custom operator
-  bool is_flowtable = 5;               // Identify whether args is a flowtable structure
-  bytes args_offset = 6;               // args offset information
-  uint32 args_count = 7;               // args count
-  repeated uint32 origin_op_index = 8;
-}
-
-
-message KernelExDef {
-  uint32 flags = 1;
-
-  uint32 op_index = 4;
-  uint32 args_size = 12;
-  bytes args = 13;
-  bytes task_info = 14;  // serialized nodeDef, funcDef, inputoutput
-  uint32 task_info_size = 15;
-  bytes kernel_ext_info = 16;
-  uint32 kernel_ext_info_size = 17;
-}
-
-
-message KernelHcclDef {
-  uint32 op_index = 8;
-  string hccl_type = 9;
-}
-
-
-message EventExDef {
-  uint32 op_index = 1;
-  uint32 event_type = 2;
-}
-
-message LogTimeStampDef {
-  uint64 logid = 1;
-  bool notify = 2;
-  uint32 flat = 3;
-}
-
-message MemcpyAsyncDef {
-  uint64 dst = 1;
-  uint64 dst_max = 2;
-  uint64 src = 3;
-  uint64 count = 4;
-  uint32 kind = 5;
-  uint32 op_index = 6;
-}
-
-message StreamSwitchDef {
-  uint32 op_index = 1;
-  uint32 true_stream_id = 2;
-  int64 value = 3;
-  uint64 value_ptr = 4;
-  uint32 data_type = 5;
-}
-
-message StreamActiveDef {
-  uint32 op_index = 1;
-  uint32 active_stream_id = 2;
-}
-
-message StreamSwitchNDef {
-  uint32 op_index = 1;
-  uint32 size = 2;
-  repeated int64 target_value = 3;
-  repeated uint32 true_stream_id = 4;
-  uint32 element_size = 5;
-  uint32 data_type = 6;
-}
-
-message LabelSetDef {
-  uint32 op_index = 1;
-  uint32 label_id = 2;
-  uint32 model_id = 3;
-}
-
-message LabelGotoExDef {
-  uint32 op_index = 1;
-  uint32 label_id = 2;
-  uint32 model_id = 3;
-}
-
-message LabelSwitchByIndexDef {
-  uint32 op_index = 1;
-  uint32 label_max = 2;
-}
diff --git a/ge/proto/tensorflow/attr_value.proto b/ge/proto/tensorflow/attr_value.proto
deleted file mode 100644
index 438d7163..00000000
--- a/ge/proto/tensorflow/attr_value.proto
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "AttrValueProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-import "tensor.proto";
-import "tensor_shape.proto";
-import "types.proto";
-
-// Protocol buffer representing the value for an attr used to configure an Op.
-// Comment indicates the corresponding attr type. Only the field matching the
-// attr type may be filled.
-message AttrValue {
-  // LINT.IfChange
-  message ListValue {
-    repeated bytes s = 2;                        // "list(string)"
-    repeated int64 i = 3 [packed = true];        // "list(int)"
-    repeated float f = 4 [packed = true];        // "list(float)"
-    repeated bool b = 5 [packed = true];         // "list(bool)"
-    repeated DataType type = 6 [packed = true];  // "list(type)"
-    repeated TensorShapeProto shape = 7;         // "list(shape)"
-    repeated TensorProto tensor = 8;             // "list(tensor)"
-    repeated NameAttrList func = 9;              // "list(attr)"
-  }
-  // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc)
-
-  oneof value {
-    bytes s = 2;                 // "string"
-    int64 i = 3;                 // "int"
-    float f = 4;                 // "float"
-    bool b = 5;                  // "bool"
-    DataType type = 6;           // "type"
-    TensorShapeProto shape = 7;  // "shape"
-    TensorProto tensor = 8;      // "tensor"
-    ListValue list = 1;          // any "list(...)"
-
-    // "func" represents a function. func.name is a function's name or
-    // a primitive op's name. func.attr.first is the name of an attr
-    // defined for that function. func.attr.second is the value for
-    // that attr in the instantiation.
-    NameAttrList func = 10;
-
-    // This is a placeholder only used in nodes defined inside a
-    // function. It indicates the attr value will be supplied when
-    // the function is instantiated. For example, let us suppose a
-    // node "N" in function "FN". "N" has an attr "A" with value
-    // placeholder = "foo". When FN is instantiated with attr "foo"
-    // set to "bar", the instantiated node N's attr A will have been
-    // given the value "bar".
-    string placeholder = 9;
-  }
-}
-
-// A list of attr names and their values. The whole list is attached
-// with a string name. E.g., MatMul[T=float].
-message NameAttrList {
-  string name = 1;
-  map<string, AttrValue> attr = 2;
-}
diff --git a/ge/proto/tensorflow/function.proto b/ge/proto/tensorflow/function.proto
deleted file mode 100644
index 44681e32..00000000
--- a/ge/proto/tensorflow/function.proto
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "FunctionProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-import "attr_value.proto";
-import "node_def.proto";
-import "op_def.proto";
-
-// A library is a set of named functions.
-message FunctionDefLibrary {
-  repeated FunctionDef function = 1;
-  repeated GradientDef gradient = 2;
-}
-
-// A function can be instantiated when the runtime can bind every attr
-// with a value. When a GraphDef has a call to a function, it must
-// have binding for every attr defined in the signature.
-// * device spec, etc.
-message FunctionDef {
-  // The definition of the function's name, arguments, return values,
-  // attrs etc.
-  OpDef signature = 1;
-
-  // Attributes specific to this function definition.
-  map<string, AttrValue> attr = 5;
-
-  // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21.
-  reserved 2;
-
-  // In both of the following fields, there is the need to specify an
-  // output that is used as either the input to another node (in
-  // `node_def`) or as a return value of the function (in `ret`).
-  // Unlike the NodeDefs in GraphDef, we need to be able to specify a
-  // list in some cases (instead of just single outputs). Also, we
-  // need to be able to deal with lists of unknown length (so the
-  // output index may not be known at function definition time). So
-  // we use the following format instead:
-  // * "fun_in" where "fun_in" is the name of a function input arg in
-  //   the `signature` field above. This represents that input, whether
-  //   it is a single tensor or a list.
-  // * "fun_in:0" gives the first element of a function input arg (a
-  //   non-list input is considered a list of length 1 for these
-  //   purposes).
-  // * "node:out" where "node" is the name of a node in `node_def` and
-  //   "out" is the name one of its op's output arguments (the name
-  //   comes from the OpDef of the node's op). This represents that
-  //   node's output, whether it is a single tensor or a list.
-  //   Note: We enforce that an op's output arguments are never
-  //   renamed in the backwards-compatibility test.
-  // * "node:out:0" gives the first element of a node output arg (a
-  //   non-list output is considered a list of length 1 for these
-  //   purposes).
-  //
-  // NOT CURRENTLY SUPPORTED (but may be in the future):
-  // * "node:out:-1" gives last element in a node output list
-  // * "node:out:1:" gives a list with all but the first element in a
-  //   node output list
-  // * "node:out::-1" gives a list with all but the last element in a
-  //   node output list
-
-  // The body of the function. Unlike the NodeDefs in a GraphDef, attrs
-  // may have values of type `placeholder` and the `input` field uses
-  // the "output" format above.
-
-  // By convention, "op" in node_def is resolved by consulting with a
-  // user-defined library first. If not resolved, "func" is assumed to
-  // be a builtin op.
-  repeated NodeDef node_def = 3;
-
-  // A mapping from the output arg names from `signature` to the
-  // outputs from `node_def` that should be returned by the function.
-  map<string, string> ret = 4;
-}
-
-// GradientDef defines the gradient function of a function defined in
-// a function library.
-//
-// A gradient function g (specified by gradient_func) for a function f
-// (specified by function_name) must follow the following:
-//
-// The function 'f' must be a numerical function which takes N inputs
-// and produces M outputs. Its gradient function 'g', which is a
-// function taking N + M inputs and produces N outputs.
-//
-// I.e. if we have
-//    (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
-// then, g is
-//    (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
-//                                      dL/dy1, dL/dy2, ..., dL/dy_M),
-// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
-// loss function). dL/dx_i is the partial derivative of L with respect
-// to x_i.
-message GradientDef {
-  string function_name = 1;  // The function name.
-  string gradient_func = 2;  // The gradient function's name.
-}
diff --git a/ge/proto/tensorflow/graph.proto b/ge/proto/tensorflow/graph.proto
deleted file mode 100644
index 73bfc6ee..00000000
--- a/ge/proto/tensorflow/graph.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "GraphProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-import "node_def.proto";
-import "function.proto";
-import "versions.proto";
-
-// Represents the graph of operations
-message GraphDef {
-  repeated NodeDef node = 1;
-
-  // Compatibility versions of the graph. See core/public/version.h for version
-  // history. The GraphDef version is distinct from the TensorFlow version, and
-  // each release of TensorFlow will support a range of GraphDef versions.
-  VersionDef versions = 4;
-
-  // Deprecated single version field; use versions above instead. Since all
-  // GraphDef changes before "versions" was introduced were forward
-  // compatible, this field is entirely ignored.
-  int32 version = 3 [deprecated = true];
-
-  // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
-  //
-  // "library" provides user-defined functions.
-  //
-  // Naming:
-  //   * library.function.name are in a flat namespace.
-  //     NOTE: We may need to change it to be hierarchical to support
-  //     different orgs. E.g.,
-  //     { "/google/nn", { ... }},
-  //     { "/google/vision", { ... }}
-  //     { "/org_foo/module_bar", { ... }}
-  //     map<string, FunctionDefLibrary> named_lib;
-  //   * If node[i].op is the name of one function in "library",
-  //     node[i] is deemed as a function call. Otherwise, node[i].op
-  //     must be a primitive operation supported by the runtime.
-  //
-  //
-  // Function call semantics:
-  //
-  //   * The callee may start execution as soon as some of its inputs
-  //     are ready. The caller may want to use Tuple() mechanism to
-  //     ensure all inputs are ready in the same time.
-  //
-  //   * The consumer of return values may start executing as soon as
-  //     the return values the consumer depends on are ready. The
-  //     consumer may want to use Tuple() mechanism to ensure the
-  //     consumer does not start until all return values of the callee
-  //     function are ready.
-  FunctionDefLibrary library = 2;
-};
diff --git a/ge/proto/tensorflow/graph_library.proto b/ge/proto/tensorflow/graph_library.proto
deleted file mode 100644
index 7bca0838..00000000
--- a/ge/proto/tensorflow/graph_library.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-
-import "graph.proto";
-
-message GeGraphDef {
-  string name = 1;
-  GraphDef graph = 2;
-}
-
-message GraphDefLibrary {
-  repeated GeGraphDef graph_def = 1;
-};
\ No newline at end of file
diff --git a/ge/proto/tensorflow/node_def.proto b/ge/proto/tensorflow/node_def.proto
deleted file mode 100644
index 50cf5cac..00000000
--- a/ge/proto/tensorflow/node_def.proto
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "NodeProto";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-import "attr_value.proto";
-
-message NodeDef {
-  // The name given to this operator. Used for naming inputs,
-  // logging, visualization, etc. Unique within a single GraphDef.
-  // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*".
-  string name = 1;
-
-  // The operation name. There may be custom parameters in attrs.
-  // Op names starting with an underscore are reserved for internal use.
-  string op = 2;
-
-  // Each input is "node:src_output" with "node" being a string name and
-  // "src_output" indicating which output tensor to use from "node". If
-  // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs
-  // may optionally be followed by control inputs that have the format
-  // "^node".
-  repeated string input = 3;
-
-  // A (possibly partial) specification for the device on which this
-  // node should be placed.
-  // The expected syntax for this string is as follows:
-  //
-  // DEVICE_SPEC ::= PARTIAL_SPEC
-  //
-  // PARTIAL_SPEC ::= ("/" CONSTRAINT) *
-  // CONSTRAINT ::= ("job:" JOB_NAME)
-  //              | ("replica:" [1-9][0-9]*)
-  //              | ("task:" [1-9][0-9]*)
-  //              | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") )
-  //
-  // Valid values for this string include:
-  // * "/job:worker/replica:0/task:1/device:GPU:3"  (full specification)
-  // * "/job:worker/device:GPU:3"                   (partial specification)
-  // * ""                                           (no specification)
-  //
-  // If the constraints do not resolve to a single device (or if this
-  // field is empty or not present), the runtime will attempt to
-  // choose a device automatically.
-  string device = 4;
-
-  // Operation-specific graph-construction-time configuration.
-  // Note that this should include all attrs defined in the
-  // corresponding OpDef, including those with a value matching
-  // the default -- this allows the default to change and makes
-  // NodeDefs easier to interpret on their own. However, if
-  // an attr with a default is not specified in this list, the
-  // default will be used.
-  // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
-  // one of the names from the corresponding OpDef's attr field).
-  // The values must have a type matching the corresponding OpDef
-  // attr's type field.
-  // Add some examples here showing best practices.
-  map<string, AttrValue> attr = 5;
-};
diff --git a/ge/proto/tensorflow/op_def.proto b/ge/proto/tensorflow/op_def.proto
deleted file mode 100644
index 7f0e8ce2..00000000
--- a/ge/proto/tensorflow/op_def.proto
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "OpDefProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-import "attr_value.proto";
-import "types.proto";
-
-// Defines an operation. A NodeDef in a GraphDef specifies an Op by
-// using the "op" field which should match the name of a OpDef.
-// LINT.IfChange
-message OpDef {
-  // Op names starting with an underscore are reserved for internal use.
-  // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*".
-  string name = 1;
-
-  // For describing inputs and outputs.
-  message ArgDef {
-    // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*".
-    string name = 1;
-
-    // Human readable description.
-    string description = 2;
-
-    // Describes the type of one or more tensors that are accepted/produced
-    // by this input/output arg. The only legal combinations are:
-    // * For a single tensor: either the "type" field is set or the
-    //   "type_attr" field is set to the name of an attr with type "type".
-    // * For a sequence of tensors with the same type: the "number_attr"
-    //   field will be set to the name of an attr with type "int", and
-    //   either the "type" or "type_attr" field will be set as for
-    //   single tensors.
-    // * For a sequence of tensors, the "type_list_attr" field will be set
-    //   to the name of an attr with type "list(type)".
-    DataType type = 3;
-    string type_attr = 4;    // if specified, attr must have type "type"
-    string number_attr = 5;  // if specified, attr must have type "int"
-    // If specified, attr must have type "list(type)", and none of
-    // type, type_attr, and number_attr may be specified.
-    string type_list_attr = 6;
-
-    // For inputs: if true, the inputs are required to be refs.
-    //   By default, inputs can be either refs or non-refs.
-    // For outputs: if true, outputs are refs, otherwise they are not.
-    bool is_ref = 16;
-  };
-
-  // Description of the input(s).
-  repeated ArgDef input_arg = 2;
-
-  // Description of the output(s).
-  repeated ArgDef output_arg = 3;
-
-  // Description of the graph-construction-time configuration of this
-  // Op. That is to say, this describes the attr fields that will
-  // be specified in the NodeDef.
-  message AttrDef {
-    // A descriptive name for the argument. May be used, e.g. by the
-    // Python client, as a keyword argument name, and so should match
-    // the regexp "[a-z][a-z0-9_]+".
-    string name = 1;
-
-    // One of the type names from attr_value.proto ("string", "list(string)",
-    // "int", etc.).
-    string type = 2;
-
-    // A reasonable default for this attribute if the user does not supply
-    // a value. If not specified, the user must supply a value.
-    AttrValue default_value = 3;
-
-    // Human-readable description.
-    string description = 4;
-
-
-    // --- Constraints ---
-    // These constraints are only in effect if specified. Default is no
-    // constraints.
-
-    // For type == "int", this is a minimum value. For "list(___)"
-    // types, this is the minimum length.
-    bool has_minimum = 5;
-    int64 minimum = 6;
-
-    // The set of allowed values. Has type that is the "list" version
-    // of the "type" field above (uses the "list" field of AttrValue).
-    // If type == "type" or "list(type)" above, then the "type" field
-    // of "allowed_values.list" has the set of allowed DataTypes.
-    // If type == "string" or "list(string)", then the "s" field of
-    // "allowed_values.list" has the set of allowed strings.
-    AttrValue allowed_values = 7;
-  }
-  repeated AttrDef attr = 4;
-
-  // Optional deprecation based on GraphDef versions.
-  OpDeprecation deprecation = 8;
-
-  // One-line human-readable description of what the Op does.
-  string summary = 5;
-
-  // Additional, longer human-readable description of what the Op does.
-  string description = 6;
-
-  // -------------------------------------------------------------------------
-  // Which optimizations this operation can participate in.
-
-  // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs)
-  bool is_commutative = 18;
-
-  // If is_aggregate is true, then this operation accepts N >= 2
-  // inputs and produces 1 output all of the same type. Should be
-  // associative and commutative, and produce output with the same
-  // shape as the input. The optimizer may replace an aggregate op
-  // taking input from multiple devices with a tree of aggregate ops
-  // that aggregate locally within each device (and possibly within
-  // groups of nearby devices) before communicating.
-  bool is_aggregate = 16;  // for things like add
-
-  // Other optimizations go here, like
-  //   can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc.
-
-  // -------------------------------------------------------------------------
-  // Optimization constraints.
-
-  // Ops are marked as stateful if their behavior depends on some state beyond
-  // their input tensors (e.g. variable reading op) or if they have
-  // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops
-  // must always produce the same output for the same input and have
-  // no side-effects.
-  //
-  // By default Ops may be moved between devices. Stateful ops should
-  // either not be moved, or should only be moved if that state can also
-  // be moved (e.g. via some sort of save / restore).
-  // Stateful ops are guaranteed to never be optimized away by Common
-  // Subexpression Elimination (CSE).
-  bool is_stateful = 17;  // for things like variables, queue
-
-  // -------------------------------------------------------------------------
-  // Non-standard options.
-
-  // By default, all inputs to an Op must be initialized Tensors. Ops
-  // that may initialize tensors for the first time should set this
-  // field to true, to allow the Op to take an uninitialized Tensor as
-  // input.
-  bool allows_uninitialized_input = 19;  // for Assign, etc.
-};
-// LINT.ThenChange(
-//   https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc)
-
-// Information about version-dependent deprecation of an op
-message OpDeprecation {
-  // First GraphDef version at which the op is disallowed.
-  int32 version = 1;
-
-  // Explanation of why it was deprecated and what to use instead.
-  string explanation = 2;
-};
-
-// A collection of OpDefs
-message OpList {
-  repeated OpDef op = 1;
-};
diff --git a/ge/proto/tensorflow/resource_handle.proto b/ge/proto/tensorflow/resource_handle.proto
deleted file mode 100644
index 91c46c9a..00000000
--- a/ge/proto/tensorflow/resource_handle.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd.
- */
-
-syntax = "proto3";
-
-package domi.tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "ResourceHandle";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-
-// Protocol buffer representing a handle to a tensorflow resource. Handles are
-// not valid across executions, but can be serialized back and forth from within
-// a single run.
-message ResourceHandleProto {
-  // Unique name for the device containing the resource.
-  string device = 1;
-
-  // Container in which this resource is placed.
-  string container = 2;
-
-  // Unique name of this resource.
-  string name = 3;
-
-  // Hash code for the type of the resource. Is only valid in the same device
-  // and in the same execution.
-  uint64 hash_code = 4;
-
-  // For debug-only, the name of the type pointed to by this handle, if
-  // available.
-  string maybe_type_name = 5;
-};
diff --git a/ge/proto/tensorflow/tensor.proto b/ge/proto/tensorflow/tensor.proto
deleted file mode 100644
index 48eeb6c4..00000000
--- a/ge/proto/tensorflow/tensor.proto
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow
- *
- * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model.
- * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications").
- * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TensorProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "resource_handle.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing a tensor. -message TensorProto { - DataType dtype = 1; - - // Shape of the tensor. - TensorShapeProto tensor_shape = 2; - - // Only one of the representations below is set, one of "tensor_contents" and - // the "xxx_val" attributes. We are not using oneof because as oneofs cannot - // contain repeated fields it would require another extra set of messages. - - // Version number. - // - // In version 0, if the "repeated xxx" representations contain only one - // element, that element is repeated to fill the shape. This makes it easy - // to represent a constant Tensor with a single value. - int32 version_number = 3; - - // Serialized raw tensor content from either Tensor::AsProtoTensorContent or - // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation - // can be used for all tensor types. The purpose of this representation is to - // reduce serialization overhead during RPC call by avoiding serialization of - // many repeated small items. - bytes tensor_content = 4; - - // Type specific representations that make it easy to create tensor protos in - // all languages. Only the representation corresponding to "dtype" can - // be set. The values hold the flattened representation of the tensor in - // row major order. - - // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll - // have some pointless zero padding for each value here. - repeated int32 half_val = 13 [packed = true]; - - // DT_FLOAT. - repeated float float_val = 5 [packed = true]; - - // DT_DOUBLE. - repeated double double_val = 6 [packed = true]; - - // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. - repeated int32 int_val = 7 [packed = true]; - - // DT_STRING - repeated bytes string_val = 8; - - // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real - // and imaginary parts of i-th single precision complex. - repeated float scomplex_val = 9 [packed = true]; - - // DT_INT64 - repeated int64 int64_val = 10 [packed = true]; - - // DT_BOOL - repeated bool bool_val = 11 [packed = true]; - - // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real - // and imaginary parts of i-th double precision complex. - repeated double dcomplex_val = 12 [packed = true]; - - // DT_RESOURCE - repeated ResourceHandleProto resource_handle_val = 14; - - // DT_VARIANT - repeated VariantTensorDataProto variant_val = 15; - - // DT_UINT32 - repeated uint32 uint32_val = 16 [packed = true]; - - // DT_UINT64 - repeated uint64 uint64_val = 17 [packed = true]; -}; - -// Protocol buffer representing the serialization format of DT_VARIANT tensors. -message VariantTensorDataProto { - // Name of the type of objects being serialized. - string type_name = 1; - // Portions of the object that are not Tensors. - bytes metadata = 2; - // Tensors contained within objects being serialized. 
- repeated TensorProto tensors = 3; -} diff --git a/ge/proto/tensorflow/tensor_shape.proto b/ge/proto/tensorflow/tensor_shape.proto deleted file mode 100644 index 3a6d8c5a..00000000 --- a/ge/proto/tensorflow/tensor_shape.proto +++ /dev/null @@ -1,53 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -// Protocol buffer representing the shape of tensors. - -syntax = "proto3"; -option cc_enable_arenas = true; -option java_outer_classname = "TensorShapeProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -package domi.tensorflow; - -// Dimensions of a tensor. -message TensorShapeProto { - // One dimension of the tensor. - message Dim { - // Size of the tensor in that dimension. - // This value must be >= -1, but values of -1 are reserved for "unknown" - // shapes (values of -1 mean "unknown" dimension). Certain wrappers - // that work with TensorShapeProto may fail at runtime when deserializing - // a TensorShapeProto containing a dim value of -1. - int64 size = 1; - - // Optional name of the tensor dimension. - string name = 2; - }; - - // Dimensions of the tensor, such as {"input", 30}, {"output", 40} - // for a 30 x 40 2D tensor. If an entry has size -1, this - // corresponds to a dimension of unknown size. The names are - // optional. - // - // The order of entries in "dim" matters: It indicates the layout of the - // values in the tensor in-memory representation. - // - // The first entry in "dim" is the outermost dimension used to layout the - // values, the last entry is the innermost dimension. This matches the - // in-memory layout of RowMajor Eigen tensors. - // - // If "dim.size()" > 0, "unknown_rank" must be false. - repeated Dim dim = 2; - - // If true, the number of dimensions in the shape is unknown. - // - // If true, "dim.size()" must be 0. - bool unknown_rank = 3; -}; diff --git a/ge/proto/tensorflow/types.proto b/ge/proto/tensorflow/types.proto deleted file mode 100644 index f40e49cb..00000000 --- a/ge/proto/tensorflow/types.proto +++ /dev/null @@ -1,82 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TypesProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// LINT.IfChange -enum DataType { - // Not a legal value for DataType. Used to indicate a DataType field - // has not been set. - DT_INVALID = 0; - - // Data types that all computation devices are expected to be - // capable to support. 
- DT_FLOAT = 1; - DT_DOUBLE = 2; - DT_INT32 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_INT8 = 6; - DT_STRING = 7; - DT_COMPLEX64 = 8; // Single-precision complex - DT_INT64 = 9; - DT_BOOL = 10; - DT_QINT8 = 11; // Quantized int8 - DT_QUINT8 = 12; // Quantized uint8 - DT_QINT32 = 13; // Quantized int32 - DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. - DT_QINT16 = 15; // Quantized int16 - DT_QUINT16 = 16; // Quantized uint16 - DT_UINT16 = 17; - DT_COMPLEX128 = 18; // Double-precision complex - DT_HALF = 19; - DT_RESOURCE = 20; - DT_VARIANT = 21; // Arbitrary C++ data types - DT_UINT32 = 22; - DT_UINT64 = 23; - - // Do not use! These are only for parameters. Every enum above - // should have a corresponding value below (verified by types_test). - DT_FLOAT_REF = 101; - DT_DOUBLE_REF = 102; - DT_INT32_REF = 103; - DT_UINT8_REF = 104; - DT_INT16_REF = 105; - DT_INT8_REF = 106; - DT_STRING_REF = 107; - DT_COMPLEX64_REF = 108; - DT_INT64_REF = 109; - DT_BOOL_REF = 110; - DT_QINT8_REF = 111; - DT_QUINT8_REF = 112; - DT_QINT32_REF = 113; - DT_BFLOAT16_REF = 114; - DT_QINT16_REF = 115; - DT_QUINT16_REF = 116; - DT_UINT16_REF = 117; - DT_COMPLEX128_REF = 118; - DT_HALF_REF = 119; - DT_RESOURCE_REF = 120; - DT_VARIANT_REF = 121; - DT_UINT32_REF = 122; - DT_UINT64_REF = 123; -} -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/c/c_api.h, -// https://www.tensorflow.org/code/tensorflow/go/tensor.go, -// https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.h, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, -// https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, -// https://www.tensorflow.org/code/tensorflow/python/framework/function.py) diff --git a/ge/proto/tensorflow/versions.proto b/ge/proto/tensorflow/versions.proto deleted file mode 100644 index 4e81548f..00000000 --- a/ge/proto/tensorflow/versions.proto +++ /dev/null @@ -1,39 +0,0 @@ -/** - * This file is part of Open Source Software TensorFlow, version 1.15.0 https://github.com/tensorflow/tensorflow - * - * This file is included by GraphEngine so as to support model format conversion from tensorflow model to GraphEngine model. - * This file in this distribution may have been modified by Huawei Technologies Co., Ltd ("Huawei Modifications"). - * All Huawei Modifications are Copyright 2019-2020 Huawei Technologies Co., Ltd. - */ - -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "VersionsProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Version information for a piece of serialized data -// -// There are different types of versions for each type of data -// (GraphDef, etc.), but they all have the same common shape -// described here. -// -// Each consumer has "consumer" and "min_producer" versions (specified -// elsewhere). A consumer is allowed to consume this data if -// -// producer >= min_producer -// consumer >= min_consumer -// consumer not in bad_consumers -// -message VersionDef { - // The version of the code that produced this data. - int32 producer = 1; - - // Any consumer below this version is not allowed to consume this data. - int32 min_consumer = 2; - - // Specific consumer versions which are disallowed (e.g. due to bugs). 
- repeated int32 bad_consumers = 3; -}; From 18496bce86f7f55519b8c6bf5efd6b00ecb9a665 Mon Sep 17 00:00:00 2001 From: yskhhh Date: Thu, 17 Jun 2021 14:49:57 +0800 Subject: [PATCH 044/226] add debug task info --- .../aicore/aicore_node_executor.cc | 6 +++ .../node_executor/aicore/aicore_op_task.h | 2 + ge/single_op/single_op.cc | 5 ++ ge/single_op/task/build_task_utils.cc | 54 +++++++++++++++++-- ge/single_op/task/build_task_utils.h | 8 +++ ge/single_op/task/op_task.cc | 1 + ge/single_op/task/op_task.h | 2 + tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 31 +++++++++++ tests/ut/ge/single_op/single_op_unittest.cc | 13 ++++- 9 files changed, 118 insertions(+), 4 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc index 7ebb9e39..efa864ed 100755 --- a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc +++ b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc @@ -18,6 +18,7 @@ #include "framework/common/taskdown_common.h" #include "hybrid/executor/hybrid_execution_context.h" #include "external/runtime/rt_error_codes.h" +#include "single_op/task/build_task_utils.h" namespace ge { namespace hybrid { @@ -196,6 +197,11 @@ Status AiCoreNodeTask::ExecuteAsync(TaskContext &context, std::function RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] Start"); GE_CHK_STATUS_RET_NOLOG((*it)->LaunchKernel(context.GetStream())); GE_CHK_STATUS_RET_NOLOG(CheckOverflow(context)); + GE_CHECK_NOTNULL(context.GetExecutionContext()->model); + GELOGD("[DEBUG_TASK_INFO : Executor Task] %s/%s %s", + context.GetExecutionContext()->model->GetModelName().c_str(), + (*it)->GetName().empty() ? (*it)->GetLogName().c_str() : (*it)->GetName().c_str(), + BuildTaskUtils::GetTaskInfo(context).c_str()); // save profiling data uint32_t task_id = 0; uint32_t stream_id = 0; diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index 8d7b7f1e..9db958d2 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -72,6 +72,8 @@ class AiCoreOpTask { const std::string& GetName() const; + const std::string& GetLogName() const {return log_name_;} + bool GetClearAtomic() const {return clear_atomic_;} uint32_t GetBlockDim() const {return block_dim_;} diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc index d09e8398..fc34e513 100755 --- a/ge/single_op/single_op.cc +++ b/ge/single_op/single_op.cc @@ -297,6 +297,9 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status SingleOp::ExecuteAsync(c for (auto &task : tasks_) { ret = task->LaunchKernel(stream_); + GELOGD("[DEBUG_TASK_INFO : Static Task] %s %s", + task->GetTaskName().c_str(), + BuildTaskUtils::GetTaskInfo(task->GetOpdesc(), inputs, outputs).c_str()); if (ret != SUCCESS) { return ret; } @@ -447,6 +450,8 @@ Status DynamicSingleOp::ExecuteAsync(const vector &input_desc, } else { GE_CHK_STATUS_RET_NOLOG(op_task_->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_)); } + GELOGD("[DEBUG_TASK_INFO : Dynamic Task] %s", + BuildTaskUtils::GetTaskInfo(op_task_->GetOpdesc(), input_buffers, output_buffers).c_str()); GE_CHK_STATUS_RET_NOLOG(op_task_->OpenDump(stream_)); GE_CHK_STATUS_RET_NOLOG(ProfilingTaskInfo(op_task_.get(), kShapeTypeDynamic)); return SUCCESS; diff --git a/ge/single_op/task/build_task_utils.cc b/ge/single_op/task/build_task_utils.cc index 9e4d55e1..b3a7ae09 100644 --- 
a/ge/single_op/task/build_task_utils.cc +++ b/ge/single_op/task/build_task_utils.cc @@ -70,7 +70,9 @@ std::vector<void *> BuildTaskUtils::GetKernelArgs(const OpDescPtr &op_desc, return JoinAddresses(addresses); } -std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) { +std::string BuildTaskUtils::InnerGetTaskInfo(const OpDescPtr &op_desc, + const std::vector<void *> &input_addrs, + const std::vector<void *> &output_addrs) { std::stringstream ss; if (op_desc != nullptr) { auto op_type = op_desc->GetType(); @@ -87,7 +89,10 @@ std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) { } ss << TypeUtils::DataTypeToSerialString(input->GetDataType()) << " "; ss << TypeUtils::FormatToSerialString(input->GetFormat()); - ss << VectorToString(input->GetShape().GetDims()); + ss << VectorToString(input->GetShape().GetDims()) << " "; + if (idx < input_addrs.size()) { + ss << input_addrs[idx]; + } if (idx < op_desc->GetInputsSize() - 1) { ss << ","; } @@ -101,7 +106,10 @@ std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) { const GeShape &out_shape = output->GetShape(); const auto &dims = out_shape.GetDims(); ss << TypeUtils::FormatToSerialString(out_format); - ss << VectorToString(dims); + ss << VectorToString(dims) << " "; + if (idx < output_addrs.size()) { + ss << output_addrs[idx]; + } if (idx < op_desc->GetOutputsSize() - 1) { ss << ","; } @@ -110,4 +118,44 @@ std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) { } return ss.str(); } + +std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) { + vector<void *> input_addrs; + vector<void *> output_addrs; + return InnerGetTaskInfo(op_desc, input_addrs, output_addrs); +} + +std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc, + const std::vector<DataBuffer> &inputs, + const std::vector<DataBuffer> &outputs) { + vector<void *> input_addrs; + vector<void *> output_addrs; + GE_CHECK_NOTNULL_EXEC(op_desc, return ""); + if (op_desc->GetAllInputsSize() == inputs.size()) { + std::for_each(inputs.begin(), inputs.end(), [&](const DataBuffer &db) { input_addrs.push_back(db.data); }); + } + if (op_desc->GetOutputsSize() == outputs.size()) { + std::for_each(outputs.begin(), outputs.end(), [&](const DataBuffer &db) { output_addrs.push_back(db.data); }); + } + return InnerGetTaskInfo(op_desc, input_addrs, output_addrs); +} + +std::string BuildTaskUtils::GetTaskInfo(const hybrid::TaskContext &task_context) { + auto &node_item = task_context.GetNodeItem(); + auto op_desc = node_item.GetOpDesc(); + GE_CHECK_NOTNULL_EXEC(op_desc, return ""); + vector<void *> input_addrs; + vector<void *> output_addrs; + if (op_desc->GetAllInputsSize() == static_cast<size_t>(task_context.NumInputs())) { + for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) { + input_addrs.push_back(task_context.GetInput(i)->GetData()); + } + } + if (op_desc->GetOutputsSize() == static_cast<size_t>(task_context.NumOutputs())) { + for (size_t i = 0; i < op_desc->GetOutputsSize(); ++i) { + output_addrs.push_back(task_context.GetOutput(i)->GetData()); + } + } + return InnerGetTaskInfo(op_desc, input_addrs, output_addrs); +} } // namespace ge diff --git a/ge/single_op/task/build_task_utils.h b/ge/single_op/task/build_task_utils.h index 7a2369e4..68894f5b 100644 --- a/ge/single_op/task/build_task_utils.h +++ b/ge/single_op/task/build_task_utils.h @@ -23,6 +23,7 @@ #include "graph/op_desc.h" #include "single_op/single_op.h" #include "single_op/single_op_model.h" +#include "hybrid/node_executor/task_context.h" namespace ge { class BuildTaskUtils { @@ -35,7 +36,14 @@ class BuildTaskUtils { bool keep_workspace = true); static std::vector<void *>
JoinAddresses(const std::vector<std::vector<void *>> &addresses); static std::vector<void *> GetKernelArgs(const OpDescPtr &op_desc, const SingleOpModelParam &param); + static std::string InnerGetTaskInfo(const OpDescPtr &op_desc, + const std::vector<void *> &input_addrs, + const std::vector<void *> &output_addrs); static std::string GetTaskInfo(const OpDescPtr &op_desc); + static std::string GetTaskInfo(const OpDescPtr &op_desc, + const std::vector<DataBuffer> &inputs, + const std::vector<DataBuffer> &outputs); + static std::string GetTaskInfo(const hybrid::TaskContext& task_context); template <typename T> static std::string VectorToString(const std::vector<T> &values) { std::stringstream ss; diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index e48677f8..02fc96f3 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -89,6 +89,7 @@ Status OpTask::OpenDump(rtStream_t stream) { void TbeOpTask::SetStubFunc(const std::string &name, const void *stub_func) { this->stub_name_ = name; this->stub_func_ = stub_func; + this->task_name_ = name; } void TbeOpTask::SetKernelArgs(std::unique_ptr<uint8_t[]> &&args, size_t arg_size, uint32_t block_dim, diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index ed6cf40f..5efb4472 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -44,6 +44,7 @@ class OpTask { virtual Status UpdateArgTable(const SingleOpModelParam &param); void SetModelArgs(std::string model_name, uint32_t model_id); Status GetProfilingArgs(TaskDescInfo &task_desc_info, uint32_t &model_id); + const std::string &GetTaskName() const {return task_name_;} void SetOpDesc(const OpDescPtr &op_desc) { op_desc_ = op_desc; } @@ -66,6 +67,7 @@ class OpTask { std::string model_name_; uint32_t model_id_ = 0; uint32_t block_dim_ = 1; + std::string task_name_; }; class TbeOpTask : public OpTask { diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 228af832..24a60413 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -34,12 +34,14 @@ #include "hybrid/executor/hybrid_execution_context.h" #include "hybrid/executor/hybrid_model_executor.h" #include "hybrid/node_executor/aicore/aicore_task_builder.h" +#include "hybrid/node_executor/aicore/aicore_node_executor.h" #include "graph/load/model_manager/tbe_handle_store.h" #include "graph/manager/graph_mem_allocator.h" #include "hybrid/common/npu_memory_allocator.h" #include "graph/types.h" #include "graph/utils/tensor_utils.h" #include "graph/testcase/ge_graph/graph_builder_utils.h" +#include "single_op/task/build_task_utils.h" #include "graph/op_desc_impl.h" #undef private #undef protected @@ -746,4 +748,33 @@ TEST_F(UtestGeHybrid, TestParseDependencies) { AttrUtils::SetTensor(tensor_desc, "_value", tensor); std::set<NodePtr> dependent_for_shape_inference; ASSERT_EQ(builder.ParseDependencies(*node_item, deps, dependent_for_shape_inference), SUCCESS); +} + +TEST_F(UtestGeHybrid, TestTaskExecuteAsync) { + auto graph = make_shared<ComputeGraph>("graph"); + OpDescPtr op_desc = CreateOpDesc("Add", "Add"); + GeShape shape({2, 16}); + GeTensorDesc tensor_desc(shape); + op_desc->AddInputDesc(tensor_desc); + op_desc->AddInputDesc(tensor_desc); + op_desc->AddOutputDesc(tensor_desc); + auto node = graph->AddNode(op_desc); + std::unique_ptr<NodeItem> node_item; + NodeItem::Create(node, node_item); + node_item->input_start = 0; + node_item->output_start = 0; + + GraphExecutionContext execution_context; + GraphItem graph_item; + SubgraphContext subgraph_context(&graph_item, &execution_context);
ASSERT_EQ(subgraph_context.Init(), SUCCESS); + subgraph_context.all_inputs_.resize(2); + subgraph_context.all_outputs_.resize(1); + auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get()); + auto task_context = *node_state->GetTaskContext(); + ASSERT_NE(BuildTaskUtils::GetTaskInfo(task_context), ""); + std::unique_ptr<AiCoreOpTask> task1(new AiCoreOpTask()); + std::vector<std::unique_ptr<AiCoreOpTask>> tasks; + AiCoreNodeTask node_task(std::move(tasks)); + ASSERT_EQ(node_task.ExecuteAsync(task_context, nullptr), SUCCESS); } \ No newline at end of file diff --git a/tests/ut/ge/single_op/single_op_unittest.cc b/tests/ut/ge/single_op/single_op_unittest.cc index db3de7ec..831f3f16 100644 --- a/tests/ut/ge/single_op/single_op_unittest.cc +++ b/tests/ut/ge/single_op/single_op_unittest.cc @@ -23,6 +23,7 @@ #define private public #include "single_op/single_op.h" #include "single_op/single_op_manager.h" +#include "single_op/task/build_task_utils.h" #undef private #undef protected @@ -126,9 +127,19 @@ TEST_F(UtestSingleOp, test_singleop_execute_async1) { SingleOpModelParam model_params; single_op.running_param_.reset(new (std::nothrow)SingleOpModelParam(model_params)); single_op.args_.resize(1); + + auto *tbe_task = new (std::nothrow) TbeOpTask(); + ge::OpDescPtr op_desc = std::make_shared<OpDesc>("Mul", MATMUL); + EXPECT_EQ(op_desc->AddInputDesc("x", GeTensorDesc(GeShape({2}), FORMAT_NCHW)), GRAPH_SUCCESS); + EXPECT_EQ(op_desc->AddOutputDesc("x", GeTensorDesc(GeShape({2}), FORMAT_NCHW)), GRAPH_SUCCESS); + EXPECT_NE(BuildTaskUtils::GetTaskInfo(op_desc), ""); + ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default"); + ge::NodePtr node = graph->AddNode(op_desc); + tbe_task->node_ = node; + tbe_task->op_desc_ = op_desc; + single_op.tasks_.push_back(tbe_task); EXPECT_EQ(single_op.hybrid_model_executor_, nullptr); EXPECT_EQ(single_op.running_param_->mem_base, nullptr); - EXPECT_EQ(single_op.tasks_.size(), 0); EXPECT_EQ(single_op.ExecuteAsync(input_buffers, output_buffers), SUCCESS); } From 9476853d22cfe73ed8270b9aa0890e96c9ebb70c Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 17 Jun 2021 14:51:39 +0800 Subject: [PATCH 045/226] Adaptation rectification of op_tiling.
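The hunks below swap the legacy optiling::OpRunInfo struct (direct field access) for the accessor-based optiling::utils::OpRunInfo and the V2 calculate entry points. For reference, a minimal sketch of the new call pattern, pieced together from the hunks in this patch; the node variable and the surrounding error handling are illustrative, not part of the diff:

    // Sketch only: the V2 tiling interface as used by this patch.
    // Constructor arguments mirror the old field initializations
    // (block_dim, clear_atomic, plus a third value the diff passes as 0).
    optiling::utils::OpRunInfo run_info(-1, true, 0);
    GE_CHK_STATUS_RET(optiling::OpParaCalculateV2(*node, run_info),
                      "Failed to calc tiling data of node %s.", node->GetName().c_str());
    uint32_t block_dim = run_info.GetBlockDim();                  // was run_info.block_dim
    bool clear_atomic = run_info.GetClearAtomic();                // was run_info.clear_atomic
    std::vector<int64_t> workspaces;
    run_info.GetAllWorkspaces(workspaces);                        // was run_info.workspaces
    std::string tiling_data = run_info.GetAllTilingData().str();  // was run_info.tiling_data.str()
    uint32_t tiling_key = run_info.GetTilingKey();                // was run_info.tiling_key

Hiding the fields behind accessors means later layout changes inside optiling no longer ripple into every GE call site.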
--- .../node_executor/aicore/aicore_op_task.cc | 24 +++++++++---------- .../node_executor/aicore/aicore_op_task.h | 4 ++-- ge/single_op/task/op_task.cc | 13 +++++----- metadef | 2 +- parser | 2 +- 5 files changed, 22 insertions(+), 23 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 8cd24bd1..76082cb3 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -25,7 +25,7 @@ #include "single_op/task/build_task_utils.h" #include "single_op/task/tbe_task_builder.h" -using optiling::OpRunInfo; +using optiling::utils::OpRunInfo; namespace ge { namespace hybrid { @@ -359,9 +359,7 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { GE_CHECK_NOTNULL(op_desc); GELOGD("[%s] Start to update tiling info for task: [%s]", node->GetName().c_str(), stub_name_.c_str()); - OpRunInfo tiling_info; - tiling_info.block_dim = -1; // codex: Using uninitialized value - tiling_info.clear_atomic = true; + OpRunInfo tiling_info(-1, true, 0); auto execution_context = context.GetExecutionContext(); @@ -370,12 +368,14 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End"); // update op args by tiling info - block_dim_ = static_cast<uint32_t>(tiling_info.block_dim); - op_desc->SetWorkspaceBytes(tiling_info.workspaces); - clear_atomic_ = tiling_info.clear_atomic; - - tiling_data_ = tiling_info.tiling_data.str(); - tiling_key_ = tiling_info.tiling_key; + block_dim_ = tiling_info.GetBlockDim(); + clear_atomic_ = tiling_info.GetClearAtomic(); + std::vector<int64_t> workspaces; + tiling_info.GetAllWorkspaces(workspaces); + op_desc->SetWorkspaceBytes(workspaces); + + tiling_data_ = tiling_info.GetAllTilingData().str(); + tiling_key_ = tiling_info.GetTilingKey(); GELOGD("Successfully getting [tiling_key] : %u", tiling_key_); if (tiling_data_.empty()) { GELOGD("[%s] Tiling data is empty.", op_desc->GetName().c_str()); @@ -412,7 +412,7 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { Status AiCoreOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpParaCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(OpParaCalculate(*node, tiling_info), + GE_CHK_STATUS_RET(optiling::OpParaCalculateV2(*node, tiling_info), "[Invoke][OpParaCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpParaCalculate successfully.", node->GetName().c_str()); @@ -633,7 +633,7 @@ std::string AtomicAddrCleanOpTask::GetKeyForKernelName(const OpDesc &op_desc) co Status AtomicAddrCleanOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpAtomicCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(OpAtomicCalculate(*node, tiling_info), + GE_CHK_STATUS_RET(optiling::OpAtomicCalculateV2(*node, tiling_info), "[Invoke][OpAtomicCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpAtomicCalculate successfully.", node->GetName().c_str()); diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index 8d7b7f1e..3c8db8c9 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -85,7 +85,7 @@ class AiCoreOpTask { virtual std::string GetKeyForTvmMagic() const; virtual std::string GetKeyForTvmMetaData() const;
virtual std::string GetKeyForKernelName(const OpDesc &op_desc) const; - virtual Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info); + virtual Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info); std::unique_ptr tiling_buffer_ = nullptr; std::string tiling_data_; @@ -130,7 +130,7 @@ class AtomicAddrCleanOpTask : public AiCoreOpTask { std::string GetKeyForTvmMagic() const override; std::string GetKeyForTvmMetaData() const override; std::string GetKeyForKernelName(const OpDesc &op_desc) const override; - Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info) override; + Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info) override; private: Status InitAtomicAddrCleanIndices(const OpDesc &op_desc); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index e48677f8..66d70e7e 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -224,18 +224,17 @@ Status TbeOpTask::LaunchKernel(rtStream_t stream) { Status TbeOpTask::UpdateRunInfo() { // invoke OpParaCalculate GELOGD("Start to invoke OpParaCalculate."); - optiling::OpRunInfo run_info; - run_info.block_dim = 0; - auto ret = optiling::OpParaCalculate(*node_, run_info); + optiling::utils::OpRunInfo run_info(0, true, 0); + auto ret = optiling::OpParaCalculateV2(*node_, run_info); if (ret != GRAPH_SUCCESS) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpParaCalculate] failed, ret = %u.", ret); REPORT_INNER_ERROR("E19999", "invoke OpParaCalculate failed, ret = %u.", ret); return ACL_ERROR_GE_INTERNAL_ERROR; } - block_dim_ = run_info.block_dim; - tiling_data_ = run_info.tiling_data.str(); - tiling_key_ = run_info.tiling_key; - run_info_workspaces_ = run_info.workspaces; + block_dim_ = run_info.GetBlockDim(); + tiling_data_ = run_info.GetAllTilingData().str(); + tiling_key_ = run_info.GetTilingKey(); + run_info.GetAllWorkspaces(run_info_workspaces_); GELOGD("Done invoking OpParaCalculate successfully. block_dim = %u, tiling size = %zu, tiling_key = %u", block_dim_, tiling_data_.size(), tiling_key_); return SUCCESS; diff --git a/metadef b/metadef index 8c5fd448..e189fc7f 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 8c5fd4486f870d8b63213565aa39fdf1ba1e497a +Subproject commit e189fc7f4da9f7714f009d70da4db627de17955d diff --git a/parser b/parser index 3073129b..db5ce472 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit 3073129b68c0fae12a8b7531d60782e39128a28c +Subproject commit db5ce472de0086c3e2abdaab3b0685c1d2656c96 From bd1beee90c760f8d6a61b255bfa5c54a7939fd99 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 17 Jun 2021 15:41:02 +0800 Subject: [PATCH 046/226] Fix zip bug. --- build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sh b/build.sh index 96c46e1a..bd471c99 100755 --- a/build.sh +++ b/build.sh @@ -371,6 +371,6 @@ elif [ "X$MINDSPORE_MODE" = "Xon" ] then cd "${OUTPUT_PATH}" find ./ -name graphengine_lib.tar -exec rm {} \; - tar -cf graphengine_lib.tar lib + tar -zcf graphengine_lib.tar lib fi echo "---------------- GraphEngine package archive generated ----------------" From fd51637c46ac0b2518e43b884a426e016ee198a4 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 17 Jun 2021 15:44:03 +0800 Subject: [PATCH 047/226] Fix zip bug. 
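Same fix as patch 046, now applied to the per-platform archives in generate_package(): plain tar -cf writes an uncompressed archive, while the MindSpore branch already gzip-compresses its graphengine_lib.tar. A small shell sketch of the difference (archive and directory names taken from build.sh):

    tar -cf graphengine_lib.tar fwkacllib    # before: uncompressed tar archive
    tar -zcf graphengine_lib.tar fwkacllib   # after: gzip-compressed archive, same file name
    # GNU tar auto-detects compression when reading, so existing `tar -xf`
    # consumers keep working for both variants.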
--- build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build.sh b/build.sh index bd471c99..61f86945 100755 --- a/build.sh +++ b/build.sh @@ -355,13 +355,13 @@ generate_package() if [ "x${PLATFORM}" = "xtrain" ] then - tar -cf graphengine_lib.tar fwkacllib + tar -zcf graphengine_lib.tar fwkacllib elif [ "x${PLATFORM}" = "xinference" ] then - tar -cf graphengine_lib.tar acllib atc + tar -zcf graphengine_lib.tar acllib atc elif [ "x${PLATFORM}" = "xall" ] then - tar -cf graphengine_lib.tar fwkacllib acllib atc + tar -zcf graphengine_lib.tar fwkacllib acllib atc fi } From f840399fc3fe240952a00ed567210db7d779135e Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 17 Jun 2021 20:49:25 +0800 Subject: [PATCH 048/226] Aicpu single_op always dynamic execute. --- ge/single_op/task/op_task.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 66d70e7e..1bee6634 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -450,7 +450,6 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false), "[Update][SessionInfo] failed."); - GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true), "[Update][ExecuteMode] failed."); GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM)); GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), From 6f130e22904dec2413815135520ce95f44d49f80 Mon Sep 17 00:00:00 2001 From: wangkai Date: Fri, 18 Jun 2021 10:37:55 +0800 Subject: [PATCH 049/226] add link header targets Signed-off-by: wangkai --- ge/common/CMakeLists.txt | 24 ++++++++++++++---------- ge/executor/CMakeLists.txt | 20 +++++++++++++------- 2 files changed, 27 insertions(+), 17 deletions(-) mode change 100644 => 100755 ge/executor/CMakeLists.txt diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index 7974a46d..f55ff427 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -84,12 +84,11 @@ target_include_directories(ge_common PRIVATE ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### - ${GE_DEPEND_DIR}/inc - ${GE_DEPEND_DIR}/inc/cce + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### - #${GE_DEPEND_DIR}/include - ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> ) target_link_options(ge_common PRIVATE @@ -98,6 +97,9 @@ target_link_options(ge_common PRIVATE target_link_libraries(ge_common PRIVATE $ + $<$>:$> + $<$>:$> + $<$>:$> static_mmpa -Wl,--no-as-needed graph @@ -151,16 +153,18 @@ target_include_directories(ge_common_static PRIVATE ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### - ${GE_DEPEND_DIR}/inc - ${GE_DEPEND_DIR}/inc/cce + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### - #${GE_DEPEND_DIR}/include - ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> ) target_link_libraries(ge_common_static PRIVATE $ + $<$>:$> + $<$>:$> + $<$>:$> ascend_protobuf_static json c_sec diff --git a/ge/executor/CMakeLists.txt b/ge/executor/CMakeLists.txt old mode 100644 new mode 
100755 index b04216b8..b6342973 --- a/ge/executor/CMakeLists.txt +++ b/ge/executor/CMakeLists.txt @@ -187,15 +187,18 @@ target_include_directories(ge_executor SYSTEM PRIVATE ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### - ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/cce + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### - ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> ) target_link_libraries(ge_executor PRIVATE $ + $<$>:$> + $<$>:$> + $<$>:$> json ascend_protobuf_static c_sec @@ -238,10 +241,10 @@ target_include_directories(ge_executor_shared PRIVATE ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### - ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/cce + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### - ${GE_CODE_DIR}/third_party/fwkacllib/inc + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> ) target_link_options(ge_executor_shared PRIVATE @@ -251,6 +254,9 @@ target_link_options(ge_executor_shared PRIVATE target_link_libraries(ge_executor_shared PRIVATE $ + $<$>:$> + $<$>:$> + $<$>:$> -Wl,--no-as-needed ge_common runtime From 676ce23b556e20d8f49eabccc25e3ab51bf8803a Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Fri, 18 Jun 2021 11:03:51 +0800 Subject: [PATCH 050/226] Revert 'Pull Request ls : Adaptation rectification of op_tiling.' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../node_executor/aicore/aicore_op_task.cc | 24 +++++++++---------- .../node_executor/aicore/aicore_op_task.h | 4 ++-- ge/single_op/task/op_task.cc | 13 +++++----- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 76082cb3..8cd24bd1 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -25,7 +25,7 @@ #include "single_op/task/build_task_utils.h" #include "single_op/task/tbe_task_builder.h" -using optiling::utils::OpRunInfo; +using optiling::OpRunInfo; namespace ge { namespace hybrid { @@ -359,7 +359,9 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { GE_CHECK_NOTNULL(op_desc); GELOGD("[%s] Start to update tiling info for task: [%s]", node->GetName().c_str(), stub_name_.c_str()); - OpRunInfo tiling_info(-1, true, 0); + OpRunInfo tiling_info; + tiling_info.block_dim = -1; // codex: Using uninitialized value + tiling_info.clear_atomic = true; auto execution_context = context.GetExecutionContext(); @@ -368,14 +370,12 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End"); // update op args by tiling info - block_dim_ = tiling_info.GetBlockDim(); - clear_atomic_ = tiling_info.GetClearAtomic(); - std::vector<int64_t> workspaces; - tiling_info.GetAllWorkspaces(workspaces); - op_desc->SetWorkspaceBytes(workspaces); - - tiling_data_ = tiling_info.GetAllTilingData().str(); - tiling_key_ = tiling_info.GetTilingKey(); + block_dim_ = static_cast<uint32_t>(tiling_info.block_dim); + op_desc->SetWorkspaceBytes(tiling_info.workspaces); + clear_atomic_ = tiling_info.clear_atomic; + + tiling_data_ = tiling_info.tiling_data.str(); + tiling_key_ = tiling_info.tiling_key;
GELOGD("Successfully getting [tiling_key] : %u", tiling_key_); if (tiling_data_.empty()) { GELOGD("[%s] Tiling data is empty.", op_desc->GetName().c_str()); @@ -412,7 +412,7 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { Status AiCoreOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpParaCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(optiling::OpParaCalculateV2(*node, tiling_info), + GE_CHK_STATUS_RET(OpParaCalculate(*node, tiling_info), "[Invoke][OpParaCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpParaCalculate successfully.", node->GetName().c_str()); @@ -633,7 +633,7 @@ std::string AtomicAddrCleanOpTask::GetKeyForKernelName(const OpDesc &op_desc) co Status AtomicAddrCleanOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpAtomicCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(optiling::OpAtomicCalculateV2(*node, tiling_info), + GE_CHK_STATUS_RET(OpAtomicCalculate(*node, tiling_info), "[Invoke][OpAtomicCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpAtomicCalculate successfully.", node->GetName().c_str()); diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index 3c8db8c9..8d7b7f1e 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -85,7 +85,7 @@ class AiCoreOpTask { virtual std::string GetKeyForTvmMagic() const; virtual std::string GetKeyForTvmMetaData() const; virtual std::string GetKeyForKernelName(const OpDesc &op_desc) const; - virtual Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info); + virtual Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info); std::unique_ptr<TensorBuffer> tiling_buffer_ = nullptr; std::string tiling_data_; @@ -130,7 +130,7 @@ class AtomicAddrCleanOpTask : public AiCoreOpTask { std::string GetKeyForTvmMagic() const override; std::string GetKeyForTvmMetaData() const override; std::string GetKeyForKernelName(const OpDesc &op_desc) const override; - Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info) override; + Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info) override; private: Status InitAtomicAddrCleanIndices(const OpDesc &op_desc); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 66d70e7e..e48677f8 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -224,17 +224,18 @@ Status TbeOpTask::LaunchKernel(rtStream_t stream) { Status TbeOpTask::UpdateRunInfo() { // invoke OpParaCalculate GELOGD("Start to invoke OpParaCalculate."); - optiling::utils::OpRunInfo run_info(0, true, 0); - auto ret = optiling::OpParaCalculateV2(*node_, run_info); + optiling::OpRunInfo run_info; + run_info.block_dim = 0; + auto ret = optiling::OpParaCalculate(*node_, run_info); if (ret != GRAPH_SUCCESS) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpParaCalculate] failed, ret = %u.", ret); REPORT_INNER_ERROR("E19999", "invoke OpParaCalculate failed, ret = %u.", ret); return ACL_ERROR_GE_INTERNAL_ERROR; } - block_dim_ = run_info.GetBlockDim(); - tiling_data_ = run_info.GetAllTilingData().str(); - tiling_key_ = run_info.GetTilingKey(); - run_info.GetAllWorkspaces(run_info_workspaces_); + block_dim_ = run_info.block_dim; + tiling_data_ =
run_info.tiling_data.str(); + tiling_key_ = run_info.tiling_key; + run_info_workspaces_ = run_info.workspaces; GELOGD("Done invoking OpParaCalculate successfully. block_dim = %u, tiling size = %zu, tiling_key = %u", block_dim_, tiling_data_.size(), tiling_key_); return SUCCESS; From e2741096ad309785b713f9613a1a2266b4d2faf6 Mon Sep 17 00:00:00 2001 From: zhou_chao1993 Date: Fri, 18 Jun 2021 16:39:55 +0800 Subject: [PATCH 051/226] delete repeat code --- ge/hybrid/executor/hybrid_model_async_executor.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index 930412e3..63e48c92 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -78,8 +78,6 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr &lis GetThreadLocalContext() = *executor_->GetContext()->ge_context; GetContext().SetSessionId(executor_->GetContext()->session_id); GetContext().SetContextId(executor_->GetContext()->context_id); - GE_CHECK_NOTNULL(executor_->GetContext()->ge_context); - GetThreadLocalContext() = *executor_->GetContext()->ge_context; return RunInternal(); }); From c2a1076a8734a5dbb80f5336ee9bcd8b21bec817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=82=A8=E6=98=9F?= Date: Sat, 19 Jun 2021 15:15:51 +0800 Subject: [PATCH 052/226] Revert 'Pull Request !1784 : Create NodeExecute on-demand' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ge/hybrid/node_executor/node_executor.cc | 75 +++++++------ ge/hybrid/node_executor/node_executor.h | 7 +- tests/ut/ge/CMakeLists.txt | 2 - .../node_executor/node_executor_unittest.cc | 103 ------------------ 4 files changed, 44 insertions(+), 143 deletions(-) delete mode 100644 tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc diff --git a/ge/hybrid/node_executor/node_executor.cc b/ge/hybrid/node_executor/node_executor.cc index 04225557..5f3d6e45 100755 --- a/ge/hybrid/node_executor/node_executor.cc +++ b/ge/hybrid/node_executor/node_executor.cc @@ -58,8 +58,8 @@ Status NodeExecutor::CompileTask(const HybridModel &model, const NodePtr &node, } Status NodeExecutorManager::EnsureInitialized() { + GE_CHK_STATUS_RET(InitializeExecutors()); std::lock_guard<std::mutex> lk(mu_); - ++ref_count_; if (initialized_) { return SUCCESS; } @@ -115,14 +115,17 @@ NodeExecutorManager::ExecutorType NodeExecutorManager::ResolveExecutorType(Node return it->second; } -Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) { +Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) const { auto executor_type = ResolveExecutorType(node); - GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast<int>(executor_type)); const auto it = executors_.find(executor_type); if (it == executors_.end()) { - return GetOrCreateExecutor(executor_type, executor); + REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.", static_cast<int>(executor_type)); + GELOGE(INTERNAL_ERROR, "[Check][ExecutorType]Failed to get executor by type: %d.", + static_cast<int>(executor_type)); + return INTERNAL_ERROR; } + GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast<int>(executor_type)); *executor = it->second.get(); return SUCCESS; } @@ -175,50 +178,51 @@ Status NodeExecutorManager::CalcOpRunningParam(Node &node) const { return
OpsKernelBuilderManager::Instance().CalcOpRunningParam(node); } -Status NodeExecutorManager::GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **out_executor) { +Status NodeExecutorManager::InitializeExecutors() { std::lock_guard<std::mutex> lk(mu_); - const auto executor_it = executors_.find(executor_type); - if (executor_it != executors_.end()) { - *out_executor = executor_it->second.get(); + if (executor_initialized_) { + ++ref_count_; + GELOGI("Executor is already initialized. add ref count to [%d]", ref_count_); return SUCCESS; } - GELOGI("Start to Initialize NodeExecutor, type = %d", static_cast<int>(executor_type)); - auto it = builders_.find(executor_type); - if (it == builders_.end()) { - REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", - static_cast<int>(executor_type)); - GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for executor type = %d", static_cast<int>(executor_type)); - return INTERNAL_ERROR; - } + GELOGI("Start to Initialize NodeExecutors"); + for (auto &it : builders_) { + auto engine_type = it.first; + auto build_fn = it.second; + GE_CHECK_NOTNULL(build_fn); + auto executor = std::unique_ptr<NodeExecutor>(build_fn()); + if (executor == nullptr) { + REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for engine type = %d", + static_cast<int>(engine_type)); + GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast<int>(engine_type)); + return INTERNAL_ERROR; + } - auto build_fn = it->second; - GE_CHECK_NOTNULL(build_fn); - auto executor = std::unique_ptr<NodeExecutor>(build_fn()); - if (executor == nullptr) { - REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", - static_cast<int>(executor_type)); - GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast<int>(executor_type)); - return INTERNAL_ERROR; - } + GELOGD("Executor of engine type = %d was created successfully", static_cast<int>(engine_type)); + auto ret = executor->Initialize(); + if (ret != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast<int>(engine_type)); + GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast<int>(engine_type)); + for (auto &executor_it : executors_) { + executor_it.second->Finalize(); + } + executors_.clear(); + return ret; + } - GELOGD("Executor of engine type = %d was created successfully", static_cast<int>(executor_type)); - auto ret = executor->Initialize(); - if (ret != SUCCESS) { - REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast<int>(executor_type)); - GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast<int>(executor_type)); - return ret; + executors_.emplace(engine_type, std::move(executor)); } - *out_executor = executor.get(); - executors_.emplace(executor_type, std::move(executor)); - GELOGI("Initializing NodeExecutor successfully, type = %d", static_cast<int>(executor_type)); + ++ref_count_; + executor_initialized_ = true; + GELOGI("Initializing NodeExecutors successfully."); return SUCCESS; } void NodeExecutorManager::FinalizeExecutors() { std::lock_guard<std::mutex> lk(mu_); - if (ref_count_ <= 0) { + if (!executor_initialized_) { GELOGD("No need for finalizing for not initialized."); return; } @@ -233,6 +237,7 @@ void NodeExecutorManager::FinalizeExecutors() { it.second->Finalize(); } executors_.clear(); + executor_initialized_ = false; GELOGD("Done invoking Finalize successfully."); } diff --git a/ge/hybrid/node_executor/node_executor.h b/ge/hybrid/node_executor/node_executor.h index 97c9cee9..fffd4e7d 100644
--- a/ge/hybrid/node_executor/node_executor.h +++ b/ge/hybrid/node_executor/node_executor.h @@ -179,6 +179,8 @@ class NodeExecutorManager { */ Status EnsureInitialized(); + Status InitializeExecutors(); + void FinalizeExecutors(); /** @@ -194,7 +196,7 @@ class NodeExecutorManager { * @param executor executor * @return SUCCESS on success, error code otherwise */ - Status GetExecutor(Node &node, const NodeExecutor **executor); + Status GetExecutor(Node &node, const NodeExecutor **executor) const; /** * Resolve executor type by node @@ -204,13 +206,12 @@ class NodeExecutorManager { ExecutorType ResolveExecutorType(Node &node) const; private: - Status GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **executor); - std::map> executors_; std::map> builders_; std::map engine_mapping_; std::mutex mu_; bool initialized_ = false; + bool executor_initialized_ = false; int ref_count_ = 0; }; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 631e18f8..8b024820 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -839,7 +839,6 @@ set(HYBRID_TEST_FILES "hybrid/executor/subgraph_executor_unittest.cc" "hybrid/executor/worker/execution_engine_unittest.cc" "hybrid/model/hybrid_model_builder_unittest.cc" - "hybrid/node_executor/node_executor_unittest.cc" "hybrid/node_executor/rts/rts_node_task_unittest.cc" "hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc" "hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc" @@ -847,7 +846,6 @@ set(HYBRID_TEST_FILES "hybrid/executor/hybrid_model_async_executor_unittest.cc" "hybrid/executor/hybrid_model_pipeline_executor_unittest.cc" "hybrid/node_executor/aicore/aicore_task_compiler_unittest.cc" - ) set(OTHERS_TEST_FILES diff --git a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc deleted file mode 100644 index 8a1240d3..00000000 --- a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include - -#define private public -#define protected public -#include "hybrid/node_executor/node_executor.h" -#undef protected -#undef private - -using namespace std; -using namespace testing; - -namespace ge { -using namespace hybrid; - -namespace { - bool finalized = false; -} - -class NodeExecutorTest : public testing::Test { - protected: - void SetUp() {} - void TearDown() { } -}; - -class FailureNodeExecutor : public NodeExecutor { - public: - Status Initialize() override { - return INTERNAL_ERROR; - } -}; - -class SuccessNodeExecutor : public NodeExecutor { - public: - Status Initialize() override { - initialized = true; - finalized = false; - return SUCCESS; - } - - Status Finalize() override { - finalized = true; - } - - bool initialized = false; -}; - -REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICORE, FailureNodeExecutor); -REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, SuccessNodeExecutor); - -TEST_F(NodeExecutorTest, TestGetOrCreateExecutor) { - auto &manager = NodeExecutorManager::GetInstance(); - const NodeExecutor *executor = nullptr; - Status ret = SUCCESS; - // no builder - ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::RESERVED, &executor); - ASSERT_EQ(ret, INTERNAL_ERROR); - // initialize failure - ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICORE, &executor); - ASSERT_EQ(ret, INTERNAL_ERROR); - ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); - ASSERT_EQ(ret, SUCCESS); - ASSERT_TRUE(executor != nullptr); - ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); - ASSERT_EQ(ret, SUCCESS); - ASSERT_TRUE(executor != nullptr); - ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); -} - -TEST_F(NodeExecutorTest, TestInitAndFinalize) { - auto &manager = NodeExecutorManager::GetInstance(); - manager.FinalizeExecutors(); - manager.EnsureInitialized(); - manager.EnsureInitialized(); - const NodeExecutor *executor = nullptr; - auto ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); - ASSERT_EQ(ret, SUCCESS); - ASSERT_TRUE(executor != nullptr); - ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); - manager.FinalizeExecutors(); - ASSERT_FALSE(manager.executors_.empty()); - manager.FinalizeExecutors(); - ASSERT_TRUE(manager.executors_.empty()); - ASSERT_TRUE(finalized); -} -} // namespace ge From 0b04317d23e4405c93a1eb2f5e80925fae758523 Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Tue, 15 Jun 2021 21:08:33 +0800 Subject: [PATCH 053/226] fix cmetric --- ge/common/util.cc | 82 ++++++------------- ge/graph/load/model_manager/davinci_model.cc | 4 +- ge/graph/preprocess/insert_op/ge_aipp_op.cc | 2 +- .../node_executor/hccl/hccl_node_executor.cc | 3 +- ge/ir_build/option_utils.cc | 2 +- ge/offline/main.cc | 3 +- tests/depends/mmpa/src/mmpa_stub.cc | 7 ++ tests/ut/ge/CMakeLists.txt | 1 + tests/ut/ge/common/util_unittest.cc | 63 ++++++++++++++ .../ge/graph/load/davinci_model_unittest.cc | 3 + tests/ut/ge/graph_ir/ge_ir_build_unittest.cc | 9 +- 11 files changed, 115 insertions(+), 64 deletions(-) create mode 100644 tests/ut/ge/common/util_unittest.cc diff --git a/ge/common/util.cc b/ge/common/util.cc index 448efc0f..dfb5bac4 100644 --- a/ge/common/util.cc +++ b/ge/common/util.cc @@ -340,15 +340,24 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string RealPath(const char return res; } +void PathValidErrReport(const 
std::string &file_path, const std::string &atc_param, const std::string &reason) { + if (!atc_param.empty()) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({atc_param, file_path, reason})); + } else { + REPORT_INNER_ERROR("E19999", "Path[%s] invalid, reason:%s", file_path.c_str(), reason.c_str()); + } +} + FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const std::string &file_path, const std::string &atc_param) { // The specified path is empty std::map args_map; if (file_path.empty()) { - if (atc_param != "") { - ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param}); + if (!atc_param.empty()) { + REPORT_INPUT_ERROR("E10004", std::vector<std::string>({"parameter"}), std::vector<std::string>({atc_param})); } else { - REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid"); + REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid."); } GELOGW("Input parameter %s is empty.", file_path.c_str()); return false; @@ -356,13 +365,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const std::string real_path = RealPath(file_path.c_str()); // Unable to get absolute path (does not exist or does not have permission to access) if (real_path.empty()) { - if (atc_param != "") { - std::string reason = "realpath error, errmsg:" + std::string(strerror(errno)); - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, reason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s]'s realpath is empty, errmsg[%s]", file_path.c_str(), strerror(errno)); - } + std::string reason = "realpath error, errmsg:" + std::string(strerror(errno)); + PathValidErrReport(file_path, atc_param, reason); GELOGW("Path[%s]'s realpath is empty, errmsg[%s]", file_path.c_str(), strerror(errno)); return false; } @@ -378,23 +382,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( !ValidateStr(real_path, mode), - if (atc_param != "") { - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, real_path, kPathValidReason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] has invalid char, %s", file_path.c_str(), kPathValidReason); - } + PathValidErrReport(file_path, atc_param, kPathValidReason); return false, "Invalid value for %s[%s], %s.", atc_param.c_str(), real_path.c_str(), kPathValidReason); // The absolute path points to a file that is not readable if (mmAccess2(real_path.c_str(), M_R_OK) != EN_OK) { - if (atc_param != "") { - std::string reason = "cat not access, errmsg:" + std::string(strerror(errno)); - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, reason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] can't acccess, errmsg:%s", file_path.c_str(), strerror(errno)); - } + PathValidErrReport(file_path, atc_param, "cat not access, errmsg:" + std::string(strerror(errno))); GELOGW("Read file[%s] failed, errmsg[%s]", file_path.c_str(), strerror(errno)); return false; } @@ -406,10 +399,10 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const const std::string &atc_param) { // The specified path is empty if (file_path.empty()) { - if (atc_param != "") { - ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param}); + if (!atc_param.empty()) { + REPORT_INPUT_ERROR("E10004", std::vector<std::string>({"parameter"}), std::vector<std::string>({atc_param})); } else { - REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid"); + REPORT_INNER_ERROR("E19999", "Param file_path is empty, check invalid."); } ErrorManager::GetInstance().ATCReportErrMessage("E10004", {"parameter"}, {atc_param}); GELOGW("Input parameter's value is empty."); return false; } GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(strlen(file_path.c_str()) >= MMPA_MAX_PATH, - if (atc_param != "") { - std::string reason = "len is too long, it must be less than " + - std::to_string(MMPA_MAX_PATH); - ErrorManager::GetInstance().ATCReportErrMessage( - "E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, reason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] len is too long, it must be less than %d", - file_path.c_str(), MMPA_MAX_PATH); - } - return "", "Path[%s] len is too long, it must be less than %d", file_path.c_str(), + std::string reason = "len is too long, it must be less than " + + std::to_string(MMPA_MAX_PATH); + PathValidErrReport(file_path, atc_param, reason); + return false, "Path[%s] len is too long, it must be less than %d", file_path.c_str(), MMPA_MAX_PATH); // A regular matching expression to verify the validity of the input file path @@ -441,12 +427,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( !ValidateStr(file_path, mode), - if (atc_param != "") { - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, kPathValidReason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] has invalid char, %s", file_path.c_str(), kPathValidReason); - } + PathValidErrReport(file_path, atc_param, kPathValidReason); return false, "Invalid value for %s[%s], %s.", atc_param.c_str(), file_path.c_str(), kPathValidReason); std::string real_path = RealPath(file_path.c_str()); @@ -454,13 +435,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const if (!real_path.empty()) { // File is not readable or writable if (mmAccess2(real_path.c_str(), M_W_OK | M_F_OK) != EN_OK) { - if (atc_param != "") { - std::string reason = "cat not access, errmsg:" + std::string(strerror(errno)); - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, reason}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] can't acccess, errmsg:%s", file_path.c_str(), strerror(errno)); - } + PathValidErrReport(file_path, atc_param, "cat not access, errmsg:" + std::string(strerror(errno))); GELOGW("Write file[%s] failed, errmsg[%s]", real_path.c_str(), strerror(errno)); return false; } @@ -479,12 +454,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const std::string prefix_path = std::string(file_path).substr(0, static_cast<size_t>(path_split_pos)); // Determine whether the specified path is valid by creating the path if (CreateDirectory(prefix_path) != 0) { - if (atc_param != "") { - ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, - {atc_param, file_path, "Can not create directory"}); - } else { - REPORT_INNER_ERROR("E19999", "Path[%s] Can not create directory", file_path.c_str()); - } + PathValidErrReport(file_path, atc_param, "Can not create directory"); GELOGW("Can not create directory[%s].", file_path.c_str()); return false; } diff --git
a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 5b67c205..929ae158 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -3463,11 +3463,11 @@ bool DavinciModel::CheckUserAndModelSize(const int64_t &size, const int64_t &op_ } // The input and model input size can not be exactly equal because user input is not definite. if ((size + kDataMemAlignSizeCompare) < op_size) { - REPORT_INNER_ERROR("E19999", "%s size:%ld from user add align:%u < input_op_size:%ld in model, model_id:%u, " + REPORT_INNER_ERROR("E19999", "%s size:%ld from user add align:%u < op_size:%ld in model, model_id:%u, " "check invalid", input_or_output.c_str(), size, kDataMemAlignSizeCompare, op_size, model_id_); GELOGE(ACL_ERROR_GE_PARAM_INVALID, - "[Check][Param] %s size:%ld from user add align:%u < input_op_size:%ld in model, model_id:%u", + "[Check][Param] %s size:%ld from user add align:%u < op_size:%ld in model, model_id:%u", input_or_output.c_str(), size, kDataMemAlignSizeCompare, op_size, model_id_); return false; } diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc index 5c191af7..2ea41b01 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc @@ -114,7 +114,7 @@ Status GetDataDimN(const ge::NodePtr &data_node, ge::Format format, int64_t &bat std::vector({ data_node->GetName() + " format", TypeUtils::FormatToSerialString(format), - "only format " + TypeUtils::FormatToSerialString(FORMAT_NCHW) + " and "+ + "only format " + TypeUtils::FormatToSerialString(FORMAT_NCHW) + " and " + TypeUtils::FormatToSerialString(FORMAT_NHWC) + " supported which dynamic aipp is linked"})); GELOGE(PARAM_INVALID, "[Check][Param] Not support data format:%s, node:%s", diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc index 72092cd8..d942695e 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc @@ -24,6 +24,7 @@ #include "graph/types.h" #include "hybrid/executor/hybrid_execution_context.h" #include "hccl/hcom.h" +#include "runtime/event.h" namespace ge { namespace { @@ -325,7 +326,7 @@ Status RdmaNodeTask::ExecuteAsync(TaskContext &context, std::function do rtEvent_t evt = nullptr; if (context.GetExecutionContext()->hccl_stream != nullptr) { - GE_CHK_RT_RET(rtEventCreateWithFlag(&evt, 0x01)); + GE_CHK_RT_RET(rtEventCreateWithFlag(&evt, RT_EVENT_WITH_FLAG)); GE_CHK_RT_RET(rtStreamWaitEvent(context.GetExecutionContext()->hccl_stream, evt)); } TaskContext *p_ctx = &context; diff --git a/ge/ir_build/option_utils.cc b/ge/ir_build/option_utils.cc index cecc2588..e2b08495 100755 --- a/ge/ir_build/option_utils.cc +++ b/ge/ir_build/option_utils.cc @@ -204,7 +204,7 @@ bool CheckDynamicImagesizeInputShapeValid(map> shape_map if (!input_format.empty() && !ge::TypeUtils::IsFormatValid(input_format.c_str())) { GELOGE(ge::PARAM_INVALID, "[Check][DynamicImagesizeInputShape] input_format [%s] invalid, can not support now.", input_format.c_str()); - REPORT_INPUT_ERROR("E10003", std::vector({"parameter","value","reason"}), + REPORT_INPUT_ERROR("E10003", std::vector({"parameter", "value", "reason"}), std::vector({"input_format", input_format, "this format is not support"})); return false; } diff --git a/ge/offline/main.cc b/ge/offline/main.cc index a1ae476b..14db1ded 100755 --- a/ge/offline/main.cc +++ 
b/ge/offline/main.cc @@ -953,8 +953,7 @@ domi::Status GenerateModel(std::map &options, std::string output ge::Model load_model = ge::Model("loadmodel", "version2"); auto ret1 = load_model.LoadFromFile(FLAGS_model); if (ret1 != ge::GRAPH_SUCCESS) { - REPORT_INPUT_ERROR("E10041", std::vector({"file"}), std::vector({FLAGS_model})); - REPORT_CALL_ERROR("E19999", "load from model file:%s failed", FLAGS_model.c_str()); + REPORT_INPUT_ERROR("E10041", std::vector({"parameter"}), std::vector({FLAGS_model})); DOMI_LOGE("Load model from %s failed, please check model file or " "input parameter[--framework] is correct", FLAGS_model.c_str()); (void)ge_generator.Finalize(); diff --git a/tests/depends/mmpa/src/mmpa_stub.cc b/tests/depends/mmpa/src/mmpa_stub.cc index a82621ef..aae8de9f 100644 --- a/tests/depends/mmpa/src/mmpa_stub.cc +++ b/tests/depends/mmpa/src/mmpa_stub.cc @@ -220,6 +220,13 @@ VOID mmScandirFree(mmDirent **entryList, INT32 count) INT32 mmAccess2(const CHAR *pathName, INT32 mode) { + if (pathName == NULL) { + return EN_INVALID_PARAM; + } + INT32 ret = access(pathName, mode); + if (ret != EN_OK) { + return EN_ERROR; + } return 0; } diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 63579109..b820e465 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -761,6 +761,7 @@ set(MULTI_PARTS_TEST_FILES "graph_ir/ge_ir_build_unittest.cc" "graph/transop_util_unittest.cc" "common/datatype_transfer_unittest.cc" + "common/util_unittest.cc" "common/dump_manager_unittest.cc" "common/dump_op_unittest.cc" "common/dump_exception_unittest.cc" diff --git a/tests/ut/ge/common/util_unittest.cc b/tests/ut/ge/common/util_unittest.cc new file mode 100644 index 00000000..6df3db96 --- /dev/null +++ b/tests/ut/ge/common/util_unittest.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "common/util.h" + +namespace ge { +namespace formats { +class UtestUtilTransfer : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + + +INT32 mmAccess2(const CHAR *pathName, INT32 mode) +{ + return -1; +} + +TEST_F(UtestUtilTransfer, CheckOutputPathValid) { + EXPECT_EQ(CheckOutputPathValid("", ""), false); + EXPECT_EQ(CheckOutputPathValid("", "model"), false); + + char max_file_path[14097] = {0}; + memset(max_file_path, 1, 14097); + EXPECT_EQ(CheckOutputPathValid(max_file_path, "model"), false); + + EXPECT_EQ(CheckOutputPathValid("$#%", ""), false); + + // system("touch test_util"); + // system("chmod 555 test_util"); + // EXPECT_EQ(CheckOutputPathValid("./test_util", ""), false); + // system("rm -r test_util"); +} + +TEST_F(UtestUtilTransfer, CheckInputPathValid) { + EXPECT_EQ(CheckInputPathValid("", ""), false); + EXPECT_EQ(CheckInputPathValid("", "model"), false); + + EXPECT_EQ(CheckInputPathValid("$#%", ""), false); + + EXPECT_EQ(CheckInputPathValid("./test_util", ""), false); + +} + +} +} + diff --git a/tests/ut/ge/graph/load/davinci_model_unittest.cc b/tests/ut/ge/graph/load/davinci_model_unittest.cc index 3f9cc850..378f2f07 100644 --- a/tests/ut/ge/graph/load/davinci_model_unittest.cc +++ b/tests/ut/ge/graph/load/davinci_model_unittest.cc @@ -1035,6 +1035,9 @@ TEST_F(UtestDavinciModel, NnExecute) { ProfilingManager::Instance().device_id_.emplace_back(0); model.task_list_.resize(1); EXPECT_EQ(model.NnExecute(stream, false, input_data, output_data), SUCCESS); + + input_data.blobs[0].length = 128; + EXPECT_NE(model.NnExecute(stream, false, input_data, output_data), SUCCESS); } TEST_F(UtestDavinciModel, update_io_addr_success) { DavinciModel model(0, nullptr); diff --git a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc index e14178d8..047c9e1d 100644 --- a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc +++ b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc @@ -368,7 +368,14 @@ TEST(UtestIrBuild, check_modify_mixlist_param) { {"ge.exec.modify_mixlist", "/modify.json"} }; ModelBufferData model; - + auto ret = aclgrphBuildModel(graph, build_options, model); EXPECT_EQ(ret, GRAPH_PARAM_INVALID); +} + +TEST(UtestIrCommon, check_dynamic_imagesize_input_shape_valid_format_empty) { + std::map<std::string, std::vector<int64_t>> shape_map; + std::string dynamic_image_size = ""; + bool ret = CheckDynamicImagesizeInputShapeValid(shape_map, "123", dynamic_image_size); + EXPECT_EQ(ret, false); } \ No newline at end of file From 1942c5e74e506f6da8e119186cc4ef7921b3761f Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 21 Jun 2021 11:37:17 +0800 Subject: [PATCH 054/226] Fix bug of single_op kernel bin register.
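The fix below stops single_op tasks from forcing re-registration of a shared stub name and instead makes the stub name unique per task. A minimal sketch of the naming rule it introduces (illustrative only: MakeStubName is a hypothetical helper, and the uint64_t type for log_id_ is an assumption; is_single_op_, log_id_ and stub_func mirror the fields touched in the diff):

    #include <cstdint>
    #include <string>

    // Prepending the task's log id gives each single_op load of the same TBE
    // kernel a distinct stub name, so rtQueryFunctionRegistered() can be
    // trusted and the forced re-registration branch can be dropped.
    std::string MakeStubName(bool is_single_op, uint64_t log_id, const std::string &stub_func) {
      return is_single_op ? std::to_string(log_id) + stub_func : stub_func;
    }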
--- ge/hybrid/node_executor/aicore/aicore_op_task.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 76082cb3..95877181 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -81,7 +81,7 @@ Status AiCoreOpTask::Init(const OpDesc &op_desc, const domi::TaskDef &task_def) Status AiCoreOpTask::RegisterTbeHandle(const OpDesc &op_desc) { rtError_t rt_ret = rtQueryFunctionRegistered(stub_name_.c_str()); - if (rt_ret != RT_ERROR_NONE || is_single_op_) { + if (rt_ret != RT_ERROR_NONE) { auto op_desc_ptr = MakeShared<OpDesc>(op_desc); GE_CHECK_NOTNULL(op_desc_ptr); auto tbe_kernel = op_desc_ptr->TryGetExtAttr(GetKeyForTbeKernel(), TBEKernelPtr()); @@ -194,7 +194,7 @@ Status AiCoreOpTask::RegisterKernelHandle(const OpDesc &op_desc) { Status AiCoreOpTask::InitWithKernelDef(const OpDesc &op_desc, const domi::TaskDef &task_def) { const domi::KernelDef &kernel_def = task_def.kernel(); const domi::KernelContext &context = kernel_def.context(); - stub_name_ = kernel_def.stub_func(); + stub_name_ = is_single_op_ ? to_string(log_id_) + kernel_def.stub_func() : kernel_def.stub_func(); GE_CHK_STATUS_RET(RegisterTbeHandle(op_desc)); GE_CHK_RT_RET(rtGetFunctionByName(stub_name_.c_str(), &stub_func_)); args_size_ = kernel_def.args_size(); From 7337be63533554bb571a2d56a73b5b6d37c78eba Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 21 Jun 2021 14:56:16 +0800 Subject: [PATCH 055/226] Refresh arg addr in aicpu node executor. --- ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index c2ebf654..c83a76d1 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -64,10 +64,6 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_ GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id), "[Update][SessionInfoSessionId] failed, session_id:%ld.", session_id); - bool execute_mode = !aicpu_ext_handle_.IsNeedRefreshIOAddr() && !node_item_->is_dynamic; - GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateExecuteMode(execute_mode), - "[Update][ExecuteMode] failed, node:%s.", node_name_.c_str()); - // copy task args buf GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_), "[Invoke][AllocTensorBuffer]Node[%s] alloc kernel_ext_info buf failed, size=%zu", From b01ce212e745795f6b8ba52df549bc905c5c6376 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Sat, 19 Jun 2021 11:08:02 +0800 Subject: [PATCH 056/226] opt info --- CMakeLists.txt | 1 + ge/common/CMakeLists.txt | 8 ++ ge/common/ge_opt_info.cc | 58 +++++++++ ge/common/ge_opt_info.h | 31 +++++ ge/graph/manager/graph_manager.cc | 9 +- tests/CMakeLists.txt | 1 + tests/depends/opt_info/CMakeLists.txt | 37 ++++++ tests/depends/opt_info/src/opt_info_stub.cc | 46 +++++++ tests/framework/cmake/graphengine.cmake | 2 + tests/st/testcase/test_ge_opt_info.cc | 123 ++++++++++++++++++ tests/ut/ge/CMakeLists.txt | 4 + tests/ut/ge/common/ge_opt_info_unittest.cc | 82 ++++++++++++ third_party/fwkacllib/inc/opt_info/opt_info.h | 34 +++++ 13 files changed, 435 insertions(+), 1 deletion(-) create mode 100644 ge/common/ge_opt_info.cc create mode 100644 ge/common/ge_opt_info.h create mode
100644 tests/depends/opt_info/CMakeLists.txt create mode 100644 tests/depends/opt_info/src/opt_info_stub.cc create mode 100644 tests/st/testcase/test_ge_opt_info.cc create mode 100644 tests/ut/ge/common/ge_opt_info_unittest.cc create mode 100644 third_party/fwkacllib/inc/opt_info/opt_info.h diff --git a/CMakeLists.txt b/CMakeLists.txt index bed5b995..77a759ba 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,6 +95,7 @@ else () #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) + find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) if(PLATFORM STREQUAL "train") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index f55ff427..91f3b27d 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -43,6 +43,7 @@ set(SRC_LIST "op/ge_op_utils.cc" "thread_pool.cc" "ge/tbe_plugin_manager.cc" + "ge_opt_info.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL) @@ -86,9 +87,11 @@ target_include_directories(ge_common PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> + $<$>:${GE_DEPEND_DIR}../abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info> ) target_link_options(ge_common PRIVATE @@ -108,6 +111,7 @@ target_link_libraries(ge_common PRIVATE c_sec error_manager slog + opt_feature -Wl,--as-needed json $<$>:-lrt> @@ -155,9 +159,11 @@ target_include_directories(ge_common_static PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> + $<$>:${GE_DEPEND_DIR}/../abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info> ) target_link_libraries(ge_common_static PRIVATE @@ -213,6 +219,7 @@ target_include_directories(ge_common PRIVATE ${CMAKE_BINARY_DIR}/proto/graphengine_protos ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_common PRIVATE @@ -228,6 +235,7 @@ target_link_libraries(ge_common PRIVATE c_sec error_manager slog + opt_feature static_mmpa -Wl,--as-needed json diff --git a/ge/common/ge_opt_info.cc b/ge/common/ge_opt_info.cc new file mode 100644 index 00000000..c6bac480 --- /dev/null +++ b/ge/common/ge_opt_info.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common/ge_opt_info.h" + +#include <map> +#include <string> +#include "graph/ge_local_context.h" +#include "ge/ge_api_types.h" +#include "common/debug/ge_log.h" +#include "opt_info.h" + +namespace ge { +Status GeOptInfo::SetOptInfo() { + std::string soc_ver; + graphStatus ret = GetThreadLocalContext().GetOption(SOC_VERSION, soc_ver); + if (ret != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get soc version failed."); + GELOGE(FAILED, "[Get][SocVersion]Get soc version failed."); + return FAILED; + } + GELOGD("Soc version:%s.", soc_ver.c_str()); + std::map<std::string, std::string> opt_info; + // the first arg does not work at present. + if (gelc::GetOptInfo(gelc::kOffline, soc_ver, opt_info) != gelc::SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + GELOGE(FAILED, "[Get][OptInfo]Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + return FAILED; + } + // do nothing if get empty information + if (opt_info.empty()) { + GELOGI("Optional information is empty."); + return SUCCESS; + } + std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); + for (const auto &itr : opt_info) { + graph_options.emplace(itr.first, itr.second); + GELOGI("Get optional information success, key:%s, value:%s.", itr.first.c_str(), itr.second.c_str()); + } + GetThreadLocalContext().SetGraphOption(graph_options); + return SUCCESS; +} +} // namespace ge diff --git a/ge/common/ge_opt_info.h b/ge/common/ge_opt_info.h new file mode 100644 index 00000000..4ec9a59f --- /dev/null +++ b/ge/common/ge_opt_info.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef GE_COMMON_GE_OPT_INFO_H_ +#define GE_COMMON_GE_OPT_INFO_H_ + +#include "ge/ge_api_error_codes.h" +#include "register/register_types.h" + +namespace ge { +class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { + public: + GeOptInfo() = default; + static Status SetOptInfo(); +}; +} // namespace ge + +#endif // GE_COMMON_GE_OPT_INFO_H_ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index bf04ed58..3861e6ac 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -27,6 +27,7 @@ #include "common/math/math_util.h" #include "common/thread_pool.h" #include "common/dump/dump_manager.h" +#include "common/ge_opt_info.h" #include "analyzer/analyzer.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -949,7 +950,7 @@ Status GraphManager::SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint rtError_t rt_ret = rtCtxCreate(&rt_context, mode, ge::GetContext().DeviceId()); if (rt_ret != RT_ERROR_NONE) { - REPORT_CALL_ERROR("E19999", "Call rtCtxCreate faileded, session_id:%lu, graph_id:%u, mode:%d", + REPORT_CALL_ERROR("E19999", "Call rtCtxCreate failed, session_id:%lu, graph_id:%u, mode:%d", session_id, graph_id, mode); GELOGE(FAILED, "[Call][RtCtxCreate] faileded, session_id:%lu, graph_id:%u, mode:%d", session_id, graph_id, mode); return FAILED; @@ -1001,6 +1002,12 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector + c_sec +) + +target_include_directories(opt_feature_stub INTERFACE ${CMAKE_CURRENT_LIST_DIR}/src) diff --git a/tests/depends/opt_info/src/opt_info_stub.cc b/tests/depends/opt_info/src/opt_info_stub.cc new file mode 100644 index 00000000..df518c4b --- /dev/null +++ b/tests/depends/opt_info/src/opt_info_stub.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "opt_info.h" +#include <string> +#include <map> +#include <vector> +#include <algorithm> + +namespace gelc { +namespace { +const std::vector<std::string> kSocVersions = {"Ascend910"}; +} + +void SetAllOptInfo(std::map<std::string, std::string> &opt_infos) { + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + opt_infos.emplace("opt_module.rl_tune", "all"); + opt_infos.emplace("opt_module.aoe", "all"); +} + +Status GetOptInfo(WorkMode mode, const std::string &soc_ver, + std::map<std::string, std::string> &opt_infos) { + if (std::find(kSocVersions.begin(), kSocVersions.end(), soc_ver)== kSocVersions.end()) { + SetAllOptInfo(opt_infos); + return SUCCESS; + } + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + return SUCCESS; +} +} // namespace gelc diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake index 81aa00cc..c4380016 100644 --- a/tests/framework/cmake/graphengine.cmake +++ b/tests/framework/cmake/graphengine.cmake @@ -103,6 +103,7 @@ list(APPEND INCLUDE_DIRECTORIES "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" + "${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info" "${GE_CODE_DIR}/tests/ut/ge" "${GE_CODE_DIR}/tests/ut/common" "${CMAKE_BINARY_DIR}" @@ -117,6 +118,7 @@ list(APPEND STUB_LIBS runtime_stub profiler_stub hccl_stub + opt_feature_stub error_manager_stub ascend_protobuf json diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc new file mode 100644 index 00000000..8fc47a9b --- /dev/null +++ b/tests/st/testcase/test_ge_opt_info.cc @@ -0,0 +1,123 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include +#include "easy_graph/graph/box.h" +#include "easy_graph/graph/node.h" +#include "easy_graph/builder/graph_dsl.h" +#include "easy_graph/builder/box_builder.h" +#include "easy_graph/layout/graph_layout.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" +#include "graph/graph.h" +#include "graph/compute_graph.h" +#include "framework/common/types.h" +#include "graph/debug/ge_attr_define.h" +#include "ge_graph_dsl/graph_dsl.h" +#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" +#define protected public +#define private public +#include "common/ge_opt_info.h" +#undef private +#undef protected + +namespace ge { +class STEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(STEST_opt_info, get_opt_info_all) { + std::map options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(STEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} +} // namespace ge diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 631e18f8..ea9e2360 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -62,6 +62,7 @@ include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc) 
include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain) +include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info) include_directories(${GE_CODE_DIR}/tests/ut/ge) include_directories(${GE_CODE_DIR}/tests/ut/common) include_directories(${CMAKE_BINARY_DIR}) @@ -172,6 +173,7 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/dump/exception_dumper.cc" "${GE_CODE_DIR}/ge/common/dump/opdebug_register.cc" "${GE_CODE_DIR}/ge/common/dump/dump_op.cc" + "${GE_CODE_DIR}/ge/common/ge_opt_info.cc" "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" "${GE_CODE_DIR}/ge/model/ge_root_model.cc" "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" @@ -768,6 +770,7 @@ set(MULTI_PARTS_TEST_FILES "common/dump_op_unittest.cc" "common/dump_exception_unittest.cc" "common/opdebug_register_unittest.cc" + "common/ge_opt_info_unittest.cc" "common/format_transfer_unittest.cc" "common/format_transfer_transpose_unittest.cc" "common/format_transfer_nchw_5d_unittest.cc" @@ -863,6 +866,7 @@ list(APPEND COMMON_SHARED_LIBRARIES mmpa_stub hccl_stub error_manager_stub + opt_feature_stub ascend_protobuf json ) diff --git a/tests/ut/ge/common/ge_opt_info_unittest.cc b/tests/ut/ge/common/ge_opt_info_unittest.cc new file mode 100644 index 00000000..3ac51615 --- /dev/null +++ b/tests/ut/ge/common/ge_opt_info_unittest.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#define protected public +#define private public +#include "common/ge_opt_info.h" +#include "graph/ge_local_context.h" +#include "external/ge/ge_api_types.h" +#undef private +#undef protected + +namespace ge { +class UTEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UTEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_all) { + std::map global_options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(global_options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_failed) { + std::map options; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::FAILED); +} + +} // namespace ge diff --git a/third_party/fwkacllib/inc/opt_info/opt_info.h b/third_party/fwkacllib/inc/opt_info/opt_info.h new file mode 100644 index 00000000..ea9bb529 --- /dev/null +++ b/third_party/fwkacllib/inc/opt_info/opt_info.h @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <map> +#include <string> + +namespace gelc { +using Status = uint32_t; +using WorkMode = uint32_t; +const Status SUCCESS = 0x0; +const Status FAILED = 0xFFFFFFFF; +const WorkMode kOffline = 0x0; +const WorkMode kInline = 0x01; + +extern "C" { +__attribute__((visibility ("default"))) +Status GetOptInfo(WorkMode mode, const std::string &soc_ver, + std::map<std::string, std::string> &opt_info_map); +} +} // namespace gelc + From e0939a797a9c413ce37a17131ed7132f1664fb4c Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 21 Jun 2021 15:36:19 +0800 Subject: [PATCH 057/226] Adaptation rectification of op_tiling. --- .../node_executor/aicore/aicore_op_task.cc | 24 +++++++++---------- .../node_executor/aicore/aicore_op_task.h | 4 ++-- ge/single_op/task/op_task.cc | 13 +++++----- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 8cd24bd1..76082cb3 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -25,7 +25,7 @@ #include "single_op/task/build_task_utils.h" #include "single_op/task/tbe_task_builder.h" -using optiling::OpRunInfo; +using optiling::utils::OpRunInfo; namespace ge { namespace hybrid { @@ -359,9 +359,7 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { GE_CHECK_NOTNULL(op_desc); GELOGD("[%s] Start to update tiling info for task: [%s]", node->GetName().c_str(), stub_name_.c_str()); - OpRunInfo tiling_info; - tiling_info.block_dim = -1; // codex: Using uninitialized value - tiling_info.clear_atomic = true; + OpRunInfo tiling_info(-1, true, 0); auto execution_context = context.GetExecutionContext(); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] Start"); GE_CHK_STATUS_RET(CalcTilingInfo(node, tiling_info)); RECORD_EXECUTION_EVENT(execution_context, context.GetNodeName(), "[CalcTilingInfo] End"); // update op args by tiling info - block_dim_ = static_cast<uint32_t>(tiling_info.block_dim); - op_desc->SetWorkspaceBytes(tiling_info.workspaces); - clear_atomic_ = tiling_info.clear_atomic; - - tiling_data_ = tiling_info.tiling_data.str(); - tiling_key_ = tiling_info.tiling_key; + block_dim_ = tiling_info.GetBlockDim(); + clear_atomic_ = tiling_info.GetClearAtomic(); + std::vector<int64_t> workspaces; + tiling_info.GetAllWorkspaces(workspaces); + op_desc->SetWorkspaceBytes(workspaces); + + tiling_data_ = tiling_info.GetAllTilingData().str(); + tiling_key_ = tiling_info.GetTilingKey(); GELOGD("Successfully getting [tiling_key] : %u", tiling_key_); if (tiling_data_.empty()) { GELOGD("[%s] Tiling data is empty.", op_desc->GetName().c_str()); @@ -412,7 +412,7 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { Status AiCoreOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpParaCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(OpParaCalculate(*node, tiling_info), + GE_CHK_STATUS_RET(optiling::OpParaCalculateV2(*node, tiling_info), "[Invoke][OpParaCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpParaCalculate successfully.", node->GetName().c_str()); @@ -633,7 +633,7 @@ std::string AtomicAddrCleanOpTask::GetKeyForKernelName(const OpDesc &op_desc) co Status AtomicAddrCleanOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpAtomicCalculate.", node->GetName().c_str()); - GE_CHK_STATUS_RET(OpAtomicCalculate(*node, tiling_info), + GE_CHK_STATUS_RET(optiling::OpAtomicCalculateV2(*node, tiling_info),
"[Invoke][OpAtomicCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); GELOGD("[%s] Done invoking OpAtomicCalculate successfully.", node->GetName().c_str()); diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index 8d7b7f1e..3c8db8c9 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -85,7 +85,7 @@ class AiCoreOpTask { virtual std::string GetKeyForTvmMagic() const; virtual std::string GetKeyForTvmMetaData() const; virtual std::string GetKeyForKernelName(const OpDesc &op_desc) const; - virtual Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info); + virtual Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info); std::unique_ptr tiling_buffer_ = nullptr; std::string tiling_data_; @@ -130,7 +130,7 @@ class AtomicAddrCleanOpTask : public AiCoreOpTask { std::string GetKeyForTvmMagic() const override; std::string GetKeyForTvmMetaData() const override; std::string GetKeyForKernelName(const OpDesc &op_desc) const override; - Status CalcTilingInfo(const NodePtr &node, optiling::OpRunInfo &tiling_info) override; + Status CalcTilingInfo(const NodePtr &node, optiling::utils::OpRunInfo &tiling_info) override; private: Status InitAtomicAddrCleanIndices(const OpDesc &op_desc); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index e48677f8..66d70e7e 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -224,18 +224,17 @@ Status TbeOpTask::LaunchKernel(rtStream_t stream) { Status TbeOpTask::UpdateRunInfo() { // invoke OpParaCalculate GELOGD("Start to invoke OpParaCalculate."); - optiling::OpRunInfo run_info; - run_info.block_dim = 0; - auto ret = optiling::OpParaCalculate(*node_, run_info); + optiling::utils::OpRunInfo run_info(0, true, 0); + auto ret = optiling::OpParaCalculateV2(*node_, run_info); if (ret != GRAPH_SUCCESS) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpParaCalculate] failed, ret = %u.", ret); REPORT_INNER_ERROR("E19999", "invoke OpParaCalculate failed, ret = %u.", ret); return ACL_ERROR_GE_INTERNAL_ERROR; } - block_dim_ = run_info.block_dim; - tiling_data_ = run_info.tiling_data.str(); - tiling_key_ = run_info.tiling_key; - run_info_workspaces_ = run_info.workspaces; + block_dim_ = run_info.GetBlockDim(); + tiling_data_ = run_info.GetAllTilingData().str(); + tiling_key_ = run_info.GetTilingKey(); + run_info.GetAllWorkspaces(run_info_workspaces_); GELOGD("Done invoking OpParaCalculate successfully. 
block_dim = %u, tiling size = %zu, tiling_key = %u", block_dim_, tiling_data_.size(), tiling_key_); return SUCCESS; From df52801ea551999be69e9d42285aa9b06008ff92 Mon Sep 17 00:00:00 2001 From: wuweikang Date: Thu, 13 May 2021 16:14:41 +0800 Subject: [PATCH 058/226] remove update of session_id --- ge/graph/manager/graph_manager.cc | 2 +- ge/hybrid/model/hybrid_model_builder.cc | 46 +++++++++++++++---- ge/hybrid/model/hybrid_model_builder.h | 1 + ge/model/ge_root_model.h | 5 ++ .../executor/subgraph_executor_unittest.cc | 3 ++ .../model/hybrid_model_builder_unittest.cc | 26 +++++++++-- 6 files changed, 67 insertions(+), 16 deletions(-) diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index bf04ed58..329db797 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -3132,10 +3132,10 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { } // Avoid repeatively prerun for graphs owns same graph_id in online inference concurrency if (count > 1 && graph_node->GetBuildFlag()) { - graph_node->Lock(); GELOGD("Avoid repeatively prerun, graph_id:%u.", args.graph_id); // In online inference concurrency senario, graph_node is allowed to be locked for 'count' times graph_node->SetSemSize(count); + graph_node->Lock(); graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback })); GELOGI("[PreRunThread] Loop end. Start to run with cached build model."); diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 5337a0cf..01e189d3 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -147,6 +147,7 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); hybrid_model_.model_name_ = ge_root_model_->GetModelName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); + GE_CHK_STATUS_RET(CopyGraph(), "[Invoke][CopyGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName()); @@ -174,8 +175,8 @@ Status HybridModelBuilder::BuildForSingleOp() { hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); auto ret = ge_root_model_->GetSubgraphInstanceNameToModel(); - const GeModelPtr ge_model = ret[ge_root_model_->GetRootGraph()->GetName()]; - GE_CHK_STATUS_RET(IndexTaskDefs(ge_root_model_->GetRootGraph(), ge_model), + const GeModelPtr ge_model = ret[hybrid_model_.root_graph_->GetName()]; + GE_CHK_STATUS_RET(IndexTaskDefs(hybrid_model_.root_graph_, ge_model), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName()); @@ -190,6 +191,29 @@ Status HybridModelBuilder::ValidateParams() { return SUCCESS; } +Status HybridModelBuilder::CopyGraph() { + GELOGD("Copy compute graph begin."); + auto root_graph = ge_root_model_->GetRootGraph(); + + ge_root_model_->IncreaseBuildTimes(); + std::string new_graph_name =
ge_root_model_->GetRootGraph()->GetName() + "_" + + std::to_string(ge_root_model_->GetBuildTimes()); + ComputeGraphPtr new_root_graph = MakeShared<ComputeGraph>(new_graph_name); + GE_CHECK_NOTNULL(new_root_graph); + int32_t depth = 0; + std::map node_old_2_new; + std::map op_desc_old_2_new; + graphStatus ret = GraphUtils::CopyComputeGraph(root_graph, new_root_graph, node_old_2_new, op_desc_old_2_new, depth); + if (ret != GRAPH_SUCCESS) { + GELOGE(GRAPH_FAILED, "Copy compute graph failed."); + return GRAPH_FAILED; + } + hybrid_model_.root_graph_ = new_root_graph; + + GELOGD("Copy compute graph[%s] success.", new_graph_name.c_str()); + return SUCCESS; +} + Status HybridModelBuilder::BuildNodeItem(const NodePtr &node, NodeItem &node_item) { auto op_desc = node->GetOpDesc(); GE_CHK_STATUS_RET(ParseForceInfershapeNodes(node, node_item), @@ -814,7 +838,7 @@ Status HybridModelBuilder::BuildOutputMapping(GraphItem &graph_item, } Status HybridModelBuilder::LoadGraph() { - auto root_graph = ge_root_model_->GetRootGraph(); + auto root_graph = hybrid_model_.root_graph_; if (!GetContext().GetHostExecFlag()) { std::shared_ptr<ComputeGraph> merged_graph; GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", root_graph->GetDirectNodesSize(), root_graph->GetAllNodesSize()); @@ -828,7 +852,6 @@ Status HybridModelBuilder::LoadGraph() { root_graph->GetAllNodesSize()); } - hybrid_model_.root_graph_ = root_graph; GE_CHK_STATUS_RET(RelinkNextIteration(), "[%s] Relink NextIteration failed", GetGraphName()); // Reset node id by topological order across all subgraphs int64_t index = 0; @@ -877,6 +900,7 @@ Status HybridModelBuilder::LoadGraph() { } for (auto &it : hybrid_model_.known_shape_sub_models_) { auto node_item = MutableNodeItem(it.first); + GE_CHECK_NOTNULL(node_item); AscendString graph_name; GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); @@ -1125,7 +1149,9 @@ Status HybridModelBuilder::InitWeights() { sub_weight_buffer->GetSize()); auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); if (subgraph != ge_root_model_->GetRootGraph()) { - subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first); + subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_model.first); + } else { + subgraph = hybrid_model_.root_graph_; } GE_CHECK_NOTNULL(subgraph); hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer)); @@ -1282,7 +1308,7 @@ Status HybridModelBuilder::IndexTaskDefs(const ComputeGraphPtr &sub_graph, const } Status HybridModelBuilder::IndexTaskDefs() { - const auto root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; const auto &root_graph_name = root_graph->GetName(); if (SetOutputNameAttr(*root_graph) != SUCCESS) { GELOGW("Set output name attr failed."); @@ -1316,7 +1342,7 @@ Status HybridModelBuilder::IndexTaskDefs() { Status HybridModelBuilder::IndexSpecialNodes() { GELOGD("Start to index special nodes"); - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &node : root_graph->GetAllNodes()) { GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); @@ -1471,7 +1497,7 @@ Status HybridModelBuilder::InitRuntimeParams() { runtime_param_.session_id = ret ? static_cast<uint64_t>(value) : 0; ret = ge::AttrUtils::GetInt(first_model, ATTR_MODEL_TASK_GEN_VAR_ADDR, value); runtime_param_.logic_var_base = ret ?
static_cast<uint64_t>(value) : 0; - runtime_param_.graph_id = ge_root_model_->GetRootGraph()->GetGraphID(); + runtime_param_.graph_id = hybrid_model_.root_graph_->GetGraphID(); value = 0; for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) { (void) ge::AttrUtils::GetInt(it.second, ATTR_MODEL_VAR_SIZE, value); @@ -1608,7 +1634,7 @@ Status HybridModelBuilder::TransAllVarData() { } Status HybridModelBuilder::CopyVarData() { - GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(ge_root_model_->GetRootGraph(), + GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(hybrid_model_.root_graph_, runtime_param_.session_id, hybrid_model_.device_id_), "[Invoke][CopyVarData] failed."); @@ -1691,7 +1717,7 @@ Status HybridModelBuilder::LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem } Status HybridModelBuilder::RecoverGraphUnknownFlag() { - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &sub_graph : root_graph->GetAllSubgraphs()) { GE_CHECK_NOTNULL(sub_graph); for (const auto &node : sub_graph->GetDirectNode()) { diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index 92974441..3ab43b7f 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -56,6 +56,7 @@ class HybridModelBuilder { Status BuildOutputMapping(GraphItem &partitioned_call, const NodeItem &node_item, bool is_root_graph); Status ValidateParams(); Status LoadGraph(); + Status CopyGraph(); Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model); Status LoadTask(NodeItem &node_item); Status LoadTasks(); diff --git a/ge/model/ge_root_model.h b/ge/model/ge_root_model.h index 9e8e116e..b6e3d081 100755 --- a/ge/model/ge_root_model.h +++ b/ge/model/ge_root_model.h @@ -60,6 +60,10 @@ class GeRootModel { bool GetTrainFlag() const { return train_flag_; } + int32_t GetBuildTimes() const { return hybrid_build_times_; } + + void IncreaseBuildTimes() { hybrid_build_times_++; } + private: ComputeGraphPtr root_graph_ = nullptr; std::map<std::string, GeModelPtr> subgraph_instance_name_to_model_; @@ -69,6 +73,7 @@ class GeRootModel { bool train_flag_ = false; std::string model_name_; bool is_specific_stream_ = false; + int32_t hybrid_build_times_ = 0; }; } // namespace ge using GeRootModelPtr = std::shared_ptr<GeRootModel>; diff --git a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc index 2dc3b639..827705ae 100644 --- a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc @@ -249,6 +249,9 @@ TEST_F(UtestSubgraphExecutor, cond_graph_schedule_tasks) { graph_context.callback_manager = std::unique_ptr<CallbackManager>(new CallbackManager()); ASSERT_EQ(graph_context.callback_manager->Init(), SUCCESS); + auto root_graph = hybrid_model.root_graph_; + switch_t = root_graph->FindNode("switch_t"); + switch_f = root_graph->FindNode("switch_f"); const auto node_it_t = hybrid_model.node_items_.find(switch_t); const auto node_it_f = hybrid_model.node_items_.find(switch_f); ASSERT_NE(hybrid_model.node_items_.end(), node_it_t); diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 2ab82350..95669b73 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -214,11 +214,17 @@
ASSERT_EQ(it->second->frame_index_, index); ASSERT_EQ(it->second->parent_frame_, -1); }; - TestFrameGroup(enter1, control_group_index); - TestFrameGroup(active1, control_group_index); - TestFrameGroup(active2, control_group_index); - TestFrameGroup(active3, control_group_index); - TestFrameGroup(output1, -1); + auto root_graph = hybrid_model.root_graph_; + auto enter1_node = root_graph->FindNode("enter"); + auto active1_node = root_graph->FindNode("active1"); + auto active2_node = root_graph->FindNode("active2"); + auto active3_node = root_graph->FindNode("active3"); + auto output1_node = root_graph->FindNode("net_output"); + TestFrameGroup(enter1_node, control_group_index); + TestFrameGroup(active1_node, control_group_index); + TestFrameGroup(active2_node, control_group_index); + TestFrameGroup(active3_node, control_group_index); + TestFrameGroup(output1_node, -1); engine_mapping.clear(); task_executor.clear(); @@ -346,4 +352,14 @@ EXPECT_EQ(hybrid_model_builder.InitVariableTensors(), SUCCESS); EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 1); HostMemManager::Instance().var_memory_base_map_.clear(); } + +TEST_F(UtestHybridModelBuilder, copy_graph_success) { +ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test"); +GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph); +HybridModel hybrid_model(ge_root_model); +HybridModelBuilder hybrid_model_builder(hybrid_model); + +Status st = hybrid_model_builder.CopyGraph(); +EXPECT_EQ(st, SUCCESS); +} } // namespace ge From f8154fbef674da3320cdbc325c5957678be3133a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Mon, 21 Jun 2021 17:39:14 +0800 Subject: [PATCH 059/226] opt info --- ge/common/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index 91f3b27d..d81bb00a 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -87,7 +87,7 @@ target_include_directories(ge_common PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> - $<$>:${GE_DEPEND_DIR}../abl/licctrl> + $<$>:${GE_DEPEND_DIR}/abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> @@ -159,7 +159,7 @@ target_include_directories(ge_common_static PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> - $<$>:${GE_DEPEND_DIR}/../abl/licctrl> + $<$>:${GE_DEPEND_DIR}/abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> From 096981679abff4bc050f69aa6736ea45ae77db09 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 21 Jun 2021 18:35:43 +0800 Subject: [PATCH 060/226] Update submodule.
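This only moves the metadef submodule pointer from e189fc7f to f75dbad2, recorded by the one-line gitlink diff below; presumably it picks up the optiling::utils interfaces that PATCH 057 started calling. For reference, a bump like this is normally produced with plain git (assuming the new commit is reachable from the metadef remote):

    git -C metadef fetch origin
    git -C metadef checkout f75dbad2f2249608080e482acc6d723e04fec3da
    git add metadef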
--- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index e189fc7f..f75dbad2 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit e189fc7f4da9f7714f009d70da4db627de17955d +Subproject commit f75dbad2f2249608080e482acc6d723e04fec3da From 64b22f8c98452144804528b69d453fd22ab61474 Mon Sep 17 00:00:00 2001 From: wangzhengjun Date: Mon, 21 Jun 2021 19:32:53 +0800 Subject: [PATCH 061/226] skip control flow op when replace node with empty tensor --- .../passes/replace_with_empty_const_pass.cc | 20 +++++++++ .../replace_with_empty_const_pass_unittest.cc | 45 +++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/ge/graph/passes/replace_with_empty_const_pass.cc b/ge/graph/passes/replace_with_empty_const_pass.cc index 9459c852..3176d1ee 100644 --- a/ge/graph/passes/replace_with_empty_const_pass.cc +++ b/ge/graph/passes/replace_with_empty_const_pass.cc @@ -21,7 +21,23 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/utils/graph_utils.h" +#include "graph/utils/node_utils.h" +namespace { +const std::unordered_set<std::string> kControlFlowOps = { + ge::SWITCH, + ge::REFSWITCH, + ge::MERGE, + ge::REFMERGE, + ge::ENTER, + ge::REFENTER, + ge::NEXTITERATION, + ge::REFNEXTITERATION, + ge::EXIT, + ge::REFEXIT, + ge::LOOPCOND +}; +} namespace ge { Status ReplaceWithEmptyConstPass::Run(NodePtr &node) { GELOGD("ReplaceWithEmptyConstPass in."); @@ -39,6 +55,10 @@ Status ReplaceWithEmptyConstPass::Run(NodePtr &node) { GELOGI("Node %s is const. Ignore current pass.", node->GetName().c_str()); return SUCCESS; } + if (kControlFlowOps.count(NodeUtils::GetNodeType(node)) != 0) { + GELOGI("Node %s is control flow op. Ignore current pass.", node->GetName().c_str()); + return SUCCESS; + } // Node like no op, it has no output if (node->GetOpDesc()->GetAllOutputsDescPtr().empty()) { GELOGI("Node %s has no output desc.
Ignore current pass.", node->GetName().c_str()); diff --git a/tests/ut/ge/graph/passes/replace_with_empty_const_pass_unittest.cc b/tests/ut/ge/graph/passes/replace_with_empty_const_pass_unittest.cc index 6711b0d3..d353498c 100644 --- a/tests/ut/ge/graph/passes/replace_with_empty_const_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/replace_with_empty_const_pass_unittest.cc @@ -57,6 +57,36 @@ ut::GraphBuilder Graph1Builder() { builder.AddDataEdge(cast1, 0, conv2d, 0); return builder; } + +/// data1 const1 +/// \ / +/// add1 +/// | +/// data2 -> switch1 (empty) +/// | +/// conv2d +ut::GraphBuilder Graph2Builder() { + ut::GraphBuilder builder = ut::GraphBuilder("graph2"); + auto data1 = builder.AddNode("data1", "Data", 0, 1); + auto data2 = builder.AddNode("data2", "Data", 0, 1); + auto const1 = builder.AddNode("const1", "Const", 0, 1); + auto add1 = builder.AddNode("add1", "Add", 2, 1); + auto switch1 = builder.AddNode("switch1", "Switch", 2, 1); + auto conv2d = builder.AddNode("conv2d", "Conv2D", 1, 0); + + add1->GetOpDesc()->AddInputDesc(GeTensorDesc(GeShape({1, 1, 8, 8}),FORMAT_NCHW)); + add1->GetOpDesc()->AddInputDesc(GeTensorDesc(GeShape({1, 1, 8, 8}),FORMAT_NCHW)); + add1->GetOpDesc()->AddOutputDesc(GeTensorDesc(GeShape({1, 1, 8, 8}),FORMAT_NCHW)); + GeTensorDesc empty_tensor(GeShape({1, 0, 8, 8}),FORMAT_NCHW); + switch1->GetOpDesc()->UpdateOutputDesc(0, empty_tensor); + + builder.AddDataEdge(data1, 0, add1, 0); + builder.AddDataEdge(const1, 0, add1, 1); + builder.AddDataEdge(add1, 0, switch1, 0); + builder.AddDataEdge(data2, 0, switch1, 1); + builder.AddDataEdge(switch1, 0, conv2d, 0); + return builder; +} } // namespace @@ -85,4 +115,19 @@ TEST_F(UtestReplaceWithEmptyConstPass, replace_whith_empty_const_success) { auto conv2d = graph->FindNode("conv2d"); EXPECT_EQ(conv2d->GetInDataNodes().at(0)->GetType(),"Const"); } + +TEST_F(UtestReplaceWithEmptyConstPass, replace_whith_empty_switch_skip) { + auto builder = Graph2Builder(); + auto graph = builder.GetGraph(); + graph->SetSessionID(0); + ReplaceWithEmptyConstPass replace_with_empty_const_pass; + + EXPECT_EQ(graph->GetDirectNodesSize(), 6); + // run pass on switch1, graph still has 6 nodes + auto switch1 = graph->FindNode("switch1"); + EXPECT_NE(switch1, nullptr); + Status ret = replace_with_empty_const_pass.Run(switch1); + EXPECT_EQ(ret, SUCCESS); + EXPECT_EQ(graph->GetDirectNodesSize(), 6); +} } // namespace ge From 0b31eb4bda4f73b54d6f23ce064a240a4cbf5fa8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Tue, 22 Jun 2021 09:33:39 +0800 Subject: [PATCH 062/226] =?UTF-8?q?=E5=9B=9E=E9=80=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CMakeLists.txt | 3 +- ge/common/CMakeLists.txt | 8 -- ge/common/ge_opt_info.cc | 58 --------- ge/common/ge_opt_info.h | 31 ----- ge/graph/manager/graph_manager.cc | 7 - tests/CMakeLists.txt | 1 - tests/depends/opt_info/CMakeLists.txt | 37 ------ tests/depends/opt_info/src/opt_info_stub.cc | 46 ------- tests/framework/cmake/graphengine.cmake | 2 - tests/st/testcase/test_ge_opt_info.cc | 123 ------------------ tests/ut/ge/CMakeLists.txt | 4 - tests/ut/ge/common/ge_opt_info_unittest.cc | 82 ------------ third_party/fwkacllib/inc/opt_info/opt_info.h | 34 ----- 13 files changed, 1 insertion(+), 435 deletions(-) delete mode 100644 ge/common/ge_opt_info.cc delete mode 100644 ge/common/ge_opt_info.h delete mode 100644 tests/depends/opt_info/CMakeLists.txt delete mode 100644 tests/depends/opt_info/src/opt_info_stub.cc delete mode 100644 
tests/st/testcase/test_ge_opt_info.cc delete mode 100644 tests/ut/ge/common/ge_opt_info_unittest.cc delete mode 100644 third_party/fwkacllib/inc/opt_info/opt_info.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 77a759ba..e3cc1e32 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,7 +95,6 @@ else () #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) - find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) if(PLATFORM STREQUAL "train") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) @@ -180,4 +179,4 @@ else () add_subdirectory(ge) -endif () \ No newline at end of file +endif () diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index d81bb00a..f55ff427 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -43,7 +43,6 @@ set(SRC_LIST "op/ge_op_utils.cc" "thread_pool.cc" "ge/tbe_plugin_manager.cc" - "ge_opt_info.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL) @@ -87,11 +86,9 @@ target_include_directories(ge_common PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> - $<$>:${GE_DEPEND_DIR}/abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> - $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info> ) target_link_options(ge_common PRIVATE @@ -111,7 +108,6 @@ target_link_libraries(ge_common PRIVATE c_sec error_manager slog - opt_feature -Wl,--as-needed json $<$>:-lrt> @@ -159,11 +155,9 @@ target_include_directories(ge_common_static PRIVATE #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> $<$>:${GE_DEPEND_DIR}/inc/cce> - $<$>:${GE_DEPEND_DIR}/abl/licctrl> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> - $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info> ) target_link_libraries(ge_common_static PRIVATE @@ -219,7 +213,6 @@ target_include_directories(ge_common PRIVATE ${CMAKE_BINARY_DIR}/proto/graphengine_protos ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain - ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_common PRIVATE @@ -235,7 +228,6 @@ target_link_libraries(ge_common PRIVATE c_sec error_manager slog - opt_feature static_mmpa -Wl,--as-needed json diff --git a/ge/common/ge_opt_info.cc b/ge/common/ge_opt_info.cc deleted file mode 100644 index c6bac480..00000000 --- a/ge/common/ge_opt_info.cc +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "common/ge_opt_info.h" - -#include -#include -#include "graph/ge_local_context.h" -#include "ge/ge_api_types.h" -#include "common/debug/ge_log.h" -#include "opt_info.h" - -namespace ge { -Status GeOptInfo::SetOptInfo() { - std::string soc_ver; - graphStatus ret = GetThreadLocalContext().GetOption(SOC_VERSION, soc_ver); - if (ret != GRAPH_SUCCESS) { - REPORT_CALL_ERROR("E19999", "Get soc version failed."); - GELOGE(FAILED, "[Get][SocVersion]Get soc version failed."); - return FAILED; - } - GELOGD("Soc version:%s.", soc_ver.c_str()); - std::map<std::string, std::string> opt_info; - // the first arg does not work at present. - if (gelc::GetOptInfo(gelc::kOffline, soc_ver, opt_info) != gelc::SUCCESS) { - REPORT_CALL_ERROR("E19999", "Get optional information failed, is_offline:%d, soc version:%s", - gelc::kOffline, soc_ver.c_str()); - GELOGE(FAILED, "[Get][OptInfo]Get optional information failed, is_offline:%d, soc version:%s", - gelc::kOffline, soc_ver.c_str()); - return FAILED; - } - // do nothing if get empty information - if (opt_info.empty()) { - GELOGI("Optional information is empty."); - return SUCCESS; - } - std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); - for (const auto &itr : opt_info) { - graph_options.emplace(itr.first, itr.second); - GELOGI("Get optional information success, key:%s, value:%s.", itr.first.c_str(), itr.second.c_str()); - } - GetThreadLocalContext().SetGraphOption(graph_options); - return SUCCESS; -} -} // namespace ge diff --git a/ge/common/ge_opt_info.h b/ge/common/ge_opt_info.h deleted file mode 100644 index 4ec9a59f..00000000 --- a/ge/common/ge_opt_info.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef GE_COMMON_GE_OPT_INFO_H_ -#define GE_COMMON_GE_OPT_INFO_H_ - -#include "ge/ge_api_error_codes.h" -#include "register/register_types.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { - public: - GeOptInfo() = default; - static Status SetOptInfo(); -}; -} // namespace ge - -#endif // GE_COMMON_GE_OPT_INFO_H_ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 3861e6ac..b862a7d6 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -27,7 +27,6 @@ #include "common/math/math_util.h" #include "common/thread_pool.h" #include "common/dump/dump_manager.h" -#include "common/ge_opt_info.h" #include "analyzer/analyzer.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -1002,12 +1001,6 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector - c_sec -) - -target_include_directories(opt_feature_stub INTERFACE ${CMAKE_CURRENT_LIST_DIR}/src) diff --git a/tests/depends/opt_info/src/opt_info_stub.cc b/tests/depends/opt_info/src/opt_info_stub.cc deleted file mode 100644 index df518c4b..00000000 --- a/tests/depends/opt_info/src/opt_info_stub.cc +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "opt_info.h" -#include -#include -#include -#include - -namespace gelc { -namespace { -const std::vector<std::string> kSocVersions = {"Ascend910"}; -} - -void SetAllOptInfo(std::map<std::string, std::string> &opt_infos) { - opt_infos.emplace("opt_module.fe", "all"); - opt_infos.emplace("opt_module.pass", "all"); - opt_infos.emplace("opt_module.op_tune", "all"); - opt_infos.emplace("opt_module.rl_tune", "all"); - opt_infos.emplace("opt_module.aoe", "all"); -} - -Status GetOptInfo(WorkMode mode, const std::string &soc_ver, - std::map<std::string, std::string> &opt_infos) { - if (std::find(kSocVersions.begin(), kSocVersions.end(), soc_ver)== kSocVersions.end()) { - SetAllOptInfo(opt_infos); - return SUCCESS; - } - opt_infos.emplace("opt_module.fe", "all"); - opt_infos.emplace("opt_module.pass", "all"); - opt_infos.emplace("opt_module.op_tune", "all"); - return SUCCESS; -} -} // namespace gelc diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake index c4380016..81aa00cc 100644 --- a/tests/framework/cmake/graphengine.cmake +++ b/tests/framework/cmake/graphengine.cmake @@ -103,7 +103,6 @@ list(APPEND INCLUDE_DIRECTORIES "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" - "${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info" "${GE_CODE_DIR}/tests/ut/ge" "${GE_CODE_DIR}/tests/ut/common" "${CMAKE_BINARY_DIR}" @@ -118,7 +117,6 @@ list(APPEND STUB_LIBS runtime_stub profiler_stub hccl_stub - opt_feature_stub error_manager_stub ascend_protobuf json diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc deleted file mode 100644 index 8fc47a9b..00000000 --- a/tests/st/testcase/test_ge_opt_info.cc +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#include -#include "easy_graph/graph/box.h" -#include "easy_graph/graph/node.h" -#include "easy_graph/builder/graph_dsl.h" -#include "easy_graph/builder/box_builder.h" -#include "easy_graph/layout/graph_layout.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" -#include "graph/graph.h" -#include "graph/compute_graph.h" -#include "framework/common/types.h" -#include "graph/debug/ge_attr_define.h" -#include "ge_graph_dsl/graph_dsl.h" -#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" -#define protected public -#define private public -#include "common/ge_opt_info.h" -#undef private -#undef protected - -namespace ge { -class STEST_opt_info : public testing::Test { - protected: - void SetUp() {} - void TearDown() {} -}; - -TEST_F(STEST_opt_info, get_opt_info_all) { - std::map<std::string, std::string> options = {{ge::SOC_VERSION, "Ascend310"}}; - GetThreadLocalContext().SetGlobalOption(options); - - /// data1 data2 - /// \ / - /// add - // build graph - DEF_GRAPH(g1) { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - CHAIN(NODE("data2", DATA)->NODE("add")); - }); - - auto graph = ToGeGraph(g1); - - // new session & add graph - Session session(options); - auto ret = session.AddGraph(1, graph, options); - EXPECT_EQ(ret, SUCCESS); - // build input tensor - std::vector inputs; - // build_graph through session - ret = session.BuildGraph(1, inputs); - EXPECT_EQ(ret, SUCCESS); - - std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.rl_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.aoe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(STEST_opt_info, get_opt_info_success) { - std::map<std::string, std::string> options = {{ge::SOC_VERSION, "Ascend910"}}; - GetThreadLocalContext().SetGlobalOption(options); - - /// data1 data2 - /// \ / - /// add - // build graph - DEF_GRAPH(g1) { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - CHAIN(NODE("data2", DATA)->NODE("add")); - }); - - auto graph = ToGeGraph(g1); - - // new session & add graph - Session session(options); - auto ret = session.AddGraph(1, graph, options); - EXPECT_EQ(ret, SUCCESS); - // build input tensor - std::vector inputs; - // build_graph through session - ret = session.BuildGraph(1, inputs); - EXPECT_EQ(ret, SUCCESS); - - std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} -} // namespace ge diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 3ea4d1a7..8b024820 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -62,7 +62,6 @@ include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc)
include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain) -include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info) include_directories(${GE_CODE_DIR}/tests/ut/ge) include_directories(${GE_CODE_DIR}/tests/ut/common) include_directories(${CMAKE_BINARY_DIR}) @@ -173,7 +172,6 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/dump/exception_dumper.cc" "${GE_CODE_DIR}/ge/common/dump/opdebug_register.cc" "${GE_CODE_DIR}/ge/common/dump/dump_op.cc" - "${GE_CODE_DIR}/ge/common/ge_opt_info.cc" "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" "${GE_CODE_DIR}/ge/model/ge_root_model.cc" "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" @@ -770,7 +768,6 @@ set(MULTI_PARTS_TEST_FILES "common/dump_op_unittest.cc" "common/dump_exception_unittest.cc" "common/opdebug_register_unittest.cc" - "common/ge_opt_info_unittest.cc" "common/format_transfer_unittest.cc" "common/format_transfer_transpose_unittest.cc" "common/format_transfer_nchw_5d_unittest.cc" @@ -864,7 +861,6 @@ list(APPEND COMMON_SHARED_LIBRARIES mmpa_stub hccl_stub error_manager_stub - opt_feature_stub ascend_protobuf json ) diff --git a/tests/ut/ge/common/ge_opt_info_unittest.cc b/tests/ut/ge/common/ge_opt_info_unittest.cc deleted file mode 100644 index 3ac51615..00000000 --- a/tests/ut/ge/common/ge_opt_info_unittest.cc +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include - -#define protected public -#define private public -#include "common/ge_opt_info.h" -#include "graph/ge_local_context.h" -#include "external/ge/ge_api_types.h" -#undef private -#undef protected - -namespace ge { -class UTEST_opt_info : public testing::Test { - protected: - void SetUp() {} - void TearDown() {} -}; - -TEST_F(UTEST_opt_info, get_opt_info_success) { - std::map<std::string, std::string> options = {{ge::SOC_VERSION, "Ascend910"}}; - GetThreadLocalContext().SetGlobalOption(options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::SUCCESS); - std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(UTEST_opt_info, get_opt_info_all) { - std::map<std::string, std::string> global_options = {{ge::SOC_VERSION, "Ascend310"}}; - GetThreadLocalContext().SetGlobalOption(global_options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::SUCCESS); - std::map<std::string, std::string> graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.rl_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.aoe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(UTEST_opt_info, get_opt_info_failed) { - std::map<std::string, std::string> options; - GetThreadLocalContext().SetGlobalOption(options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::FAILED); -} - -} // namespace ge diff --git a/third_party/fwkacllib/inc/opt_info/opt_info.h b/third_party/fwkacllib/inc/opt_info/opt_info.h deleted file mode 100644 index ea9bb529..00000000 --- a/third_party/fwkacllib/inc/opt_info/opt_info.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#include -#include - -namespace gelc { -using Status = uint32_t; -using WorkMode = uint32_t; -const Status SUCCESS = 0x0; -const Status FAILED = 0xFFFFFFFF; -const WorkMode kOffline = 0x0; -const WorkMode kInline = 0x01; - -extern "C" { -__attribute__((visibility ("default"))) -Status GetOptInfo(WorkMode mode, const std::string &soc_ver, - std::map<std::string, std::string> &opt_info_map); -} -} // namespace gelc - From e6e71fe317d331002491b27b3b26c6009c2a1ecf Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 19 Jun 2021 17:18:30 +0800 Subject: [PATCH 063/226] Normalize include files... --- ge/CMakeLists.txt | 30 ++-------- ge/analyzer/analyzer.cc | 2 +- ge/client/ge_api.cc | 4 +- ge/common/CMakeLists.txt | 14 ----- ge/common/base64.h | 4 +- ge/common/dump/dump_manager.h | 2 +- ge/common/dump/dump_op.h | 2 +- ge/common/dump/dump_properties.cc | 2 +- ge/common/dump/opdebug_register.cc | 2 +- ge/common/dump/opdebug_register.h | 4 +- .../format_transfer_fractal_z.cc | 2 +- .../format_transfer_nchw_fz_c04.cc | 2 +- ge/common/ge/datatype_util.h | 2 +- ge/common/ge/plugin_manager.h | 4 +- ge/common/ge_format_util.cc | 2 +- ge/common/helper/model_cache_helper.h | 2 +- ge/common/math/fp16_math.cc | 2 +- ge/common/model_parser/model_parser.cc | 2 +- ge/common/op/attr_value_util.cc | 2 +- ge/common/profiling/ge_profiling.cc | 2 +- ge/common/profiling/ge_runner_profiling.cc | 2 +- ge/common/profiling/profiling_manager.cc | 2 +- ge/common/properties_manager.cc | 2 +- ge/common/properties_manager.h | 2 +- ge/common/thread_pool.cc | 2 +- ge/common/thread_pool.h | 2 +- ge/common/types.cc | 2 +- ge/engine_manager/dnnengine_manager.cc | 2 +- ge/engine_manager/dnnengine_manager.h | 4 +- ge/executor/CMakeLists.txt | 6 -- ge/executor/ge_executor.cc | 6 +- ge/ge_local_engine/CMakeLists.txt | 10 ---- ge/ge_local_engine/engine/ge_local_engine.cc | 6 +- ge/ge_local_engine/engine/host_cpu_engine.cc | 2 +- ge/ge_local_engine/engine/host_cpu_engine.h | 2 +- .../ge_local_ops_kernel_builder.cc | 4 +- .../ge_local_ops_kernel_info.cc | 6 +- .../ops_kernel_store/op/ge_deleted_op.cc | 2 +- .../ops_kernel_store/op/no_op.cc | 2 +- ge/ge_local_engine/ops_kernel_store/op/op.cc | 2 +- ge/ge_local_engine/ops_kernel_store/op/op.h | 2 +- .../ops_kernel_store/op/op_factory.cc | 2 +- ge/ge_runtime/CMakeLists.txt | 8 --- ge/ge_runtime/model_runner.cc | 8 +-- ge/ge_runtime/output.cc | 4 +- ge/ge_runtime/output.h | 4 +- ge/ge_runtime/runtime_model.cc | 14 ++--- ge/ge_runtime/runtime_model.h | 4 +- ge/ge_runtime/task/task.h | 2 +- ge/ge_runtime/task/task_factory.h | 4 +- ge/generator/ge_generator.cc | 10 ++-- ge/generator/generator_api.cc | 6 +- ge/graph/build/graph_builder.cc | 2 +- ge/graph/build/graph_builder.h | 10 ++-- ge/graph/build/label_allocator.cc | 6 +- ge/graph/build/memory/block_mem_assigner.cc | 4 +- ge/graph/build/memory/block_mem_assigner.h | 6 +- ge/graph/build/memory/hybrid_mem_assigner.h | 4 +- ge/graph/build/memory/mem_assigner.h | 4 +- ge/graph/build/memory/memory_assigner.cc | 2 +- ge/graph/build/memory/var_mem_assign_util.cc | 6 +- ge/graph/build/memory/var_mem_assign_util.h | 4 +- ge/graph/build/model_builder.cc | 10 ++-- ge/graph/build/model_builder.h | 8 +-- ge/graph/build/run_context.cc | 2 +- ge/graph/build/run_context.h | 2 +- ge/graph/build/stream_allocator.cc | 2 +- ge/graph/build/stream_graph_optimizer.cc | 4 +- ge/graph/build/stream_graph_optimizer.h | 2 +- ge/graph/build/task_generator.cc | 6 +- ge/graph/build/task_generator.h | 2 +- ge/graph/common/bcast.cc | 2 +- ge/graph/common/bcast.h | 6 +-
ge/graph/common/local_context.cc | 6 +- ge/graph/common/local_context.h | 2 +- ge/graph/common/omg_util.h | 4 +- ge/graph/common/transop_util.cc | 2 +- ge/graph/execute/graph_execute.h | 12 ++-- ge/graph/label/case_label_maker.cc | 6 +- ge/graph/label/if_label_maker.cc | 6 +- ge/graph/label/label_maker.cc | 4 +- .../label/partitioned_call_label_maker.cc | 6 +- ge/graph/label/while_label_maker.cc | 6 +- ge/graph/load/graph_loader.cc | 2 +- ge/graph/load/graph_loader.h | 6 +- ge/graph/load/model_manager/aipp_utils.cc | 4 +- ge/graph/load/model_manager/aipp_utils.h | 4 +- .../load/model_manager/cpu_queue_schedule.cc | 4 +- .../load/model_manager/cpu_queue_schedule.h | 2 +- ge/graph/load/model_manager/data_dumper.cc | 2 +- ge/graph/load/model_manager/data_dumper.h | 2 +- ge/graph/load/model_manager/data_inputer.cc | 6 +- ge/graph/load/model_manager/data_inputer.h | 4 +- ge/graph/load/model_manager/davinci_model.cc | 8 +-- ge/graph/load/model_manager/davinci_model.h | 12 ++-- ge/graph/load/model_manager/model_manager.cc | 2 +- ge/graph/load/model_manager/model_manager.h | 12 ++-- ge/graph/load/model_manager/model_utils.cc | 6 +- ge/graph/load/model_manager/model_utils.h | 4 +- .../task_info/kernel_ex_task_info.cc | 2 +- .../task_info/kernel_task_info.cc | 4 +- .../task_info/super_kernel/super_kernel.cc | 2 +- .../super_kernel/super_kernel_factory.cc | 2 +- .../super_kernel/super_kernel_factory.h | 2 +- .../load/model_manager/tbe_handle_store.cc | 4 +- .../load/model_manager/tbe_handle_store.h | 2 +- .../load/model_manager/zero_copy_offset.h | 2 +- ge/graph/load/model_manager/zero_copy_task.cc | 2 +- ge/graph/manager/graph_manager.h | 4 +- ge/graph/manager/graph_manager_utils.cc | 4 +- ge/graph/manager/graph_manager_utils.h | 10 ++-- ge/graph/manager/graph_var_manager.h | 2 +- ge/graph/manager/host_mem_manager.h | 2 +- .../manager/model_manager/event_manager.h | 6 +- ge/graph/manager/trans_var_data_utils.cc | 6 +- ge/graph/manager/trans_var_data_utils.h | 2 +- ge/graph/manager/util/debug.h | 6 +- ge/graph/manager/util/hcom_util.cc | 6 +- ge/graph/manager/util/hcom_util.h | 8 +-- ge/graph/optimize/common/params.h | 2 +- ge/graph/optimize/graph_optimize.h | 6 +- ge/graph/optimize/summary_optimize.cc | 2 +- ge/graph/partition/dynamic_shape_partition.h | 2 +- ge/graph/partition/engine_place.cc | 2 +- ge/graph/partition/engine_place.h | 2 +- ge/graph/partition/graph_partition.cc | 2 +- ge/graph/partition/graph_partition.h | 2 +- ge/graph/partition/stage_partition.cc | 4 +- ge/graph/partition/stage_partition.h | 2 +- ge/graph/passes/addn_pass.h | 4 +- .../passes/aicpu_constant_folding_pass.cc | 4 +- ge/graph/passes/atomic_addr_clean_pass.cc | 2 +- ge/graph/passes/atomic_addr_clean_pass.h | 2 +- ge/graph/passes/attach_stream_label_pass.cc | 2 +- ge/graph/passes/base_pass.cc | 2 +- ge/graph/passes/bitcast_pass.h | 4 +- ge/graph/passes/buffer_pool_memory_pass.h | 2 +- .../common_subexpression_elimination_pass.cc | 2 +- .../common_subexpression_elimination_pass.h | 2 +- ge/graph/passes/compile_nodes_pass.cc | 2 +- ge/graph/passes/cond_pass.cc | 2 +- ge/graph/passes/cond_remove_pass.cc | 2 +- ge/graph/passes/constant_folding_pass.cc | 2 +- ge/graph/passes/constant_fuse_same_pass.h | 2 +- ge/graph/passes/data_pass.h | 2 +- ge/graph/passes/dimension_adjust_pass.h | 6 +- ge/graph/passes/dimension_compute_pass.cc | 2 +- .../passes/end_of_sequence_add_control_pass.h | 2 +- ge/graph/passes/flow_ctrl_pass.h | 2 +- ge/graph/passes/for_pass.cc | 2 +- .../fuse_data_nodes_with_common_input_pass.cc | 2 +- 
.../fuse_data_nodes_with_common_input_pass.h | 2 +- ge/graph/passes/get_original_format_pass.cc | 6 +- ge/graph/passes/global_step_insert_pass.h | 2 +- ge/graph/passes/guarantee_const_pass.cc | 4 +- .../passes/hccl_continuous_memcpy_pass.cc | 4 +- ge/graph/passes/hccl_continuous_memcpy_pass.h | 2 +- ge/graph/passes/hccl_group_pass.cc | 2 +- ge/graph/passes/hccl_memcpy_pass.cc | 4 +- ge/graph/passes/hccl_memcpy_pass.h | 2 +- .../passes/hccl_tailing_optimization_pass.cc | 2 +- ge/graph/passes/infershape_pass.cc | 4 +- .../input_output_connection_identify_pass.cc | 2 +- .../input_output_connection_identify_pass.h | 2 +- ge/graph/passes/iterator_op_pass.cc | 4 +- ge/graph/passes/iterator_op_pass.h | 2 +- ge/graph/passes/link_gen_mask_nodes_pass.cc | 2 +- ge/graph/passes/link_gen_mask_nodes_pass.h | 2 +- .../mark_force_unknown_for_cond_pass.cc | 2 +- .../passes/mark_graph_unknown_status_pass.h | 2 +- .../passes/mark_node_unknown_shape_pass.h | 2 +- ge/graph/passes/mark_same_addr_pass.h | 2 +- ge/graph/passes/merge_input_memcpy_pass.cc | 2 +- ge/graph/passes/merge_to_stream_merge_pass.cc | 2 +- ge/graph/passes/net_output_pass.h | 2 +- ge/graph/passes/no_use_reshape_remove_pass.cc | 2 +- ge/graph/passes/parallel_group_pass.h | 2 +- ge/graph/passes/pass_manager.cc | 8 +-- ge/graph/passes/pass_utils.cc | 8 +-- ge/graph/passes/pass_utils.h | 2 +- ge/graph/passes/permute_pass.cc | 4 +- ge/graph/passes/print_op_pass.h | 2 +- ge/graph/passes/prune_pass.cc | 4 +- .../passes/ref_identity_delete_op_pass.cc | 2 +- ge/graph/passes/remove_same_const_pass.cc | 2 +- ge/graph/passes/remove_same_const_pass.h | 2 +- ge/graph/passes/replace_transshape_pass.cc | 2 +- .../passes/resource_pair_add_control_pass.cc | 6 +- .../resource_pair_remove_control_pass.cc | 6 +- .../same_transdata_breadth_fusion_pass.cc | 4 +- ge/graph/passes/save_pass.cc | 2 +- ge/graph/passes/save_pass.h | 2 +- .../passes/shape_operate_op_remove_pass.cc | 6 +- ge/graph/passes/stop_gradient_pass.h | 2 +- .../passes/subexpression_migration_pass.cc | 2 +- .../passes/subexpression_migration_pass.h | 2 +- .../passes/subgraph_const_migration_pass.cc | 2 +- .../passes/subgraph_const_migration_pass.h | 2 +- ge/graph/passes/switch_data_edges_bypass.cc | 8 +-- ge/graph/passes/switch_logic_remove_pass.cc | 2 +- .../passes/switch_to_stream_switch_pass.cc | 2 +- .../passes/transop_breadth_fusion_pass.cc | 2 +- ge/graph/passes/transop_depth_fusion_pass.cc | 4 +- .../transop_nearby_allreduce_fusion_pass.cc | 4 +- .../transop_symmetry_elimination_pass.cc | 4 +- .../transop_without_reshape_fusion_pass.cc | 4 +- ge/graph/passes/unused_args_clean_pass.cc | 2 +- ge/graph/passes/unused_args_clean_pass.h | 2 +- ge/graph/passes/variable_op_pass.cc | 2 +- ge/graph/passes/variable_op_pass.h | 2 +- ...ble_ref_useless_control_out_delete_pass.cc | 2 +- ge/graph/preprocess/graph_preprocess.cc | 8 +-- ge/graph/preprocess/graph_preprocess.h | 10 ++-- .../preprocess/insert_op/base_insert_op.h | 4 +- ge/graph/preprocess/insert_op/ge_aipp_op.cc | 4 +- ge/graph/preprocess/insert_op/ge_aipp_op.h | 2 +- .../insert_op/util_insert_aipp_op.cc | 4 +- ge/graph/preprocess/multi_batch_options.cc | 2 +- ge/host_kernels/broadcast_args_kernel.cc | 6 +- .../broadcast_gradient_args_kernel.cc | 6 +- ge/host_kernels/cast_kernel.cc | 8 +-- ge/host_kernels/concat_offset_kernel.cc | 6 +- ge/host_kernels/concat_v2_kernel.cc | 4 +- ge/host_kernels/dynamic_stitch_kernel.cc | 6 +- ge/host_kernels/empty_kernel.cc | 4 +- ge/host_kernels/expanddims_kernel.cc | 6 +- ge/host_kernels/fill_kernel.cc | 4 +- 
ge/host_kernels/floordiv_kernel.cc | 4 +- ge/host_kernels/floormod_kernel.cc | 4 +- ge/host_kernels/gather_v2_kernel.cc | 8 +-- ge/host_kernels/greater_kernel.cc | 6 +- ge/host_kernels/identity_kernel.cc | 2 +- ge/host_kernels/kernel_utils.cc | 4 +- ge/host_kernels/kernel_utils.h | 4 +- ge/host_kernels/maximum_kernel.cc | 6 +- ge/host_kernels/mul_kernel.cc | 6 +- ge/host_kernels/pack_kernel.cc | 6 +- ge/host_kernels/permute_kernel.cc | 8 +-- ge/host_kernels/range_kernel.cc | 6 +- ge/host_kernels/rank_kernel.cc | 8 +-- ge/host_kernels/reduce_prod_kernel.cc | 4 +- ge/host_kernels/reformat_kernel.cc | 8 +-- ge/host_kernels/reshape_kernel.cc | 6 +- ge/host_kernels/rsqrt_kernel.cc | 8 +-- ge/host_kernels/size_kernel.cc | 6 +- ge/host_kernels/slice_d_kernel.cc | 4 +- ge/host_kernels/slice_kernel.cc | 8 +-- ge/host_kernels/squeeze_kernel.cc | 6 +- ge/host_kernels/ssd_prior_box_kernel.cc | 2 +- ge/host_kernels/sub_kernel.cc | 4 +- ge/host_kernels/transdata_kernel.cc | 8 +-- ge/host_kernels/transpose_kernel.cc | 8 +-- ge/host_kernels/unpack_kernel.cc | 8 +-- ge/host_kernels/unsqueeze_kernel.cc | 6 +- ge/hybrid/common/npu_memory_allocator.cc | 2 +- ge/hybrid/common/npu_memory_allocator.h | 2 +- ge/hybrid/common/tensor_value.h | 2 +- .../executor/hybrid_execution_context.cc | 2 +- .../executor/hybrid_model_async_executor.cc | 2 +- ge/hybrid/executor/hybrid_model_executor.cc | 2 +- .../hybrid_model_pipeline_executor.cc | 18 +++++- .../executor/hybrid_model_pipeline_executor.h | 18 +++++- ge/hybrid/executor/hybrid_profiler.cc | 2 +- ge/hybrid/executor/node_state.h | 2 +- ge/hybrid/executor/rt_callback_manager.h | 2 +- ge/hybrid/executor/subgraph_context.cc | 2 +- ge/hybrid/hybrid_davinci_model.cc | 2 +- ge/hybrid/hybrid_davinci_model_stub.cc | 2 +- ge/hybrid/model/graph_item.cc | 2 +- ge/hybrid/model/hybrid_model.cc | 4 +- ge/hybrid/model/hybrid_model_builder.cc | 2 +- ge/hybrid/model/node_item.cc | 4 +- .../aicore/aicore_node_executor.cc | 2 +- .../node_executor/aicore/aicore_op_task.cc | 2 +- .../node_executor/aicore/aicore_op_task.h | 2 +- .../aicore/aicore_task_builder.cc | 6 +- .../aicore/aicore_task_builder.h | 2 +- .../aicore/aicore_task_compiler.cc | 2 +- .../aicore/aicore_task_compiler.h | 2 +- .../node_executor/aicpu/aicpu_node_executor.h | 2 +- .../compiledsubgraph/known_node_executor.cc | 2 +- .../controlop/control_op_executor.cc | 2 +- .../node_executor/hccl/hccl_node_executor.cc | 4 +- ge/hybrid/node_executor/node_executor.h | 2 +- .../partitioned_call_node_executor.cc | 2 +- .../node_executor/rts/rts_node_executor.cc | 4 +- ge/hybrid/node_executor/rts/rts_node_task.cc | 2 +- ge/hybrid/node_executor/task_context.cc | 4 +- ge/inc/graph_pass.h | 6 +- ge/inc/kernel.h | 4 +- ge/inc/kernel_factory.h | 2 +- ge/inc/pass.h | 2 +- ge/init/gelib.cc | 2 +- ge/init/gelib.h | 4 +- ge/ir_build/attr_options/attr_options.h | 56 +++++++++---------- ge/ir_build/attr_options/keep_dtype_option.cc | 2 +- ge/ir_build/attr_options/utils.cc | 2 +- .../attr_options/weight_compress_option.cc | 2 +- ge/ir_build/ge_ir_build.cc | 4 +- ge/ir_build/option_utils.cc | 2 +- ge/model/ge_model.cc | 2 +- ge/model/ge_model.h | 2 +- ge/model/ge_root_model.cc | 2 +- ge/offline/CMakeLists.txt | 16 ------ ge/offline/main.cc | 18 +++--- ge/offline/single_op_parser.cc | 4 +- ge/offline/single_op_parser.h | 4 +- .../ops_kernel_builder_manager.cc | 2 +- ge/opskernel_manager/ops_kernel_manager.cc | 4 +- ge/opskernel_manager/ops_kernel_manager.h | 6 +- ge/plugin/engine/CMakeLists.txt | 3 +- ge/plugin/engine/dnnengines.h | 2 +- 
ge/plugin/engine/engine_manage.h | 2 +- ge/session/inner_session.cc | 2 +- ge/session/inner_session.h | 2 +- ge/session/omg.cc | 20 +++---- ge/session/session_manager.h | 4 +- ge/single_op/single_op.cc | 4 +- ge/single_op/single_op.h | 4 +- ge/single_op/single_op_model.cc | 8 +-- ge/single_op/single_op_model.h | 2 +- ge/single_op/stream_resource.h | 2 +- .../task/aicpu_kernel_task_builder.cc | 2 +- ge/single_op/task/op_task.cc | 2 +- ge/single_op/task/op_task.h | 2 +- ge/single_op/task/rts_kernel_task_builder.cc | 2 +- .../common/profiling/ge_runner_profiling.h | 2 +- 327 files changed, 656 insertions(+), 701 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 81e2d539..2b9122da 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -754,22 +754,15 @@ target_compile_options(ge_runner PRIVATE target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/analyzer ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common - ${METADEF_DIR} ${METADEF_DIR}/inc - ${METADEF_DIR}/inc/external/graph ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/external - ${GE_CODE_DIR}/../inc/cce ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external #### blue zone @@ -835,22 +828,15 @@ target_compile_options(ge_compiler PRIVATE target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/analyzer ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common - ${METADEF_DIR} ${METADEF_DIR}/inc - ${METADEF_DIR}/inc/external/graph ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/external - ${GE_CODE_DIR}/../inc/cce ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external #### blue zone #### @@ -1000,18 +986,14 @@ set_target_properties(atc_stub_ge_compiler PROPERTIES ) target_include_directories(atc_stub_ge_compiler PRIVATE - ${GE_CODE_DIR} ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/analyzer ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common ${GE_CODE_DIR}/inc/external ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph + ${METADEF_DIR}/inc #### yellow zone #### - ${GE_CODE_DIR}/../inc/cce + ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external #### blue zone #### @@ -1041,18 +1023,14 @@ set_target_properties(fwk_stub_ge_runner PROPERTIES ) target_include_directories(fwk_stub_ge_runner PRIVATE - ${GE_CODE_DIR} ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/analyzer ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph + ${METADEF_DIR}/inc #### yellow zone #### - ${GE_CODE_DIR}/../inc/cce + ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external #### blue zone #### diff --git a/ge/analyzer/analyzer.cc b/ge/analyzer/analyzer.cc index 95036267..97b59411 100755 --- a/ge/analyzer/analyzer.cc +++ b/ge/analyzer/analyzer.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "analyzer.h" +#include "analyzer/analyzer.h" #include #include diff --git a/ge/client/ge_api.cc b/ge/client/ge_api.cc index 1aa4c41d..aa88cfb4 100644 --- a/ge/client/ge_api.cc +++ b/ge/client/ge_api.cc @@ -14,10 +14,10 @@ * limitations under the License. */ -#include "ge/ge_api.h" +#include "external/ge/ge_api.h" #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" #include "common/ge/datatype_util.h" #include "proto/ge_api.pb.h" diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index f55ff427..313f1ff3 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -72,20 +72,15 @@ target_compile_options(ge_common PRIVATE target_include_directories(ge_common PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/common - ${GE_CODE_DIR}/ge/common/op ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> - $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> @@ -141,20 +136,15 @@ target_compile_options(ge_common_static PRIVATE target_include_directories(ge_common_static PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/common - ${GE_CODE_DIR}/ge/common/op ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> - $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> @@ -200,15 +190,11 @@ target_compile_options(ge_common PRIVATE target_include_directories(ge_common PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/ge/common - ${GE_CODE_DIR}/ge/common/op ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos ${GE_CODE_DIR}/third_party/fwkacllib/inc diff --git a/ge/common/base64.h b/ge/common/base64.h index a537e585..22a78b46 100644 --- a/ge/common/base64.h +++ b/ge/common/base64.h @@ -20,8 +20,8 @@ #include #include -#include "debug/ge_log.h" -#include "ge_error_codes.h" +#include "framework/common/debug/ge_log.h" +#include "external/ge/ge_error_codes.h" namespace ge { namespace { diff --git a/ge/common/dump/dump_manager.h b/ge/common/dump/dump_manager.h index fa96de93..69152bcf 100644 --- a/ge/common/dump/dump_manager.h +++ b/ge/common/dump/dump_manager.h @@ -20,7 +20,7 @@ #include #include "common/dump/dump_properties.h" -#include "common/ge_types.h" +#include "framework/common/ge_types.h" namespace ge { class DumpManager { diff --git a/ge/common/dump/dump_op.h b/ge/common/dump/dump_op.h index b664495a..73922cb3 100755 --- a/ge/common/dump/dump_op.h +++ b/ge/common/dump/dump_op.h @@ -19,7 +19,7 @@ #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/properties_manager.h" #include "proto/op_mapping.pb.h" #include "runtime/stream.h" diff --git 
a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc index 08bddf43..ef755540 100644 --- a/ge/common/dump/dump_properties.cc +++ b/ge/common/dump/dump_properties.cc @@ -20,7 +20,7 @@ #include #include "common/ge/ge_util.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" #include "framework/common/ge_types.h" diff --git a/ge/common/dump/opdebug_register.cc b/ge/common/dump/opdebug_register.cc index 816455a0..41c85ff6 100644 --- a/ge/common/dump/opdebug_register.cc +++ b/ge/common/dump/opdebug_register.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "opdebug_register.h" +#include "common/dump/opdebug_register.h" namespace { const size_t kOpDebugMemorySize = 2048UL; diff --git a/ge/common/dump/opdebug_register.h b/ge/common/dump/opdebug_register.h index 1826287d..5b927b67 100644 --- a/ge/common/dump/opdebug_register.h +++ b/ge/common/dump/opdebug_register.h @@ -18,8 +18,8 @@ #define GE_COMMON_DUMP_OPDEBUG_REGISTER_H_ #include -#include "common/debug/ge_log.h" -#include "common/debug/log.h" +#include "framework/common/debug/ge_log.h" +#include "framework/common/debug/log.h" #include "graph/load/model_manager/data_dumper.h" namespace ge { diff --git a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc index ddce348b..38125979 100644 --- a/ge/common/formats/format_transfers/format_transfer_fractal_z.cc +++ b/ge/common/formats/format_transfers/format_transfer_fractal_z.cc @@ -19,7 +19,7 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/utils/formats_definitions.h" #include "common/formats/utils/formats_trans_utils.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc b/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc index 5efe486c..88de4d14 100644 --- a/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc +++ b/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc @@ -23,7 +23,7 @@ #include "common/formats/utils/formats_definitions.h" #include "common/formats/utils/formats_trans_utils.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/type_utils.h" diff --git a/ge/common/ge/datatype_util.h b/ge/common/ge/datatype_util.h index e42b25a7..c3b41b81 100644 --- a/ge/common/ge/datatype_util.h +++ b/ge/common/ge/datatype_util.h @@ -20,7 +20,7 @@ #include #include -#include "graph/types.h" +#include "external/graph/types.h" namespace ge { static const int32_t kGeSizeFloat = sizeof(float); diff --git a/ge/common/ge/plugin_manager.h b/ge/common/ge/plugin_manager.h index 8c351e62..0869704f 100755 --- a/ge/common/ge/plugin_manager.h +++ b/ge/common/ge/plugin_manager.h @@ -26,8 +26,8 @@ #include #include -#include "common/ge_inner_error_codes.h" -#include "engine/dnnengine.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/engine/dnnengine.h" #include "framework/common/debug/ge_log.h" #include "mmpa/mmpa_api.h" diff --git a/ge/common/ge_format_util.cc b/ge/common/ge_format_util.cc index d0240224..f3dee571 100755 --- a/ge/common/ge_format_util.cc +++ b/ge/common/ge_format_util.cc @@ -15,7 +15,7 @@ */ #include "framework/common/ge_format_util.h" -#include "formats/formats.h" +#include "common/formats/formats.h" namespace 
ge { GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status GeFormatUtil::TransShape(const TensorDesc &src_desc, diff --git a/ge/common/helper/model_cache_helper.h b/ge/common/helper/model_cache_helper.h index 398d6c03..13253cbe 100755 --- a/ge/common/helper/model_cache_helper.h +++ b/ge/common/helper/model_cache_helper.h @@ -21,7 +21,7 @@ #include #include -#include "ge/ge_api_error_codes.h" +#include "external/ge/ge_api_error_codes.h" #include "graph/compute_graph.h" #include "graph/manager/graph_var_manager.h" #include "model/ge_model.h" diff --git a/ge/common/math/fp16_math.cc b/ge/common/math/fp16_math.cc index e465c953..6a9c2fb3 100755 --- a/ge/common/math/fp16_math.cc +++ b/ge/common/math/fp16_math.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "fp16_math.h" +#include "common/math/fp16_math.h" #include "external/register/register_types.h" namespace ge { diff --git a/ge/common/model_parser/model_parser.cc b/ge/common/model_parser/model_parser.cc index ef9ab9e6..7447cdf8 100644 --- a/ge/common/model_parser/model_parser.cc +++ b/ge/common/model_parser/model_parser.cc @@ -20,7 +20,7 @@ #include #include "securec.h" -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" namespace ge { FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::ModelParserBase() {} diff --git a/ge/common/op/attr_value_util.cc b/ge/common/op/attr_value_util.cc index 4315a25d..8be0ecd1 100644 --- a/ge/common/op/attr_value_util.cc +++ b/ge/common/op/attr_value_util.cc @@ -17,7 +17,7 @@ #include "framework/common/op/attr_value_util.h" #include "framework/common/debug/log.h" #include "framework/common/util.h" -#include "register/register_types.h" +#include "external/register/register_types.h" namespace ge { #define DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \ diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc index d0343326..6ec1143c 100644 --- a/ge/common/profiling/ge_profiling.cc +++ b/ge/common/profiling/ge_profiling.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "common/profiling/ge_profiling.h" +#include "framework/common/profiling/ge_profiling.h" #include "runtime/base.h" #include "common/profiling/profiling_manager.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/common/profiling/ge_runner_profiling.cc b/ge/common/profiling/ge_runner_profiling.cc index 067aafe3..f74ce384 100644 --- a/ge/common/profiling/ge_runner_profiling.cc +++ b/ge/common/profiling/ge_runner_profiling.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "common/profiling/ge_runner_profiling.h" +#include "framework/common/profiling/ge_runner_profiling.h" #include "init/gelib.h" bool IsInitialize() { diff --git a/ge/common/profiling/profiling_manager.cc b/ge/common/profiling/profiling_manager.cc index 61210de6..6707d78e 100644 --- a/ge/common/profiling/profiling_manager.cc +++ b/ge/common/profiling/profiling_manager.cc @@ -21,7 +21,7 @@ #include "framework/common/string_util.h" #include "graph/ge_context.h" #include "graph/utils/type_utils.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "runtime/base.h" #include "graph/load/model_manager/davinci_model.h" #include "mmpa/mmpa_api.h" diff --git a/ge/common/properties_manager.cc b/ge/common/properties_manager.cc index e1f4c66e..0c5ef1fe 100644 --- a/ge/common/properties_manager.cc +++ b/ge/common/properties_manager.cc @@ -21,7 +21,7 @@ #include #include "common/ge/ge_util.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" #include "framework/common/ge_types.h" diff --git a/ge/common/properties_manager.h b/ge/common/properties_manager.h index 7079eecb..f3f9d7c9 100644 --- a/ge/common/properties_manager.h +++ b/ge/common/properties_manager.h @@ -25,7 +25,7 @@ #include "common/dump/dump_properties.h" #include "graph/op_desc.h" -#include "common/ge_compiler_options.h" +#include "framework/common/ge_compiler_options.h" namespace ge { // Configuration property management diff --git a/ge/common/thread_pool.cc b/ge/common/thread_pool.cc index dead0127..f9b7bb99 100644 --- a/ge/common/thread_pool.cc +++ b/ge/common/thread_pool.cc @@ -23,7 +23,7 @@ #include #include -#include "register/register_types.h" +#include "external/register/register_types.h" namespace ge { FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ThreadPool::ThreadPool(uint32_t size) : is_stoped_(false) { diff --git a/ge/common/thread_pool.h b/ge/common/thread_pool.h index e173618f..7e52edcc 100755 --- a/ge/common/thread_pool.h +++ b/ge/common/thread_pool.h @@ -31,7 +31,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "external/ge/ge_api_error_codes.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "common/ge/ge_util.h" namespace ge { diff --git a/ge/common/types.cc b/ge/common/types.cc index 98ae7737..b1127483 100644 --- a/ge/common/types.cc +++ b/ge/common/types.cc @@ -15,7 +15,7 @@ */ #include "framework/common/types.h" -#include "graph/types.h" +#include "external/graph/types.h" namespace ge { // dump diff --git a/ge/engine_manager/dnnengine_manager.cc b/ge/engine_manager/dnnengine_manager.cc index e89fc847..9e338295 100644 --- a/ge/engine_manager/dnnengine_manager.cc +++ b/ge/engine_manager/dnnengine_manager.cc @@ -22,7 +22,7 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" #include "common/util/error_manager/error_manager.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/engine_manager/dnnengine_manager.h b/ge/engine_manager/dnnengine_manager.h index c3ae5b95..42da3596 100755 --- a/ge/engine_manager/dnnengine_manager.h +++ b/ge/engine_manager/dnnengine_manager.h @@ -26,9 +26,9 @@ #include "nlohmann/json.hpp" #include "common/ge/plugin_manager.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/opskernel/ops_kernel_info_types.h" -#include "engine/dnnengine.h" +#include 
"framework/engine/dnnengine.h" #include "graph/op_desc.h" #include "graph/node.h" diff --git a/ge/executor/CMakeLists.txt b/ge/executor/CMakeLists.txt index b6342973..f258dffe 100755 --- a/ge/executor/CMakeLists.txt +++ b/ge/executor/CMakeLists.txt @@ -182,13 +182,10 @@ target_include_directories(ge_executor SYSTEM PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> - $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> @@ -236,13 +233,10 @@ target_include_directories(ge_executor_shared PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### $<$>:${GE_DEPEND_DIR}/inc> - $<$>:${GE_DEPEND_DIR}/inc/cce> #### blue zone #### $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> ) diff --git a/ge/executor/ge_executor.cc b/ge/executor/ge_executor.cc index 049d012f..486764bd 100755 --- a/ge/executor/ge_executor.cc +++ b/ge/executor/ge_executor.cc @@ -14,13 +14,13 @@ * limitations under the License. */ -#include "executor/ge_executor.h" +#include "framework/executor/ge_executor.h" #include #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "common/profiling/profiling_manager.h" #include "common/dump/dump_manager.h" #include "graph/execute/graph_execute.h" diff --git a/ge/ge_local_engine/CMakeLists.txt b/ge/ge_local_engine/CMakeLists.txt index 3675d333..01a10eaa 100755 --- a/ge/ge_local_engine/CMakeLists.txt +++ b/ge/ge_local_engine/CMakeLists.txt @@ -41,8 +41,6 @@ target_include_directories(ge_local_engine PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### @@ -91,8 +89,6 @@ target_include_directories(atc_ge_local_engine PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### @@ -146,8 +142,6 @@ target_include_directories(ge_local_opskernel_builder PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### @@ -197,8 +191,6 @@ target_include_directories(atc_ge_local_opskernel_builder PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### @@ -254,8 +246,6 @@ target_include_directories(ge_local_opskernel_builder_static PRIVATE ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### diff --git 
a/ge/ge_local_engine/engine/ge_local_engine.cc b/ge/ge_local_engine/engine/ge_local_engine.cc index ac3e5473..910bb924 100755 --- a/ge/ge_local_engine/engine/ge_local_engine.cc +++ b/ge/ge_local_engine/engine/ge_local_engine.cc @@ -19,10 +19,10 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "common/constant/constant.h" +#include "framework/common/ge_inner_error_codes.h" +#include "ge_local_engine/common/constant/constant.h" #include "common/ge/ge_util.h" -#include "ops_kernel_store/ge_local_ops_kernel_info.h" +#include "ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.h" namespace ge { namespace ge_local { diff --git a/ge/ge_local_engine/engine/host_cpu_engine.cc b/ge/ge_local_engine/engine/host_cpu_engine.cc index cd68ae15..488a5ee8 100755 --- a/ge/ge_local_engine/engine/host_cpu_engine.cc +++ b/ge/ge_local_engine/engine/host_cpu_engine.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "host_cpu_engine.h" +#include "ge_local_engine/engine/host_cpu_engine.h" #include "graph/common/omg_util.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/ge_local_engine/engine/host_cpu_engine.h b/ge/ge_local_engine/engine/host_cpu_engine.h index d13fcae1..5d6fa664 100644 --- a/ge/ge_local_engine/engine/host_cpu_engine.h +++ b/ge/ge_local_engine/engine/host_cpu_engine.h @@ -33,7 +33,7 @@ #include #include "framework/common/ge_inner_error_codes.h" #include "graph/node.h" -#include "graph/operator.h" +#include "external/graph/operator.h" #include "external/../register/register.h" namespace ge { diff --git a/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc b/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc index 5842fe29..33aa407d 100644 --- a/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc +++ b/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.cc @@ -14,9 +14,9 @@ * limitations under the License. 
*/ -#include "ge_local_ops_kernel_builder.h" +#include "ge_local_engine/ops_kernel_store/ge_local_ops_kernel_builder.h" #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/node_utils.h" diff --git a/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.cc b/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.cc index 504c3f2f..d775309d 100755 --- a/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.cc +++ b/ge/ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.cc @@ -16,14 +16,14 @@ #include "ge_local_engine/ops_kernel_store/ge_local_ops_kernel_info.h" #include -#include "common/constant/constant.h" +#include "ge_local_engine/common/constant/constant.h" #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/node_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" -#include "op/op_factory.h" +#include "ge_local_engine/ops_kernel_store/op/op_factory.h" #include "proto/task.pb.h" namespace ge { diff --git a/ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc b/ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc index ee601a99..dc4abfb8 100755 --- a/ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc +++ b/ge/ge_local_engine/ops_kernel_store/op/ge_deleted_op.cc @@ -16,7 +16,7 @@ #include "ge_local_engine/ops_kernel_store/op/ge_deleted_op.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "ge_local_engine/ops_kernel_store/op/op_factory.h" namespace ge { diff --git a/ge/ge_local_engine/ops_kernel_store/op/no_op.cc b/ge/ge_local_engine/ops_kernel_store/op/no_op.cc index c2104693..45d9da47 100755 --- a/ge/ge_local_engine/ops_kernel_store/op/no_op.cc +++ b/ge/ge_local_engine/ops_kernel_store/op/no_op.cc @@ -16,7 +16,7 @@ #include "ge_local_engine/ops_kernel_store/op/no_op.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "ge_local_engine/ops_kernel_store/op/op_factory.h" namespace ge { diff --git a/ge/ge_local_engine/ops_kernel_store/op/op.cc b/ge/ge_local_engine/ops_kernel_store/op/op.cc index 11229b2c..c2ef0091 100644 --- a/ge/ge_local_engine/ops_kernel_store/op/op.cc +++ b/ge/ge_local_engine/ops_kernel_store/op/op.cc @@ -16,7 +16,7 @@ #include "ge_local_engine/ops_kernel_store/op/op.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/op_desc.h" #include "graph/utils/anchor_utils.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/ge_local_engine/ops_kernel_store/op/op.h b/ge/ge_local_engine/ops_kernel_store/op/op.h index b75a8bed..004723e1 100644 --- a/ge/ge_local_engine/ops_kernel_store/op/op.h +++ b/ge/ge_local_engine/ops_kernel_store/op/op.h @@ -20,7 +20,7 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/node.h" namespace ge { diff --git a/ge/ge_local_engine/ops_kernel_store/op/op_factory.cc b/ge/ge_local_engine/ops_kernel_store/op/op_factory.cc index 2e56b7bb..18f3b7b9 100644 --- a/ge/ge_local_engine/ops_kernel_store/op/op_factory.cc +++ 
b/ge/ge_local_engine/ops_kernel_store/op/op_factory.cc @@ -16,7 +16,7 @@ #include "ge_local_engine/ops_kernel_store/op/op_factory.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/op_desc.h" namespace ge { diff --git a/ge/ge_runtime/CMakeLists.txt b/ge/ge_runtime/CMakeLists.txt index b00dd5b3..3243766f 100644 --- a/ge/ge_runtime/CMakeLists.txt +++ b/ge/ge_runtime/CMakeLists.txt @@ -34,21 +34,13 @@ target_compile_definitions(ge_runtime PRIVATE target_include_directories(ge_runtime PRIVATE ${CMAKE_CURRENT_LIST_DIR} - ${GE_CODE_DIR} ${GE_CODE_DIR}/ge ${GE_CODE_DIR}/inc - ${GE_CODE_DIR}/inc/graph ${GE_CODE_DIR}/inc/external ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common - ${GE_CODE_DIR}/inc/framework/ge_runtime - ${GE_CODE_DIR}/inc/cce ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${METADEF_DIR} ${METADEF_DIR}/inc - ${METADEF_DIR}/inc/external/graph ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/graph ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/ge ) diff --git a/ge/ge_runtime/model_runner.cc b/ge/ge_runtime/model_runner.cc index 9961ab4e..9338aae2 100644 --- a/ge/ge_runtime/model_runner.cc +++ b/ge/ge_runtime/model_runner.cc @@ -14,12 +14,12 @@ * limitations under the License. */ -#include "ge_runtime/model_runner.h" -#include "./runtime_model.h" +#include "framework/ge_runtime/model_runner.h" +#include "ge_runtime/runtime_model.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" -#include "ge_runtime/davinci_model.h" +#include "framework/ge_runtime/davinci_model.h" #include "graph/op_desc.h" namespace ge { diff --git a/ge/ge_runtime/output.cc b/ge/ge_runtime/output.cc index 90c33bb4..0be053d5 100644 --- a/ge/ge_runtime/output.cc +++ b/ge/ge_runtime/output.cc @@ -15,8 +15,8 @@ */ #include "ge_runtime/output.h" -#include "common/ge_inner_error_codes.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" namespace ge { diff --git a/ge/ge_runtime/output.h b/ge/ge_runtime/output.h index 1f7f91ee..61fcf94e 100644 --- a/ge/ge_runtime/output.h +++ b/ge/ge_runtime/output.h @@ -19,8 +19,8 @@ #include #include -#include "ge_runtime/davinci_model.h" -#include "common/ge_types.h" +#include "framework/ge_runtime/davinci_model.h" +#include "framework/common/ge_types.h" namespace ge { namespace model_runner { diff --git a/ge/ge_runtime/runtime_model.cc b/ge/ge_runtime/runtime_model.cc index efaad251..490ac25b 100644 --- a/ge/ge_runtime/runtime_model.cc +++ b/ge/ge_runtime/runtime_model.cc @@ -16,15 +16,15 @@ #include "ge_runtime/runtime_model.h" #include -#include "./model_context.h" -#include "./task/task.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" -#include "common/util.h" +#include "ge_runtime/model_context.h" +#include "ge_runtime/task/task.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/op/op_parser_util.h" -#include "graph/types.h" -#include "task/task_factory.h" +#include "external/graph/types.h" +#include "ge_runtime/task/task_factory.h" #include "ge/common/math/math_util.h" namespace ge { diff --git a/ge/ge_runtime/runtime_model.h b/ge/ge_runtime/runtime_model.h index 
d0c466d4..429a143f 100644 --- a/ge/ge_runtime/runtime_model.h +++ b/ge/ge_runtime/runtime_model.h @@ -20,8 +20,8 @@ #include #include #include -#include "ge_runtime/davinci_model.h" -#include "common/ge_types.h" +#include "framework/ge_runtime/davinci_model.h" +#include "framework/common/ge_types.h" #include "runtime/base.h" #include "runtime/rt_model.h" diff --git a/ge/ge_runtime/task/task.h b/ge/ge_runtime/task/task.h index c255fd22..8170f3ca 100644 --- a/ge/ge_runtime/task/task.h +++ b/ge/ge_runtime/task/task.h @@ -23,7 +23,7 @@ #include #include "runtime/rt_model.h" #include "ge_runtime/model_context.h" -#include "ge_runtime/task_info.h" +#include "framework/ge_runtime/task_info.h" #include "external/runtime/rt_error_codes.h" namespace ge { diff --git a/ge/ge_runtime/task/task_factory.h b/ge/ge_runtime/task/task_factory.h index 670d1fef..f19b7419 100644 --- a/ge/ge_runtime/task/task_factory.h +++ b/ge/ge_runtime/task/task_factory.h @@ -21,9 +21,9 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" -#include "ge_runtime/task_info.h" +#include "framework/ge_runtime/task_info.h" namespace ge { namespace model_runner { diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 58047c89..24b35bca 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -14,19 +14,19 @@ * limitations under the License. */ -#include "generator/ge_generator.h" +#include "framework/generator/ge_generator.h" #include #include "common/ge/ge_util.h" #include "common/ge/plugin_manager.h" -#include "common/helper/model_helper.h" -#include "common/helper/om_file_helper.h" -#include "common/util.h" +#include "framework/common/helper/model_helper.h" +#include "framework/common/helper/om_file_helper.h" +#include "framework/common/util.h" #include "common/util/error_manager/error_manager.h" #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" -#include "ge/ge_api.h" +#include "external/ge/ge_api.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_context.h" #include "graph/manager/graph_manager.h" diff --git a/ge/generator/generator_api.cc b/ge/generator/generator_api.cc index b64a9eb3..56a35130 100644 --- a/ge/generator/generator_api.cc +++ b/ge/generator/generator_api.cc @@ -13,11 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "generator/generator_api.h" +#include "framework/generator/generator_api.h" #include "common/ge/ge_util.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" -#include "generator/ge_generator.h" +#include "framework/generator/ge_generator.h" #include "graph/ge_attr_value.h" #include "graph/ge_tensor.h" #include "graph/op_desc.h" diff --git a/ge/graph/build/graph_builder.cc b/ge/graph/build/graph_builder.cc index 8b172e63..96dea02e 100644 --- a/ge/graph/build/graph_builder.cc +++ b/ge/graph/build/graph_builder.cc @@ -17,7 +17,7 @@ #include "graph/build/graph_builder.h" #include "graph/build/memory/graph_mem_assigner.h" #include "common/ge/ge_util.h" -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "graph/build/logical_stream_allocator.h" #include "graph/build/run_context.h" #include "graph/build/stream_graph_optimizer.h" diff --git a/ge/graph/build/graph_builder.h b/ge/graph/build/graph_builder.h index fb9ab6bd..c4b16814 100644 --- a/ge/graph/build/graph_builder.h +++ b/ge/graph/build/graph_builder.h @@ -22,16 +22,16 @@ #include #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" #include "common/properties_manager.h" -#include "common/string_util.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/string_util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/build/model_builder.h" #include "graph/build/task_generator.h" #include "graph/compute_graph.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/manager/graph_manager_utils.h" #include "graph/model.h" #include "graph/node.h" diff --git a/ge/graph/build/label_allocator.cc b/ge/graph/build/label_allocator.cc index dd7ee828..6d81c17d 100644 --- a/ge/graph/build/label_allocator.cc +++ b/ge/graph/build/label_allocator.cc @@ -14,11 +14,11 @@ * limitations under the License. 
*/ -#include "label_allocator.h" +#include "graph/build/label_allocator.h" #include "framework/common/types.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/label/label_maker.h" diff --git a/ge/graph/build/memory/block_mem_assigner.cc b/ge/graph/build/memory/block_mem_assigner.cc index 9b81eae3..159e68a7 100755 --- a/ge/graph/build/memory/block_mem_assigner.cc +++ b/ge/graph/build/memory/block_mem_assigner.cc @@ -24,7 +24,7 @@ #include "graph/buffer.h" #include "graph/ge_attr_value.h" #include "graph/ge_context.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "graph/node.h" #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" @@ -36,7 +36,7 @@ #include "graph/common/local_context.h" #include "graph/optimize/common/params.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" #include "runtime/mem.h" using std::map; diff --git a/ge/graph/build/memory/block_mem_assigner.h b/ge/graph/build/memory/block_mem_assigner.h index 231cce09..651daed5 100755 --- a/ge/graph/build/memory/block_mem_assigner.h +++ b/ge/graph/build/memory/block_mem_assigner.h @@ -24,9 +24,9 @@ #include #include #include -#include "common/ge_inner_error_codes.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/build/memory/mem_assigner.h" #include "graph/compute_graph.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/build/memory/hybrid_mem_assigner.h b/ge/graph/build/memory/hybrid_mem_assigner.h index 2bdfd5c5..33bb152b 100755 --- a/ge/graph/build/memory/hybrid_mem_assigner.h +++ b/ge/graph/build/memory/hybrid_mem_assigner.h @@ -22,8 +22,8 @@ #include "graph/build/memory/block_mem_assigner.h" #include "graph/compute_graph.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" namespace ge { using BlockMemAssignerPtr = std::shared_ptr; diff --git a/ge/graph/build/memory/mem_assigner.h b/ge/graph/build/memory/mem_assigner.h index 7d0252d9..d607b989 100755 --- a/ge/graph/build/memory/mem_assigner.h +++ b/ge/graph/build/memory/mem_assigner.h @@ -17,8 +17,8 @@ #ifndef GE_GRAPH_BUILD_MEMORY_MEM_ASSIGNER_H_ #define GE_GRAPH_BUILD_MEMORY_MEM_ASSIGNER_H_ -#include "common/ge_inner_error_codes.h" -#include "memory/memory_assigner.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/memory/memory_assigner.h" namespace ge { static const int64_t kInvalidOffset = -1; diff --git a/ge/graph/build/memory/memory_assigner.cc b/ge/graph/build/memory/memory_assigner.cc index 570aae07..6e49827f 100755 --- a/ge/graph/build/memory/memory_assigner.cc +++ b/ge/graph/build/memory/memory_assigner.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "memory/memory_assigner.h" +#include "framework/memory/memory_assigner.h" #include #include "framework/common/debug/ge_log.h" #include "graph/build/memory/graph_mem_assigner.h" diff --git a/ge/graph/build/memory/var_mem_assign_util.cc b/ge/graph/build/memory/var_mem_assign_util.cc index b8138a30..adddf6bd 100755 --- a/ge/graph/build/memory/var_mem_assign_util.cc +++ b/ge/graph/build/memory/var_mem_assign_util.cc @@ -16,14 +16,14 @@ #include "graph/build/memory/var_mem_assign_util.h" #include -#include "common/types.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/common/transop_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/graph_mem_allocator.h" #include "graph/manager/graph_var_manager.h" -#include "graph/tensor.h" -#include "graph/types.h" +#include "external/graph/tensor.h" +#include "external/graph/types.h" #include "graph/utils/attr_utils.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/graph/build/memory/var_mem_assign_util.h b/ge/graph/build/memory/var_mem_assign_util.h index 9528dbdb..26da9111 100644 --- a/ge/graph/build/memory/var_mem_assign_util.h +++ b/ge/graph/build/memory/var_mem_assign_util.h @@ -17,8 +17,8 @@ #ifndef GE_GRAPH_BUILD_MEMORY_VAR_MEM_ASSIGN_UTIL_H_ #define GE_GRAPH_BUILD_MEMORY_VAR_MEM_ASSIGN_UTIL_H_ #include -#include "common/debug/log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/debug/log.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/utils/node_utils.h" namespace ge { diff --git a/ge/graph/build/model_builder.cc b/ge/graph/build/model_builder.cc index 431e4882..d38e89fe 100755 --- a/ge/graph/build/model_builder.cc +++ b/ge/graph/build/model_builder.cc @@ -22,7 +22,7 @@ #include "common/dump/dump_manager.h" #include "framework/common/debug/ge_log.h" #include "graph/anchor.h" -#include "graph/attr_value.h" +#include "external/graph/attr_value.h" #include "graph/buffer.h" #include "graph/build/stream_allocator.h" #include "graph/common/omg_util.h" @@ -31,11 +31,11 @@ #include "graph/debug/ge_attr_define.h" #include "graph/ge_attr_value.h" #include "graph/ge_context.h" -#include "graph/ge_error_codes.h" +#include "external/graph/ge_error_codes.h" #include "graph/manager/graph_mem_allocator.h" #include "graph/manager/graph_var_manager.h" #include "graph/optimize/common/params.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "graph/utils/attr_utils.h" #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" @@ -43,8 +43,8 @@ #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" #include "init/gelib.h" -#include "memory/memory_assigner.h" -#include "omg/version.h" +#include "framework/memory/memory_assigner.h" +#include "framework/omg/version.h" #include "register/op_registry.h" #include "graph/passes/set_input_output_offset_pass.h" #include "graph/build/memory/block_mem_assigner.h" diff --git a/ge/graph/build/model_builder.h b/ge/graph/build/model_builder.h index 6f097329..151e6006 100644 --- a/ge/graph/build/model_builder.h +++ b/ge/graph/build/model_builder.h @@ -23,17 +23,17 @@ #include #include #include -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "common/tbe_kernel_store.h" #include "common/cust_aicpu_kernel_store.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/compute_graph.h" 
#include "graph/manager/graph_manager_utils.h" #include "graph/model.h" #include "graph/node.h" #include "model/ge_model.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace ge { class ModelBuilder { diff --git a/ge/graph/build/run_context.cc b/ge/graph/build/run_context.cc index 05e40b63..e7f07c0a 100644 --- a/ge/graph/build/run_context.cc +++ b/ge/graph/build/run_context.cc @@ -15,7 +15,7 @@ */ #include "graph/build/run_context.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" #include "graph/common/omg_util.h" diff --git a/ge/graph/build/run_context.h b/ge/graph/build/run_context.h index 82f799aa..20ba76d4 100755 --- a/ge/graph/build/run_context.h +++ b/ge/graph/build/run_context.h @@ -18,7 +18,7 @@ #define GE_GRAPH_BUILD_RUN_CONTEXT_H_ #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/opskernel/ops_kernel_info_types.h" #include "framework/common/types.h" #include "graph/compute_graph.h" diff --git a/ge/graph/build/stream_allocator.cc b/ge/graph/build/stream_allocator.cc index d896925c..bc34a228 100644 --- a/ge/graph/build/stream_allocator.cc +++ b/ge/graph/build/stream_allocator.cc @@ -27,7 +27,7 @@ #include "graph/ge_context.h" #include "graph/utils/graph_utils.h" #include "init/gelib.h" -#include "common/string_util.h" +#include "framework/common/string_util.h" #include "common/util/error_manager/error_manager.h" using std::map; diff --git a/ge/graph/build/stream_graph_optimizer.cc b/ge/graph/build/stream_graph_optimizer.cc index 30142c2b..acf91ad5 100644 --- a/ge/graph/build/stream_graph_optimizer.cc +++ b/ge/graph/build/stream_graph_optimizer.cc @@ -13,11 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "stream_graph_optimizer.h" +#include "graph/build/stream_graph_optimizer.h" #include -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/node_utils.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/graph/build/stream_graph_optimizer.h b/ge/graph/build/stream_graph_optimizer.h index d69fa7ba..ec32f7fb 100644 --- a/ge/graph/build/stream_graph_optimizer.h +++ b/ge/graph/build/stream_graph_optimizer.h @@ -18,7 +18,7 @@ #define GE_GRAPH_BUILD_OPTIMIZE_STREAM_GRAPH_H_ #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/opskernel/ops_kernel_info_types.h" #include "framework/common/types.h" #include "graph/compute_graph.h" diff --git a/ge/graph/build/task_generator.cc b/ge/graph/build/task_generator.cc index f9456aab..5dee37d6 100755 --- a/ge/graph/build/task_generator.cc +++ b/ge/graph/build/task_generator.cc @@ -18,8 +18,8 @@ #include #include #include "common/profiling/profiling_manager.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" @@ -32,7 +32,7 @@ #include "graph/common/ge_call_wrapper.h" #include "init/gelib.h" #include "graph/ge_local_context.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "opskernel_manager/ops_kernel_builder_manager.h" using domi::LogTimeStampDef; diff --git a/ge/graph/build/task_generator.h b/ge/graph/build/task_generator.h index 40cef3ba..6f460906 100755 --- a/ge/graph/build/task_generator.h +++ b/ge/graph/build/task_generator.h @@ -21,7 +21,7 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/opskernel/ops_kernel_info_types.h" #include "framework/common/types.h" #include "graph/compute_graph.h" diff --git a/ge/graph/common/bcast.cc b/ge/graph/common/bcast.cc index b36b50b2..fcc8f9a1 100644 --- a/ge/graph/common/bcast.cc +++ b/ge/graph/common/bcast.cc @@ -19,7 +19,7 @@ #include #include "common/math_util.h" -#include "common/util.h" +#include "framework/common/util.h" using domi::Status; diff --git a/ge/graph/common/bcast.h b/ge/graph/common/bcast.h index a8399896..184751fe 100644 --- a/ge/graph/common/bcast.h +++ b/ge/graph/common/bcast.h @@ -21,11 +21,11 @@ #include #include -#include "common/debug/log.h" -#include "common/types.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/attr_value.h" +#include "external/graph/attr_value.h" #include "graph/ge_tensor.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/graph/common/local_context.cc b/ge/graph/common/local_context.cc index d3e66861..fa2f78e0 100644 --- a/ge/graph/common/local_context.cc +++ b/ge/graph/common/local_context.cc @@ -16,9 +16,9 @@ #include "graph/common/local_context.h" -#include "common/ge_inner_error_codes.h" -#include "common/debug/ge_log.h" -#include "omg/omg_inner_types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/debug/ge_log.h" +#include "framework/omg/omg_inner_types.h" namespace ge { namespace { diff --git a/ge/graph/common/local_context.h b/ge/graph/common/local_context.h index 83367766..4aa95855 100644 --- a/ge/graph/common/local_context.h +++ 
b/ge/graph/common/local_context.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ #define GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace ge { void SetLocalOmgContext(OmgContext &context); diff --git a/ge/graph/common/omg_util.h b/ge/graph/common/omg_util.h index edaafa45..d55cc7c8 100644 --- a/ge/graph/common/omg_util.h +++ b/ge/graph/common/omg_util.h @@ -22,8 +22,8 @@ #include #include -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/node.h" namespace ge { diff --git a/ge/graph/common/transop_util.cc b/ge/graph/common/transop_util.cc index 62b4c4e4..871ecdb1 100755 --- a/ge/graph/common/transop_util.cc +++ b/ge/graph/common/transop_util.cc @@ -16,7 +16,7 @@ #include "graph/common/transop_util.h" -#include "common/types.h" +#include "framework/common/types.h" #include "graph/utils/type_utils.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/graph/execute/graph_execute.h b/ge/graph/execute/graph_execute.h index aa791c9b..879a124c 100755 --- a/ge/graph/execute/graph_execute.h +++ b/ge/graph/execute/graph_execute.h @@ -24,14 +24,14 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" -#include "common/ge_types.h" +#include "framework/common/ge_types.h" #include "common/properties_manager.h" -#include "common/string_util.h" -#include "common/types.h" -#include "common/util.h" -#include "ge/ge_api_types.h" +#include "framework/common/string_util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" +#include "external/ge/ge_api_types.h" #include "graph/compute_graph.h" #include "graph/manager/graph_context.h" #include "graph/manager/graph_manager_utils.h" diff --git a/ge/graph/label/case_label_maker.cc b/ge/graph/label/case_label_maker.cc index 3fdb1783..88d698d1 100644 --- a/ge/graph/label/case_label_maker.cc +++ b/ge/graph/label/case_label_maker.cc @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "case_label_maker.h" +#include "graph/label/case_label_maker.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "framework/common/op/ge_op_utils.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/label/if_label_maker.cc b/ge/graph/label/if_label_maker.cc index 72b33015..df911e70 100644 --- a/ge/graph/label/if_label_maker.cc +++ b/ge/graph/label/if_label_maker.cc @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "if_label_maker.h" +#include "graph/label/if_label_maker.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "framework/common/op/ge_op_utils.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/label/label_maker.cc b/ge/graph/label/label_maker.cc index 638cbbae..47eeda86 100644 --- a/ge/graph/label/label_maker.cc +++ b/ge/graph/label/label_maker.cc @@ -16,8 +16,8 @@ #include "graph/label/label_maker.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "framework/common/op/ge_op_utils.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/label/partitioned_call_label_maker.cc b/ge/graph/label/partitioned_call_label_maker.cc index 7b4bcbd8..ec8b8c89 100644 --- a/ge/graph/label/partitioned_call_label_maker.cc +++ b/ge/graph/label/partitioned_call_label_maker.cc @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "partitioned_call_label_maker.h" +#include "graph/label/partitioned_call_label_maker.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/label/while_label_maker.cc b/ge/graph/label/while_label_maker.cc index cd6b3743..7e6b8a98 100644 --- a/ge/graph/label/while_label_maker.cc +++ b/ge/graph/label/while_label_maker.cc @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "while_label_maker.h" +#include "graph/label/while_label_maker.h" -#include "common/util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/util.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" #include "framework/common/op/ge_op_utils.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/load/graph_loader.cc b/ge/graph/load/graph_loader.cc index 94b90d69..b2a61106 100755 --- a/ge/graph/load/graph_loader.cc +++ b/ge/graph/load/graph_loader.cc @@ -19,7 +19,7 @@ #include #include -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "common/model_parser/model_parser.h" #include "graph/ge_context.h" #include "graph/load/model_manager/model_manager.h" diff --git a/ge/graph/load/graph_loader.h b/ge/graph/load/graph_loader.h index e11af749..f6324c98 100755 --- a/ge/graph/load/graph_loader.h +++ b/ge/graph/load/graph_loader.h @@ -21,9 +21,9 @@ #include #include -#include "common/debug/log.h" -#include "common/fmk_types.h" -#include "common/ge_types.h" +#include "framework/common/debug/log.h" +#include "framework/common/fmk_types.h" +#include "framework/common/ge_types.h" #include "graph/compute_graph.h" #include "graph/manager/graph_manager_utils.h" #include "graph/model.h" diff --git a/ge/graph/load/model_manager/aipp_utils.cc b/ge/graph/load/model_manager/aipp_utils.cc index 8a18c421..a9f885f8 100755 --- a/ge/graph/load/model_manager/aipp_utils.cc +++ b/ge/graph/load/model_manager/aipp_utils.cc @@ -18,8 +18,8 @@ #include -#include "common/debug/log.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/debug/log.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/util.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/attr_utils.h" diff --git a/ge/graph/load/model_manager/aipp_utils.h b/ge/graph/load/model_manager/aipp_utils.h index 78107f3e..237eeced 100755 --- a/ge/graph/load/model_manager/aipp_utils.h +++ b/ge/graph/load/model_manager/aipp_utils.h @@ -19,8 +19,8 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/ge_types.h" #include "graph/op_desc.h" #include "proto/insert_op.pb.h" diff --git a/ge/graph/load/model_manager/cpu_queue_schedule.cc b/ge/graph/load/model_manager/cpu_queue_schedule.cc index 9821aa73..0ec80b34 100644 --- a/ge/graph/load/model_manager/cpu_queue_schedule.cc +++ b/ge/graph/load/model_manager/cpu_queue_schedule.cc @@ -15,8 +15,8 @@ */ #include "graph/load/model_manager/cpu_queue_schedule.h" -#include "common/debug/ge_log.h" -#include "common/debug/log.h" +#include "framework/common/debug/ge_log.h" +#include "framework/common/debug/log.h" namespace { const uint32_t kCoreDim = 1; // for rtCpuKernelLaunch diff --git a/ge/graph/load/model_manager/cpu_queue_schedule.h b/ge/graph/load/model_manager/cpu_queue_schedule.h index 8dc44538..d3c8915e 100644 --- a/ge/graph/load/model_manager/cpu_queue_schedule.h +++ b/ge/graph/load/model_manager/cpu_queue_schedule.h @@ -19,7 +19,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/load/model_manager/task_info/task_info.h" #include "graph/load/model_manager/zero_copy_offset.h" #include "runtime/kernel.h" diff --git a/ge/graph/load/model_manager/data_dumper.cc b/ge/graph/load/model_manager/data_dumper.cc index c96b3885..7b5d9df9 100644 --- 
a/ge/graph/load/model_manager/data_dumper.cc +++ b/ge/graph/load/model_manager/data_dumper.cc @@ -24,7 +24,7 @@ #include "common/debug/memory_dumper.h" #include "common/properties_manager.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" #include "graph/anchor.h" diff --git a/ge/graph/load/model_manager/data_dumper.h b/ge/graph/load/model_manager/data_dumper.h index d1714950..2851e63a 100755 --- a/ge/graph/load/model_manager/data_dumper.h +++ b/ge/graph/load/model_manager/data_dumper.h @@ -29,7 +29,7 @@ #include "proto/ge_ir.pb.h" #include "proto/op_mapping.pb.h" #include "runtime/mem.h" -#include "task_info/task_info.h" +#include "graph/load/model_manager/task_info/task_info.h" #include "framework/common/ge_types.h" #include "runtime/base.h" diff --git a/ge/graph/load/model_manager/data_inputer.cc b/ge/graph/load/model_manager/data_inputer.cc index d286b9b4..d68e95aa 100755 --- a/ge/graph/load/model_manager/data_inputer.cc +++ b/ge/graph/load/model_manager/data_inputer.cc @@ -18,9 +18,9 @@ #include -#include "common/debug/log.h" -#include "common/scope_guard.h" -#include "common/types.h" +#include "framework/common/debug/log.h" +#include "framework/common/scope_guard.h" +#include "framework/common/types.h" namespace ge { domi::Status InputDataWrapper::Init(const InputData &input, const OutputData &output) { diff --git a/ge/graph/load/model_manager/data_inputer.h b/ge/graph/load/model_manager/data_inputer.h index b8d145d4..28b6fb26 100755 --- a/ge/graph/load/model_manager/data_inputer.h +++ b/ge/graph/load/model_manager/data_inputer.h @@ -22,8 +22,8 @@ #include #include "common/blocking_queue.h" -#include "common/ge_types.h" -#include "common/types.h" +#include "framework/common/ge_types.h" +#include "framework/common/types.h" namespace ge { /// diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 97238a4a..b6c4c7b0 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -21,14 +21,14 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/math/math_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "common/profiling/profiling_manager.h" #include "common/properties_manager.h" -#include "common/scope_guard.h" +#include "framework/common/scope_guard.h" #include "common/thread_pool.h" #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" @@ -36,7 +36,7 @@ #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_context.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/load/model_manager/cpu_queue_schedule.h" #include "graph/load/model_manager/model_manager.h" #include "graph/load/model_manager/tbe_handle_store.h" diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 4c06ad98..1e964855 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -24,14 +24,14 @@ #include #include -#include "common/ge_types.h" -#include "common/helper/model_helper.h" -#include "common/helper/om_file_helper.h" +#include "framework/common/ge_types.h" +#include "framework/common/helper/model_helper.h" +#include 
"framework/common/helper/om_file_helper.h" #include "common/opskernel/ge_task_info.h" #include "common/properties_manager.h" #include "common/dump/exception_dumper.h" #include "common/dump/opdebug_register.h" -#include "common/types.h" +#include "framework/common/types.h" #include "framework/common/util.h" #include "graph/debug/ge_attr_define.h" #include "graph/load/model_manager/aipp_utils.h" @@ -43,12 +43,12 @@ #include "graph/model.h" #include "graph/node.h" #include "graph/op_desc.h" -#include "graph/operator.h" +#include "external/graph/operator.h" #include "graph/utils/attr_utils.h" #include "graph/utils/tensor_utils.h" #include "mmpa/mmpa_api.h" #include "proto/task.pb.h" -#include "task_info/task_info.h" +#include "graph/load/model_manager/task_info/task_info.h" #include "graph/common/local_context.h" using std::mutex; diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 6a563d2f..2cb31074 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -21,7 +21,7 @@ #include "aicpu/aicpu_schedule/aicpu_op_type_list.h" #include "common/model_parser/model_parser.h" #include "common/dump/dump_manager.h" -#include "common/l2_cache_optimize.h" +#include "framework/common/l2_cache_optimize.h" #include "common/profiling/profiling_manager.h" #include "graph/common/ge_call_wrapper.h" #include "graph/load/model_manager/davinci_model.h" diff --git a/ge/graph/load/model_manager/model_manager.h b/ge/graph/load/model_manager/model_manager.h index e35bb7aa..63a03dd7 100755 --- a/ge/graph/load/model_manager/model_manager.h +++ b/ge/graph/load/model_manager/model_manager.h @@ -26,13 +26,13 @@ #include #include #include "cce/aicpu_engine_struct.h" -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" -#include "common/helper/model_helper.h" -#include "common/helper/om_file_helper.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/ge_types.h" +#include "framework/common/helper/model_helper.h" +#include "framework/common/helper/om_file_helper.h" #include "common/properties_manager.h" -#include "common/types.h" -#include "ge/ge_api_types.h" +#include "framework/common/types.h" +#include "external/ge/ge_api_types.h" #include "graph/ge_context.h" #include "graph/model.h" #include "hybrid/hybrid_davinci_model.h" diff --git a/ge/graph/load/model_manager/model_utils.cc b/ge/graph/load/model_manager/model_utils.cc index 224a3331..a31837ca 100755 --- a/ge/graph/load/model_manager/model_utils.cc +++ b/ge/graph/load/model_manager/model_utils.cc @@ -16,11 +16,11 @@ #include "graph/load/model_manager/model_utils.h" #include -#include "common/debug/log.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/debug/log.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/manager/graph_var_manager.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "graph/build/memory/block_mem_assigner.h" #include "common/math/math_util.h" diff --git a/ge/graph/load/model_manager/model_utils.h b/ge/graph/load/model_manager/model_utils.h index 8ce1b060..0eadc7a8 100755 --- a/ge/graph/load/model_manager/model_utils.h +++ b/ge/graph/load/model_manager/model_utils.h @@ -19,8 +19,8 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include 
"graph/load/model_manager/task_info/task_info.h" #include "graph/op_desc.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index 356919f6..a4b3de75 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -23,7 +23,7 @@ #include "common/properties_manager.h" #include "framework/common/debug/ge_log.h" #include "framework/common/fmk_error_codes.h" -#include "graph/attr_value.h" +#include "external/graph/attr_value.h" #include "graph/load/model_manager/davinci_model.h" #include "graph/load/model_manager/model_manager.h" #include "hybrid/node_executor/aicpu/aicpu_ext_info.h" diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 919a56cd..bfb6e24b 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -29,8 +29,8 @@ #include "graph/load/model_manager/model_manager.h" #include "graph/load/model_manager/model_utils.h" #include "runtime/kernel.h" -#include "super_kernel/super_kernel.h" -#include "super_kernel/super_kernel_factory.h" +#include "graph/load/model_manager/task_info/super_kernel/super_kernel.h" +#include "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h" #include "cce/aicpu_engine_struct.h" #include "hybrid/node_executor/aicpu/aicpu_ext_info.h" #include "framework/common/debug/log.h" diff --git a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc index 44aac465..b5db845d 100644 --- a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc +++ b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "super_kernel.h" +#include "graph/load/model_manager/task_info/super_kernel/super_kernel.h" #include "framework/common/debug/ge_log.h" namespace ge { diff --git a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc index 07dc5d19..d1f53cc4 100644 --- a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc +++ b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "super_kernel_factory.h" +#include "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h" #include "framework/common/debug/ge_log.h" namespace ge { diff --git a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h index c5058b6a..741d1c13 100644 --- a/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h +++ b/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h @@ -18,7 +18,7 @@ #define SUPER_KERNEL_FACTORY_H #include -#include "super_kernel.h" +#include "graph/load/model_manager/task_info/super_kernel/super_kernel.h" #include "framework/common/debug/log.h" namespace ge { diff --git a/ge/graph/load/model_manager/tbe_handle_store.cc b/ge/graph/load/model_manager/tbe_handle_store.cc index 36207aa2..d20b1bbf 100755 --- a/ge/graph/load/model_manager/tbe_handle_store.cc +++ b/ge/graph/load/model_manager/tbe_handle_store.cc @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "tbe_handle_store.h" +#include "graph/load/model_manager/tbe_handle_store.h" #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "runtime/kernel.h" diff --git a/ge/graph/load/model_manager/tbe_handle_store.h b/ge/graph/load/model_manager/tbe_handle_store.h index 6c3ad750..ba934fc3 100644 --- a/ge/graph/load/model_manager/tbe_handle_store.h +++ b/ge/graph/load/model_manager/tbe_handle_store.h @@ -25,7 +25,7 @@ #include #include -#include "common/fmk_types.h" +#include "framework/common/fmk_types.h" #include "graph/op_kernel_bin.h" namespace ge { diff --git a/ge/graph/load/model_manager/zero_copy_offset.h b/ge/graph/load/model_manager/zero_copy_offset.h index 2dea5666..f3dd07a8 100644 --- a/ge/graph/load/model_manager/zero_copy_offset.h +++ b/ge/graph/load/model_manager/zero_copy_offset.h @@ -29,7 +29,7 @@ #include "graph/utils/attr_utils.h" #include "graph/utils/tensor_utils.h" #include "runtime/mem.h" -#include "task_info/task_info.h" +#include "graph/load/model_manager/task_info/task_info.h" using std::map; using std::set; diff --git a/ge/graph/load/model_manager/zero_copy_task.cc b/ge/graph/load/model_manager/zero_copy_task.cc index 4957f8ea..85be6d7b 100755 --- a/ge/graph/load/model_manager/zero_copy_task.cc +++ b/ge/graph/load/model_manager/zero_copy_task.cc @@ -19,7 +19,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" #include "graph/load/model_manager/model_utils.h" -#include "common/ge_compiler_options.h" +#include "framework/common/ge_compiler_options.h" namespace ge { ZeroCopyTask::ZeroCopyTask(const string &name, uint8_t *args, size_t size) diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index 945a5e5d..93ce354a 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -26,10 +26,10 @@ #include #include "common/blocking_queue.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/helper/model_cache_helper.h" #include "external/graph/types.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/build/graph_builder.h" #include "graph/execute/graph_execute.h" #include "graph/ge_local_context.h" diff --git a/ge/graph/manager/graph_manager_utils.cc 
b/ge/graph/manager/graph_manager_utils.cc index a70b15a6..42251b10 100644 --- a/ge/graph/manager/graph_manager_utils.cc +++ b/ge/graph/manager/graph_manager_utils.cc @@ -21,12 +21,12 @@ #include "framework/common/debug/ge_log.h" #include "common/ge/ge_util.h" -#include "common/string_util.h" +#include "framework/common/string_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/compute_graph.h" #include "graph/op_desc.h" #include "graph/optimize/common/params.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" #include "runtime/mem.h" namespace ge { diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h index d38b4321..6ed76e57 100644 --- a/ge/graph/manager/graph_manager_utils.h +++ b/ge/graph/manager/graph_manager_utils.h @@ -27,17 +27,17 @@ #include #include "common/blocking_queue.h" -#include "common/ge_types.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_types.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/model.h" #include "model/ge_model.h" #include "model/ge_root_model.h" -#include "register/register_fmk_types.h" +#include "external/register/register_fmk_types.h" #include "external/ge/ge_api_types.h" namespace ge { diff --git a/ge/graph/manager/graph_var_manager.h b/ge/graph/manager/graph_var_manager.h index 0da12f9c..736466c4 100755 --- a/ge/graph/manager/graph_var_manager.h +++ b/ge/graph/manager/graph_var_manager.h @@ -30,7 +30,7 @@ #include "framework/common/l2_cache_optimize.h" #include "graph/ge_tensor.h" #include "graph/op_desc.h" -#include "graph/tensor.h" +#include "external/graph/tensor.h" #include "runtime/mem.h" namespace ge { diff --git a/ge/graph/manager/host_mem_manager.h b/ge/graph/manager/host_mem_manager.h index 84d5aebe..6ff19edb 100644 --- a/ge/graph/manager/host_mem_manager.h +++ b/ge/graph/manager/host_mem_manager.h @@ -32,7 +32,7 @@ #include "framework/common/l2_cache_optimize.h" #include "graph/ge_tensor.h" #include "graph/op_desc.h" -#include "graph/tensor.h" +#include "external/graph/tensor.h" #include "runtime/mem.h" namespace ge { diff --git a/ge/graph/manager/model_manager/event_manager.h b/ge/graph/manager/model_manager/event_manager.h index a7464e0c..2cb1c3f6 100644 --- a/ge/graph/manager/model_manager/event_manager.h +++ b/ge/graph/manager/model_manager/event_manager.h @@ -20,9 +20,9 @@ #include -#include "common/fmk_error_codes.h" -#include "common/fmk_types.h" -#include "common/util.h" +#include "framework/common/fmk_error_codes.h" +#include "framework/common/fmk_types.h" +#include "framework/common/util.h" #include "runtime/event.h" namespace ge { diff --git a/ge/graph/manager/trans_var_data_utils.cc b/ge/graph/manager/trans_var_data_utils.cc index 621eba79..4c25dff1 100644 --- a/ge/graph/manager/trans_var_data_utils.cc +++ b/ge/graph/manager/trans_var_data_utils.cc @@ -16,14 +16,14 @@ #include "graph/manager/trans_var_data_utils.h" -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "graph/manager/graph_var_manager.h" 
-#include "graph/types.h" +#include "external/graph/types.h" #include "graph/utils/type_utils.h" #include "common/thread_pool.h" #include diff --git a/ge/graph/manager/trans_var_data_utils.h b/ge/graph/manager/trans_var_data_utils.h index 95ebd09a..d5096ef2 100755 --- a/ge/graph/manager/trans_var_data_utils.h +++ b/ge/graph/manager/trans_var_data_utils.h @@ -24,7 +24,7 @@ #include "graph/utils/tensor_utils.h" #include "graph/node.h" #include "runtime/context.h" -#include "graph_var_manager.h" +#include "graph/manager/graph_var_manager.h" namespace ge { class TransVarDataUtils { diff --git a/ge/graph/manager/util/debug.h b/ge/graph/manager/util/debug.h index e1b13caf..02cacb72 100755 --- a/ge/graph/manager/util/debug.h +++ b/ge/graph/manager/util/debug.h @@ -33,10 +33,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "mmpa/mmpa_api.h" #include "proto/om.pb.h" #include "runtime/kernel.h" diff --git a/ge/graph/manager/util/hcom_util.cc b/ge/graph/manager/util/hcom_util.cc index 2da19cc9..8e12ff27 100644 --- a/ge/graph/manager/util/hcom_util.cc +++ b/ge/graph/manager/util/hcom_util.cc @@ -16,10 +16,10 @@ #include "graph/manager/util/hcom_util.h" -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/math/math_util.h" -#include "common/op/attr_value_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/attr_value_util.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/manager/util/hcom_util.h b/ge/graph/manager/util/hcom_util.h index f80ced35..96ef92bf 100644 --- a/ge/graph/manager/util/hcom_util.h +++ b/ge/graph/manager/util/hcom_util.h @@ -21,11 +21,11 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/opskernel/ge_task_info.h" -#include "common/string_util.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/string_util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/op_desc.h" #include "hccl/hcom.h" #include "proto/task.pb.h" diff --git a/ge/graph/optimize/common/params.h b/ge/graph/optimize/common/params.h index d5b66b8f..fbe58c6b 100644 --- a/ge/graph/optimize/common/params.h +++ b/ge/graph/optimize/common/params.h @@ -20,7 +20,7 @@ #include #include "common/singleton.h" -#include "common/types.h" +#include "framework/common/types.h" namespace ge { class Params : public Singleton { diff --git a/ge/graph/optimize/graph_optimize.h b/ge/graph/optimize/graph_optimize.h index 702b7e33..a3d359b6 100755 --- a/ge/graph/optimize/graph_optimize.h +++ b/ge/graph/optimize/graph_optimize.h @@ -25,13 +25,13 @@ #include #include -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/ge_types.h" #include "common/optimizer/graph_optimizer.h" #include "graph/compute_graph.h" #include "graph/manager/graph_context.h" #include "graph/manager/graph_manager_utils.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace ge { using ComputeGraphPtr = std::shared_ptr; diff --git a/ge/graph/optimize/summary_optimize.cc b/ge/graph/optimize/summary_optimize.cc index d3c02d3e..08a27c4e 100644 --- 
a/ge/graph/optimize/summary_optimize.cc +++ b/ge/graph/optimize/summary_optimize.cc @@ -21,7 +21,7 @@ #include "graph/optimize/graph_optimize.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace { const char *const kSummary = "Summary"; diff --git a/ge/graph/partition/dynamic_shape_partition.h b/ge/graph/partition/dynamic_shape_partition.h index bd3b128f..31146570 100644 --- a/ge/graph/partition/dynamic_shape_partition.h +++ b/ge/graph/partition/dynamic_shape_partition.h @@ -21,7 +21,7 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" namespace ge { diff --git a/ge/graph/partition/engine_place.cc b/ge/graph/partition/engine_place.cc index 93cc3e61..8639f015 100755 --- a/ge/graph/partition/engine_place.cc +++ b/ge/graph/partition/engine_place.cc @@ -22,7 +22,7 @@ #include #include -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "common/util/error_manager/error_manager.h" #include "graph/utils/graph_utils.h" #include "graph/utils/op_desc_utils.h" diff --git a/ge/graph/partition/engine_place.h b/ge/graph/partition/engine_place.h index 5dc3e6a0..125babb6 100755 --- a/ge/graph/partition/engine_place.h +++ b/ge/graph/partition/engine_place.h @@ -20,7 +20,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" namespace ge { diff --git a/ge/graph/partition/graph_partition.cc b/ge/graph/partition/graph_partition.cc index a810aab0..6f221d97 100755 --- a/ge/graph/partition/graph_partition.cc +++ b/ge/graph/partition/graph_partition.cc @@ -24,7 +24,7 @@ #include "analyzer/analyzer.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/graph_manager_utils.h" diff --git a/ge/graph/partition/graph_partition.h b/ge/graph/partition/graph_partition.h index f34c67e6..560aa9e7 100644 --- a/ge/graph/partition/graph_partition.h +++ b/ge/graph/partition/graph_partition.h @@ -28,7 +28,7 @@ #include #include "graph/compute_graph.h" #include "graph/manager/graph_manager_utils.h" -#include "graph/operator_reg.h" +#include "external/graph/operator_reg.h" #include "graph/partition/engine_place.h" namespace ge { diff --git a/ge/graph/partition/stage_partition.cc b/ge/graph/partition/stage_partition.cc index 309e24c4..68b4209f 100644 --- a/ge/graph/partition/stage_partition.cc +++ b/ge/graph/partition/stage_partition.cc @@ -21,8 +21,8 @@ #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/utils/op_desc_utils.h" -#include "common/util.h" -#include "common/types.h" +#include "framework/common/util.h" +#include "framework/common/types.h" namespace ge { namespace { diff --git a/ge/graph/partition/stage_partition.h b/ge/graph/partition/stage_partition.h index bac00e6b..99aac2b9 100644 --- a/ge/graph/partition/stage_partition.h +++ b/ge/graph/partition/stage_partition.h @@ -21,7 +21,7 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" namespace ge { diff --git a/ge/graph/passes/addn_pass.h b/ge/graph/passes/addn_pass.h index 373d1842..075ff9fc 100644 --- 
a/ge/graph/passes/addn_pass.h +++ b/ge/graph/passes/addn_pass.h @@ -17,10 +17,10 @@ #ifndef GE_GRAPH_PASSES_ADDN_PASS_H_ #define GE_GRAPH_PASSES_ADDN_PASS_H_ -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "framework/common/types.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/passes/base_pass.h" #include "graph/passes/pass_utils.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/aicpu_constant_folding_pass.cc b/ge/graph/passes/aicpu_constant_folding_pass.cc index 8fdb51a1..d33d4db2 100644 --- a/ge/graph/passes/aicpu_constant_folding_pass.cc +++ b/ge/graph/passes/aicpu_constant_folding_pass.cc @@ -19,9 +19,9 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" -#include "common/types.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/attr_utils.h" diff --git a/ge/graph/passes/atomic_addr_clean_pass.cc b/ge/graph/passes/atomic_addr_clean_pass.cc index 9f202c77..cc22d126 100755 --- a/ge/graph/passes/atomic_addr_clean_pass.cc +++ b/ge/graph/passes/atomic_addr_clean_pass.cc @@ -22,7 +22,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" #include "graph/common/ge_call_wrapper.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/passes/atomic_addr_clean_pass.h b/ge/graph/passes/atomic_addr_clean_pass.h index 0d0b8fff..30162359 100755 --- a/ge/graph/passes/atomic_addr_clean_pass.h +++ b/ge/graph/passes/atomic_addr_clean_pass.h @@ -19,7 +19,7 @@ #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/attach_stream_label_pass.cc b/ge/graph/passes/attach_stream_label_pass.cc index d5b28ec7..bcf86bc2 100644 --- a/ge/graph/passes/attach_stream_label_pass.cc +++ b/ge/graph/passes/attach_stream_label_pass.cc @@ -15,7 +15,7 @@ */ #include "graph/passes/attach_stream_label_pass.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/common/omg_util.h" using std::string; diff --git a/ge/graph/passes/base_pass.cc b/ge/graph/passes/base_pass.cc index 165e7e81..a1551eb2 100755 --- a/ge/graph/passes/base_pass.cc +++ b/ge/graph/passes/base_pass.cc @@ -19,7 +19,7 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" #include "graph/compute_graph.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/bitcast_pass.h b/ge/graph/passes/bitcast_pass.h index 34acaf57..60990dea 100644 --- a/ge/graph/passes/bitcast_pass.h +++ b/ge/graph/passes/bitcast_pass.h @@ -17,10 +17,10 @@ #ifndef GE_GRAPH_PASSES_BITCAST_PASS_H_ #define GE_GRAPH_PASSES_BITCAST_PASS_H_ -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "framework/common/types.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/op_desc.h" #include "graph/passes/base_pass.h" #include "graph/passes/pass_utils.h" diff --git a/ge/graph/passes/buffer_pool_memory_pass.h b/ge/graph/passes/buffer_pool_memory_pass.h index e3d1c159..89fc5363 100644 --- a/ge/graph/passes/buffer_pool_memory_pass.h +++ 
b/ge/graph/passes/buffer_pool_memory_pass.h @@ -18,7 +18,7 @@ #define GE_GRAPH_PASSES_BUFFER_POOL_MEMORY_PASS_H_ #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/common_subexpression_elimination_pass.cc b/ge/graph/passes/common_subexpression_elimination_pass.cc index 852ed98a..c41a5cf5 100644 --- a/ge/graph/passes/common_subexpression_elimination_pass.cc +++ b/ge/graph/passes/common_subexpression_elimination_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "common_subexpression_elimination_pass.h" +#include "graph/passes/common_subexpression_elimination_pass.h" #include #include diff --git a/ge/graph/passes/common_subexpression_elimination_pass.h b/ge/graph/passes/common_subexpression_elimination_pass.h index 83bfbace..b182f8b9 100644 --- a/ge/graph/passes/common_subexpression_elimination_pass.h +++ b/ge/graph/passes/common_subexpression_elimination_pass.h @@ -16,7 +16,7 @@ #ifndef GE_COMMON_SUBEXPRESSION_ELIMINATION_H_ #define GE_COMMON_SUBEXPRESSION_ELIMINATION_H_ -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/compile_nodes_pass.cc b/ge/graph/passes/compile_nodes_pass.cc index d0dcec16..1e734178 100755 --- a/ge/graph/passes/compile_nodes_pass.cc +++ b/ge/graph/passes/compile_nodes_pass.cc @@ -19,7 +19,7 @@ #include #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" #include "graph/common/ge_call_wrapper.h" diff --git a/ge/graph/passes/cond_pass.cc b/ge/graph/passes/cond_pass.cc index 116e4f89..47a75cd8 100644 --- a/ge/graph/passes/cond_pass.cc +++ b/ge/graph/passes/cond_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "graph/passes/cond_pass.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/utils/graph_utils.h" #include "graph/utils/type_utils.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/cond_remove_pass.cc b/ge/graph/passes/cond_remove_pass.cc index 478858a9..91e44458 100644 --- a/ge/graph/passes/cond_remove_pass.cc +++ b/ge/graph/passes/cond_remove_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "graph/passes/cond_remove_pass.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/constant_folding_pass.cc b/ge/graph/passes/constant_folding_pass.cc index 25fe26da..6607388f 100644 --- a/ge/graph/passes/constant_folding_pass.cc +++ b/ge/graph/passes/constant_folding_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/constant_folding_pass.h" #include -#include "graph/operator_factory.h" +#include "external/graph/operator_factory.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" #include "init/gelib.h" diff --git a/ge/graph/passes/constant_fuse_same_pass.h b/ge/graph/passes/constant_fuse_same_pass.h index 3ff2d6b7..a7326c32 100755 --- a/ge/graph/passes/constant_fuse_same_pass.h +++ b/ge/graph/passes/constant_fuse_same_pass.h @@ -22,7 +22,7 @@ #include #include #include "graph/aligned_ptr.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/data_pass.h b/ge/graph/passes/data_pass.h index 519ae046..6f841139 100644 --- a/ge/graph/passes/data_pass.h +++ b/ge/graph/passes/data_pass.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_PASSES_DATA_PASS_H_ #define GE_GRAPH_PASSES_DATA_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/dimension_adjust_pass.h b/ge/graph/passes/dimension_adjust_pass.h index 7766f140..a84f0d8d 100755 --- a/ge/graph/passes/dimension_adjust_pass.h +++ b/ge/graph/passes/dimension_adjust_pass.h @@ -17,10 +17,10 @@ #ifndef GE_GRAPH_PASSES_DIMENSION_ADJUST_PASS_H_ #define GE_GRAPH_PASSES_DIMENSION_ADJUST_PASS_H_ -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "graph/common/omg_util.h" #include "graph/passes/base_pass.h" #include "graph/utils/attr_utils.h" diff --git a/ge/graph/passes/dimension_compute_pass.cc b/ge/graph/passes/dimension_compute_pass.cc index 350faf71..a24a6bd4 100755 --- a/ge/graph/passes/dimension_compute_pass.cc +++ b/ge/graph/passes/dimension_compute_pass.cc @@ -20,7 +20,7 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/utils/attr_utils.h" diff --git a/ge/graph/passes/end_of_sequence_add_control_pass.h b/ge/graph/passes/end_of_sequence_add_control_pass.h index dcc65848..32ee0b25 100644 --- a/ge/graph/passes/end_of_sequence_add_control_pass.h +++ b/ge/graph/passes/end_of_sequence_add_control_pass.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_PASSES_END_OF_SEQUENCE_ADD_CONTROL_EDGE_PASS_H_ #define GE_GRAPH_PASSES_END_OF_SEQUENCE_ADD_CONTROL_EDGE_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/flow_ctrl_pass.h b/ge/graph/passes/flow_ctrl_pass.h index 74f3cce0..cf1af97a 100755 --- a/ge/graph/passes/flow_ctrl_pass.h +++ b/ge/graph/passes/flow_ctrl_pass.h @@ -20,7 +20,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "inc/graph_pass.h" namespace ge { diff --git 
a/ge/graph/passes/for_pass.cc b/ge/graph/passes/for_pass.cc index 7d09f370..260e6ea0 100644 --- a/ge/graph/passes/for_pass.cc +++ b/ge/graph/passes/for_pass.cc @@ -16,7 +16,7 @@ #include "graph/passes/for_pass.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" #include "framework/common/ge_inner_error_codes.h" diff --git a/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc b/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc index ec7b2388..280afb6f 100644 --- a/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc +++ b/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc @@ -21,7 +21,7 @@ #include #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/type_utils.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/fuse_data_nodes_with_common_input_pass.h b/ge/graph/passes/fuse_data_nodes_with_common_input_pass.h index 9ff6ab89..33543ded 100755 --- a/ge/graph/passes/fuse_data_nodes_with_common_input_pass.h +++ b/ge/graph/passes/fuse_data_nodes_with_common_input_pass.h @@ -20,7 +20,7 @@ #include #include #include -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/get_original_format_pass.cc b/ge/graph/passes/get_original_format_pass.cc index 670cd50c..4b27dd0e 100644 --- a/ge/graph/passes/get_original_format_pass.cc +++ b/ge/graph/passes/get_original_format_pass.cc @@ -18,9 +18,9 @@ #include -#include "common/debug/log.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/omg/omg_inner_types.h" #include "graph/utils/attr_utils.h" diff --git a/ge/graph/passes/global_step_insert_pass.h b/ge/graph/passes/global_step_insert_pass.h index da83e93a..16be3d4a 100755 --- a/ge/graph/passes/global_step_insert_pass.h +++ b/ge/graph/passes/global_step_insert_pass.h @@ -20,7 +20,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/guarantee_const_pass.cc b/ge/graph/passes/guarantee_const_pass.cc index 1d369f38..b1df73a9 100644 --- a/ge/graph/passes/guarantee_const_pass.cc +++ b/ge/graph/passes/guarantee_const_pass.cc @@ -19,8 +19,8 @@ #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "graph/common/omg_util.h" #include "graph/utils/attr_utils.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/hccl_continuous_memcpy_pass.cc b/ge/graph/passes/hccl_continuous_memcpy_pass.cc index 56cbb005..7f4597b3 100644 --- a/ge/graph/passes/hccl_continuous_memcpy_pass.cc +++ b/ge/graph/passes/hccl_continuous_memcpy_pass.cc @@ -18,9 +18,9 @@ #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" #include "framework/common/types.h" #include 
"graph/utils/graph_utils.h" diff --git a/ge/graph/passes/hccl_continuous_memcpy_pass.h b/ge/graph/passes/hccl_continuous_memcpy_pass.h index 5fbb6fd0..d710531d 100644 --- a/ge/graph/passes/hccl_continuous_memcpy_pass.h +++ b/ge/graph/passes/hccl_continuous_memcpy_pass.h @@ -20,7 +20,7 @@ #include #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/hccl_group_pass.cc b/ge/graph/passes/hccl_group_pass.cc index bbfd9b56..35baade6 100644 --- a/ge/graph/passes/hccl_group_pass.cc +++ b/ge/graph/passes/hccl_group_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "hccl_group_pass.h" +#include "graph/passes/hccl_group_pass.h" #include #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/passes/hccl_memcpy_pass.cc b/ge/graph/passes/hccl_memcpy_pass.cc index d56ee342..2d83bf51 100755 --- a/ge/graph/passes/hccl_memcpy_pass.cc +++ b/ge/graph/passes/hccl_memcpy_pass.cc @@ -18,9 +18,9 @@ #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" #include "framework/common/types.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/hccl_memcpy_pass.h b/ge/graph/passes/hccl_memcpy_pass.h index b75b27d1..e6ee519b 100755 --- a/ge/graph/passes/hccl_memcpy_pass.h +++ b/ge/graph/passes/hccl_memcpy_pass.h @@ -20,7 +20,7 @@ #include #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/hccl_tailing_optimization_pass.cc b/ge/graph/passes/hccl_tailing_optimization_pass.cc index e1e2f276..d952885d 100644 --- a/ge/graph/passes/hccl_tailing_optimization_pass.cc +++ b/ge/graph/passes/hccl_tailing_optimization_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "hccl_tailing_optimization_pass.h" +#include "graph/passes/hccl_tailing_optimization_pass.h" #include "graph/common/transop_util.h" namespace ge { diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc index b74d1c97..60a2f09a 100755 --- a/ge/graph/passes/infershape_pass.cc +++ b/ge/graph/passes/infershape_pass.cc @@ -24,8 +24,8 @@ #include "graph/utils/node_utils.h" #include "graph/common/omg_util.h" #include "graph/debug/ge_attr_define.h" -#include "utils/tensor_utils.h" -#include "utils/type_utils.h" +#include "graph/utils/tensor_utils.h" +#include "graph/utils/type_utils.h" namespace ge { diff --git a/ge/graph/passes/input_output_connection_identify_pass.cc b/ge/graph/passes/input_output_connection_identify_pass.cc index 5779fb41..d5551bdc 100644 --- a/ge/graph/passes/input_output_connection_identify_pass.cc +++ b/ge/graph/passes/input_output_connection_identify_pass.cc @@ -23,7 +23,7 @@ #include #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/input_output_connection_identify_pass.h b/ge/graph/passes/input_output_connection_identify_pass.h index 97ed315d..c4a4653e 100755 --- a/ge/graph/passes/input_output_connection_identify_pass.h +++ b/ge/graph/passes/input_output_connection_identify_pass.h @@ -19,7 +19,7 @@ #include #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/iterator_op_pass.cc b/ge/graph/passes/iterator_op_pass.cc index 3e85887b..d1de809d 100644 --- a/ge/graph/passes/iterator_op_pass.cc +++ b/ge/graph/passes/iterator_op_pass.cc @@ -21,13 +21,13 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" #include "common/ge/ge_util.h" #include "framework/common/debug/ge_log.h" #include "graph/anchor.h" #include "graph/common/omg_util.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/node.h" #include "graph/passes/pass_utils.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/iterator_op_pass.h b/ge/graph/passes/iterator_op_pass.h index d9303358..611109dc 100644 --- a/ge/graph/passes/iterator_op_pass.h +++ b/ge/graph/passes/iterator_op_pass.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_PASSES_ITERATOR_OP_PASS_H_ #define GE_GRAPH_PASSES_ITERATOR_OP_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/link_gen_mask_nodes_pass.cc b/ge/graph/passes/link_gen_mask_nodes_pass.cc index 522c20ad..9ff3bfd7 100755 --- a/ge/graph/passes/link_gen_mask_nodes_pass.cc +++ b/ge/graph/passes/link_gen_mask_nodes_pass.cc @@ -18,7 +18,7 @@ #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "framework/common/types.h" #include "init/gelib.h" diff --git a/ge/graph/passes/link_gen_mask_nodes_pass.h b/ge/graph/passes/link_gen_mask_nodes_pass.h index 12d68f1b..c6c1e703 100644 --- a/ge/graph/passes/link_gen_mask_nodes_pass.h +++ b/ge/graph/passes/link_gen_mask_nodes_pass.h @@ -21,7 +21,7 @@ #include #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git 
a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc index 74babadc..fbf69c04 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "mark_force_unknown_for_cond_pass.h" +#include "graph/passes/mark_force_unknown_for_cond_pass.h" #include diff --git a/ge/graph/passes/mark_graph_unknown_status_pass.h b/ge/graph/passes/mark_graph_unknown_status_pass.h index a1148c6e..2cc86dbd 100644 --- a/ge/graph/passes/mark_graph_unknown_status_pass.h +++ b/ge/graph/passes/mark_graph_unknown_status_pass.h @@ -16,7 +16,7 @@ #ifndef GE_GRAPH_PASSES_MARK_GRAPH_UNKNOWN_STATUS_PASS_H_ #define GE_GRAPH_PASSES_MARK_GRAPH_UNKNOWN_STATUS_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/mark_node_unknown_shape_pass.h b/ge/graph/passes/mark_node_unknown_shape_pass.h index b78b7826..acd12582 100644 --- a/ge/graph/passes/mark_node_unknown_shape_pass.h +++ b/ge/graph/passes/mark_node_unknown_shape_pass.h @@ -16,7 +16,7 @@ #ifndef GE_GRAPH_PASSES_MARK_NODE_UNKNOWN_SHAPE_PASS_H_ #define GE_GRAPH_PASSES_MARK_NODE_UNKNOWN_SHAPE_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/mark_same_addr_pass.h b/ge/graph/passes/mark_same_addr_pass.h index 518fe418..adf971a2 100644 --- a/ge/graph/passes/mark_same_addr_pass.h +++ b/ge/graph/passes/mark_same_addr_pass.h @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" #ifndef GE_GRAPH_PASSES_MARK_SAME_ADDR_PASS_H_ diff --git a/ge/graph/passes/merge_input_memcpy_pass.cc b/ge/graph/passes/merge_input_memcpy_pass.cc index 00c04131..044d4ad9 100644 --- a/ge/graph/passes/merge_input_memcpy_pass.cc +++ b/ge/graph/passes/merge_input_memcpy_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/merge_input_memcpy_pass.h" #include "common/ge/ge_util.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/common/omg_util.h" namespace ge { diff --git a/ge/graph/passes/merge_to_stream_merge_pass.cc b/ge/graph/passes/merge_to_stream_merge_pass.cc index dbcff620..c58def59 100644 --- a/ge/graph/passes/merge_to_stream_merge_pass.cc +++ b/ge/graph/passes/merge_to_stream_merge_pass.cc @@ -16,7 +16,7 @@ #include "graph/passes/merge_to_stream_merge_pass.h" #include "common/ge/ge_util.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/common/omg_util.h" namespace ge { diff --git a/ge/graph/passes/net_output_pass.h b/ge/graph/passes/net_output_pass.h index ab190169..fecccc35 100644 --- a/ge/graph/passes/net_output_pass.h +++ b/ge/graph/passes/net_output_pass.h @@ -22,7 +22,7 @@ #include #include -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/no_use_reshape_remove_pass.cc b/ge/graph/passes/no_use_reshape_remove_pass.cc index b3074565..e0a0ceb8 100644 --- a/ge/graph/passes/no_use_reshape_remove_pass.cc +++ b/ge/graph/passes/no_use_reshape_remove_pass.cc @@ -19,7 +19,7 @@ #include #include -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "external/graph/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" diff --git 
a/ge/graph/passes/parallel_group_pass.h b/ge/graph/passes/parallel_group_pass.h index 9b895598..cdcdabab 100644 --- a/ge/graph/passes/parallel_group_pass.h +++ b/ge/graph/passes/parallel_group_pass.h @@ -19,7 +19,7 @@ #include #include -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/pass_manager.cc b/ge/graph/passes/pass_manager.cc index fa2f1e17..7c9aa414 100644 --- a/ge/graph/passes/pass_manager.cc +++ b/ge/graph/passes/pass_manager.cc @@ -15,12 +15,12 @@ */ #include "inc/pass_manager.h" -#include "common/debug/log.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/utils/node_utils.h" #include "graph/common/ge_call_wrapper.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace ge { const vector>& PassManager::GraphPasses() const { return names_to_graph_passes_; } diff --git a/ge/graph/passes/pass_utils.cc b/ge/graph/passes/pass_utils.cc index c0ef7685..d5306f5f 100644 --- a/ge/graph/passes/pass_utils.cc +++ b/ge/graph/passes/pass_utils.cc @@ -23,10 +23,10 @@ #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "graph/common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_tensor.h" @@ -35,7 +35,7 @@ #include "graph/utils/op_desc_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" -#include "utils/node_utils.h" +#include "graph/utils/node_utils.h" #include "common/formats/utils/formats_trans_utils.h" namespace ge { diff --git a/ge/graph/passes/pass_utils.h b/ge/graph/passes/pass_utils.h index bd506d09..475c4e77 100755 --- a/ge/graph/passes/pass_utils.h +++ b/ge/graph/passes/pass_utils.h @@ -19,7 +19,7 @@ #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" namespace ge { diff --git a/ge/graph/passes/permute_pass.cc b/ge/graph/passes/permute_pass.cc index 8254db72..21222b2c 100644 --- a/ge/graph/passes/permute_pass.cc +++ b/ge/graph/passes/permute_pass.cc @@ -17,8 +17,8 @@ #include "graph/passes/permute_pass.h" #include #include -#include "common/debug/log.h" -#include "common/types.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" #include "graph/utils/attr_utils.h" #include "graph/utils/op_desc_utils.h" #include "inc/kernel.h" diff --git a/ge/graph/passes/print_op_pass.h b/ge/graph/passes/print_op_pass.h index deaf559b..7ee19d5d 100755 --- a/ge/graph/passes/print_op_pass.h +++ b/ge/graph/passes/print_op_pass.h @@ -21,7 +21,7 @@ #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/common/omg_util.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/passes/base_pass.h" #include "graph/utils/graph_utils.h" #include "graph/passes/pass_utils.h" diff --git a/ge/graph/passes/prune_pass.cc b/ge/graph/passes/prune_pass.cc index 1e2ec4ab..cc6c7618 100644 --- a/ge/graph/passes/prune_pass.cc +++ b/ge/graph/passes/prune_pass.cc @@ -19,8 +19,8 @@ #include #include #include -#include "common/debug/log.h" 
-#include "common/types.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/ref_identity_delete_op_pass.cc b/ge/graph/passes/ref_identity_delete_op_pass.cc index 39794cff..7bc5804b 100644 --- a/ge/graph/passes/ref_identity_delete_op_pass.cc +++ b/ge/graph/passes/ref_identity_delete_op_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "ref_identity_delete_op_pass.h" +#include "graph/passes/ref_identity_delete_op_pass.h" #include #include #include "graph/common/transop_util.h" diff --git a/ge/graph/passes/remove_same_const_pass.cc b/ge/graph/passes/remove_same_const_pass.cc index a06eea43..947ff3f3 100644 --- a/ge/graph/passes/remove_same_const_pass.cc +++ b/ge/graph/passes/remove_same_const_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "remove_same_const_pass.h" +#include "graph/passes/remove_same_const_pass.h" #include #include diff --git a/ge/graph/passes/remove_same_const_pass.h b/ge/graph/passes/remove_same_const_pass.h index 08905bd2..6934a472 100644 --- a/ge/graph/passes/remove_same_const_pass.h +++ b/ge/graph/passes/remove_same_const_pass.h @@ -16,7 +16,7 @@ #ifndef GE_GRAPH_PASSES_REMOVE_SAME_CONST_PASS_H_ #define GE_GRAPH_PASSES_REMOVE_SAME_CONST_PASS_H_ -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/replace_transshape_pass.cc b/ge/graph/passes/replace_transshape_pass.cc index 28957a61..c7844619 100644 --- a/ge/graph/passes/replace_transshape_pass.cc +++ b/ge/graph/passes/replace_transshape_pass.cc @@ -19,7 +19,7 @@ #include #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "graph/common/omg_util.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/resource_pair_add_control_pass.cc b/ge/graph/passes/resource_pair_add_control_pass.cc index a104a95e..14f04fe0 100755 --- a/ge/graph/passes/resource_pair_add_control_pass.cc +++ b/ge/graph/passes/resource_pair_add_control_pass.cc @@ -21,9 +21,9 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/utils/attr_utils.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/graph/passes/resource_pair_remove_control_pass.cc b/ge/graph/passes/resource_pair_remove_control_pass.cc index 73b96008..138efb43 100755 --- a/ge/graph/passes/resource_pair_remove_control_pass.cc +++ b/ge/graph/passes/resource_pair_remove_control_pass.cc @@ -21,9 +21,9 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/utils/attr_utils.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/graph/passes/same_transdata_breadth_fusion_pass.cc b/ge/graph/passes/same_transdata_breadth_fusion_pass.cc index 60f5c7c9..afd78a4d 100644 --- 
a/ge/graph/passes/same_transdata_breadth_fusion_pass.cc +++ b/ge/graph/passes/same_transdata_breadth_fusion_pass.cc @@ -20,8 +20,8 @@ #include #include #include -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/utils/op_desc_utils.h" diff --git a/ge/graph/passes/save_pass.cc b/ge/graph/passes/save_pass.cc index 1181461b..6fec3a3b 100755 --- a/ge/graph/passes/save_pass.cc +++ b/ge/graph/passes/save_pass.cc @@ -20,7 +20,7 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/utils/graph_utils.h" namespace ge { diff --git a/ge/graph/passes/save_pass.h b/ge/graph/passes/save_pass.h index 512dfa62..8efcc46e 100755 --- a/ge/graph/passes/save_pass.h +++ b/ge/graph/passes/save_pass.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_PASSES_SAVE_PASS_H_ #define GE_GRAPH_PASSES_SAVE_PASS_H_ -#include "graph/graph.h" +#include "external/graph/graph.h" #include "inc/graph_pass.h" namespace ge { diff --git a/ge/graph/passes/shape_operate_op_remove_pass.cc b/ge/graph/passes/shape_operate_op_remove_pass.cc index a703f1c9..f6ce0ec1 100755 --- a/ge/graph/passes/shape_operate_op_remove_pass.cc +++ b/ge/graph/passes/shape_operate_op_remove_pass.cc @@ -15,9 +15,9 @@ */ #include "graph/passes/shape_operate_op_remove_pass.h" -#include "common/debug/log.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/utils/attr_utils.h" using domi::SUCCESS; diff --git a/ge/graph/passes/stop_gradient_pass.h b/ge/graph/passes/stop_gradient_pass.h index 808174bc..5132b889 100755 --- a/ge/graph/passes/stop_gradient_pass.h +++ b/ge/graph/passes/stop_gradient_pass.h @@ -18,7 +18,7 @@ #define GE_GRAPH_PASSES_STOP_GRADIENT_PASS_H_ #include "framework/common/debug/ge_log.h" -#include "common/types.h" +#include "framework/common/types.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/omg_util.h" #include "graph/passes/base_pass.h" diff --git a/ge/graph/passes/subexpression_migration_pass.cc b/ge/graph/passes/subexpression_migration_pass.cc index 6265851a..f39e02e5 100755 --- a/ge/graph/passes/subexpression_migration_pass.cc +++ b/ge/graph/passes/subexpression_migration_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "subexpression_migration_pass.h" +#include "graph/passes/subexpression_migration_pass.h" #include "graph/utils/node_utils.h" #include "ge_local_engine/engine/host_cpu_engine.h" diff --git a/ge/graph/passes/subexpression_migration_pass.h b/ge/graph/passes/subexpression_migration_pass.h index d2733fcf..52326798 100755 --- a/ge/graph/passes/subexpression_migration_pass.h +++ b/ge/graph/passes/subexpression_migration_pass.h @@ -17,7 +17,7 @@ #ifndef GE_COMMON_SUBEXPRESSION_MIGRATION_H_ #define GE_COMMON_SUBEXPRESSION_MIGRATION_H_ -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" #include diff --git a/ge/graph/passes/subgraph_const_migration_pass.cc b/ge/graph/passes/subgraph_const_migration_pass.cc index d15e60cf..eac0c84b 100644 --- a/ge/graph/passes/subgraph_const_migration_pass.cc +++ b/ge/graph/passes/subgraph_const_migration_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "subgraph_const_migration_pass.h" +#include "graph/passes/subgraph_const_migration_pass.h" #include "graph/utils/node_utils.h" #include "ge_local_engine/engine/host_cpu_engine.h" diff --git a/ge/graph/passes/subgraph_const_migration_pass.h b/ge/graph/passes/subgraph_const_migration_pass.h index 2834fd66..e43a3049 100755 --- a/ge/graph/passes/subgraph_const_migration_pass.h +++ b/ge/graph/passes/subgraph_const_migration_pass.h @@ -17,7 +17,7 @@ #ifndef GE_COMMON_SUBGRAPH_CONST_MIGRATION_H_ #define GE_COMMON_SUBGRAPH_CONST_MIGRATION_H_ -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" #include diff --git a/ge/graph/passes/switch_data_edges_bypass.cc b/ge/graph/passes/switch_data_edges_bypass.cc index 5f66a0ca..c7b46b7c 100644 --- a/ge/graph/passes/switch_data_edges_bypass.cc +++ b/ge/graph/passes/switch_data_edges_bypass.cc @@ -14,13 +14,13 @@ * limitations under the License. 
*/ -#include "switch_data_edges_bypass.h" +#include "graph/passes/switch_data_edges_bypass.h" #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/util.h" #include "graph/utils/node_utils.h" namespace ge { diff --git a/ge/graph/passes/switch_logic_remove_pass.cc b/ge/graph/passes/switch_logic_remove_pass.cc index 13b409c5..0d6bc2ce 100644 --- a/ge/graph/passes/switch_logic_remove_pass.cc +++ b/ge/graph/passes/switch_logic_remove_pass.cc @@ -21,7 +21,7 @@ #include "framework/common/debug/ge_log.h" #include "graph/utils/graph_utils.h" #include "graph/passes/pass_utils.h" -#include "common/util.h" +#include "framework/common/util.h" namespace ge { namespace { diff --git a/ge/graph/passes/switch_to_stream_switch_pass.cc b/ge/graph/passes/switch_to_stream_switch_pass.cc index e4ab0111..77a7c9db 100644 --- a/ge/graph/passes/switch_to_stream_switch_pass.cc +++ b/ge/graph/passes/switch_to_stream_switch_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/switch_to_stream_switch_pass.h" #include #include "common/ge/ge_util.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/common/omg_util.h" #include "graph/ge_context.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/transop_breadth_fusion_pass.cc b/ge/graph/passes/transop_breadth_fusion_pass.cc index 58b40a5f..5b8e1940 100644 --- a/ge/graph/passes/transop_breadth_fusion_pass.cc +++ b/ge/graph/passes/transop_breadth_fusion_pass.cc @@ -19,7 +19,7 @@ #include #include -#include "common/types.h" +#include "framework/common/types.h" #include "graph/common/transop_util.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/transop_depth_fusion_pass.cc b/ge/graph/passes/transop_depth_fusion_pass.cc index ea4add35..66ce346a 100755 --- a/ge/graph/passes/transop_depth_fusion_pass.cc +++ b/ge/graph/passes/transop_depth_fusion_pass.cc @@ -17,8 +17,8 @@ #include "graph/passes/transop_depth_fusion_pass.h" #include -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "graph/compute_graph.h" #include "graph/ge_tensor.h" #include "graph/op_desc.h" diff --git a/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc b/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc index 76233f53..483575a4 100644 --- a/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc +++ b/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc @@ -16,8 +16,8 @@ #include "graph/passes/transop_nearby_allreduce_fusion_pass.h" #include "framework/common/debug/ge_log.h" -#include "common/debug/log.h" -#include "common/types.h" +#include "framework/common/debug/log.h" +#include "framework/common/types.h" #include "graph/utils/graph_utils.h" #include "graph/common/transop_util.h" diff --git a/ge/graph/passes/transop_symmetry_elimination_pass.cc b/ge/graph/passes/transop_symmetry_elimination_pass.cc index 665f4bd8..fe0e48f9 100644 --- a/ge/graph/passes/transop_symmetry_elimination_pass.cc +++ b/ge/graph/passes/transop_symmetry_elimination_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "transop_symmetry_elimination_pass.h" +#include "graph/passes/transop_symmetry_elimination_pass.h" #include "common/formats/utils/formats_trans_utils.h" #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" @@ -23,7 +23,7 @@ #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" -#include "types.h" +#include "framework/common/types.h" namespace { const std::set white_list_op{ge::TRANSPOSED, ge::RESHAPE, ge::REFORMAT, ge::CAST, ge::TRANSDATA}; diff --git a/ge/graph/passes/transop_without_reshape_fusion_pass.cc b/ge/graph/passes/transop_without_reshape_fusion_pass.cc index 7e80299b..10e619b9 100644 --- a/ge/graph/passes/transop_without_reshape_fusion_pass.cc +++ b/ge/graph/passes/transop_without_reshape_fusion_pass.cc @@ -20,8 +20,8 @@ #include #include #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "graph/common/transop_util.h" #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/passes/unused_args_clean_pass.cc b/ge/graph/passes/unused_args_clean_pass.cc index 33250311..bc338b86 100755 --- a/ge/graph/passes/unused_args_clean_pass.cc +++ b/ge/graph/passes/unused_args_clean_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "unused_args_clean_pass.h" +#include "graph/passes/unused_args_clean_pass.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/unused_args_clean_pass.h b/ge/graph/passes/unused_args_clean_pass.h index 90a146b2..400cc802 100644 --- a/ge/graph/passes/unused_args_clean_pass.h +++ b/ge/graph/passes/unused_args_clean_pass.h @@ -16,7 +16,7 @@ #ifndef GE_COMMON_CASE_ARGS_CLEAN_H_ #define GE_COMMON_CASE_ARGS_CLEAN_H_ -#include "graph/types.h" +#include "external/graph/types.h" #include "inc/graph_pass.h" #include diff --git a/ge/graph/passes/variable_op_pass.cc b/ge/graph/passes/variable_op_pass.cc index 862b7016..e803949e 100644 --- a/ge/graph/passes/variable_op_pass.cc +++ b/ge/graph/passes/variable_op_pass.cc @@ -21,7 +21,7 @@ #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" #include "graph/ge_context.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/manager/graph_var_manager.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/graph/passes/variable_op_pass.h b/ge/graph/passes/variable_op_pass.h index 3b18882c..d442fdf4 100755 --- a/ge/graph/passes/variable_op_pass.h +++ b/ge/graph/passes/variable_op_pass.h @@ -19,7 +19,7 @@ #include #include #include "graph/common/transop_util.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/manager/graph_var_manager.h" #include "graph/manager/util/variable_accelerate_ctrl.h" #include "inc/graph_pass.h" diff --git a/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc b/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc index 1c8eb0ec..cac6bf75 100644 --- a/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc +++ b/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "variable_ref_useless_control_out_delete_pass.h" +#include "graph/passes/variable_ref_useless_control_out_delete_pass.h" namespace ge { Status VariableRefUselessControlOutDeletePass::Run(ge::ComputeGraphPtr graph) { diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index a73c6a96..6fd83623 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -24,9 +24,9 @@ #include "common/formats/format_transfers/format_transfer_transpose.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/util/error_manager/error_manager.h" -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "common/math/math_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "ir_build/option_utils.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -39,7 +39,7 @@ #include "graph/passes/addn_pass.h" #include "graph/passes/aicpu_constant_folding_pass.h" #include "graph/passes/assert_pass.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/passes/common_subexpression_elimination_pass.h" #include "graph/passes/cond_pass.h" #include "graph/passes/cond_remove_pass.h" @@ -79,7 +79,7 @@ #include "graph/utils/type_utils.h" #include "inc/pass_manager.h" #include "init/gelib.h" -#include "multi_batch_copy_graph.h" +#include "graph/preprocess/multi_batch_copy_graph.h" #include "graph/passes/data_pass.h" #include "graph/passes/mark_agnostic_pass.h" diff --git a/ge/graph/preprocess/graph_preprocess.h b/ge/graph/preprocess/graph_preprocess.h index 22bc566c..3dfe1797 100755 --- a/ge/graph/preprocess/graph_preprocess.h +++ b/ge/graph/preprocess/graph_preprocess.h @@ -21,13 +21,13 @@ #include #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" #include "common/model_parser/model_parser.h" #include "common/properties_manager.h" -#include "common/string_util.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/string_util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/compute_graph.h" #include "graph/manager/graph_manager_utils.h" #include "graph/manager/util/variable_accelerate_ctrl.h" @@ -35,7 +35,7 @@ #include "graph/node.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" #include "runtime/context.h" namespace ge { diff --git a/ge/graph/preprocess/insert_op/base_insert_op.h b/ge/graph/preprocess/insert_op/base_insert_op.h index b0d7a7a6..6b1eb177 100644 --- a/ge/graph/preprocess/insert_op/base_insert_op.h +++ b/ge/graph/preprocess/insert_op/base_insert_op.h @@ -21,8 +21,8 @@ #include #include #include -#include "common/fmk_error_codes.h" -#include "common/types.h" +#include "framework/common/fmk_error_codes.h" +#include "framework/common/types.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" #include "proto/insert_op.pb.h" diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc index 5c191af7..5d3a2a85 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc @@ -20,10 +20,10 @@ #include #include #include -#include "base_insert_op.h" +#include 
"graph/preprocess/insert_op/base_insert_op.h" #include "common/dynamic_aipp.h" #include "common/ge/ge_util.h" -#include "common/util.h" +#include "framework/common/util.h" #include "common/util/error_manager/error_manager.h" #include "external/graph/operator_factory.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.h b/ge/graph/preprocess/insert_op/ge_aipp_op.h index 5e509dda..87f80291 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.h +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.h @@ -19,7 +19,7 @@ #include #include -#include "common/op/attr_value_util.h" +#include "framework/common/op/attr_value_util.h" #include "graph/preprocess/insert_op/base_insert_op.h" #include "proto/insert_op.pb.h" diff --git a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc index d76b79b9..3cd26139 100755 --- a/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc @@ -20,8 +20,8 @@ #include "common/dynamic_aipp.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/util.h" #include "common/util/error_manager/error_manager.h" #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" diff --git a/ge/graph/preprocess/multi_batch_options.cc b/ge/graph/preprocess/multi_batch_options.cc index b3e5b616..21cbc0c2 100644 --- a/ge/graph/preprocess/multi_batch_options.cc +++ b/ge/graph/preprocess/multi_batch_options.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "multi_batch_options.h" +#include "graph/preprocess/multi_batch_options.h" #include "framework/common/debug/ge_log.h" #include "framework/omg/omg_inner_types.h" diff --git a/ge/host_kernels/broadcast_args_kernel.cc b/ge/host_kernels/broadcast_args_kernel.cc index d8880db9..796142f4 100644 --- a/ge/host_kernels/broadcast_args_kernel.cc +++ b/ge/host_kernels/broadcast_args_kernel.cc @@ -18,9 +18,9 @@ #include -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" #include "graph/passes/pass_utils.h" diff --git a/ge/host_kernels/broadcast_gradient_args_kernel.cc b/ge/host_kernels/broadcast_gradient_args_kernel.cc index 51ff4a4c..59993171 100644 --- a/ge/host_kernels/broadcast_gradient_args_kernel.cc +++ b/ge/host_kernels/broadcast_gradient_args_kernel.cc @@ -17,9 +17,9 @@ #include -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/cast_kernel.cc b/ge/host_kernels/cast_kernel.cc index 056081a1..3f09974f 100644 --- a/ge/host_kernels/cast_kernel.cc +++ b/ge/host_kernels/cast_kernel.cc @@ -19,13 +19,13 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/fp16_t.h" -#include "common/op/ge_op_utils.h" -#include 
"common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/concat_offset_kernel.cc b/ge/host_kernels/concat_offset_kernel.cc index b6940eb4..79552183 100644 --- a/ge/host_kernels/concat_offset_kernel.cc +++ b/ge/host_kernels/concat_offset_kernel.cc @@ -18,9 +18,9 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/concat_v2_kernel.cc b/ge/host_kernels/concat_v2_kernel.cc index 234d8c8a..c5a7d889 100644 --- a/ge/host_kernels/concat_v2_kernel.cc +++ b/ge/host_kernels/concat_v2_kernel.cc @@ -19,9 +19,9 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/fp16_t.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/dynamic_stitch_kernel.cc b/ge/host_kernels/dynamic_stitch_kernel.cc index 52f6cdcf..0313c856 100644 --- a/ge/host_kernels/dynamic_stitch_kernel.cc +++ b/ge/host_kernels/dynamic_stitch_kernel.cc @@ -20,10 +20,10 @@ #include #include "common/fp16_t.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/math/math_util.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/empty_kernel.cc b/ge/host_kernels/empty_kernel.cc index 61310abc..68ba7f9f 100644 --- a/ge/host_kernels/empty_kernel.cc +++ b/ge/host_kernels/empty_kernel.cc @@ -19,8 +19,8 @@ #include #include "common/fp16_t.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "host_kernels/kernel_utils.h" diff --git a/ge/host_kernels/expanddims_kernel.cc b/ge/host_kernels/expanddims_kernel.cc index f304fbdb..d6ea0287 100644 --- a/ge/host_kernels/expanddims_kernel.cc +++ b/ge/host_kernels/expanddims_kernel.cc @@ -18,9 +18,9 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/fill_kernel.cc b/ge/host_kernels/fill_kernel.cc index 0022791c..e41c5bf3 100644 --- a/ge/host_kernels/fill_kernel.cc +++ b/ge/host_kernels/fill_kernel.cc @@ -20,8 +20,8 @@ #include #include "common/fp16_t.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" +#include 
"framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/floordiv_kernel.cc b/ge/host_kernels/floordiv_kernel.cc index df381212..566a45a3 100644 --- a/ge/host_kernels/floordiv_kernel.cc +++ b/ge/host_kernels/floordiv_kernel.cc @@ -21,8 +21,8 @@ #include #include -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/floormod_kernel.cc b/ge/host_kernels/floormod_kernel.cc index 31e4e19b..bef6d014 100644 --- a/ge/host_kernels/floormod_kernel.cc +++ b/ge/host_kernels/floormod_kernel.cc @@ -19,8 +19,8 @@ #include #include -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/gather_v2_kernel.cc b/ge/host_kernels/gather_v2_kernel.cc index 5702954c..45445143 100644 --- a/ge/host_kernels/gather_v2_kernel.cc +++ b/ge/host_kernels/gather_v2_kernel.cc @@ -20,10 +20,10 @@ #include #include "common/fp16_t.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/greater_kernel.cc b/ge/host_kernels/greater_kernel.cc index a245ec8d..3e62db04 100644 --- a/ge/host_kernels/greater_kernel.cc +++ b/ge/host_kernels/greater_kernel.cc @@ -19,10 +19,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/fp16_t.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/identity_kernel.cc b/ge/host_kernels/identity_kernel.cc index ef1446a8..30f55027 100644 --- a/ge/host_kernels/identity_kernel.cc +++ b/ge/host_kernels/identity_kernel.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "identity_kernel.h" +#include "host_kernels/identity_kernel.h" #include "inc/kernel_factory.h" #include "framework/common/types.h" diff --git a/ge/host_kernels/kernel_utils.cc b/ge/host_kernels/kernel_utils.cc index 595f9517..6447fa43 100755 --- a/ge/host_kernels/kernel_utils.cc +++ b/ge/host_kernels/kernel_utils.cc @@ -18,8 +18,8 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/host_kernels/kernel_utils.h b/ge/host_kernels/kernel_utils.h index c9c90634..7a7545ea 100755 --- a/ge/host_kernels/kernel_utils.h +++ b/ge/host_kernels/kernel_utils.h @@ -20,8 +20,8 @@ #include #include -#include "common/ge_inner_error_codes.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/compute_graph.h" diff --git a/ge/host_kernels/maximum_kernel.cc b/ge/host_kernels/maximum_kernel.cc index 2ced113f..314bc7be 100644 --- a/ge/host_kernels/maximum_kernel.cc +++ b/ge/host_kernels/maximum_kernel.cc @@ -19,10 +19,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/fp16_t.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/mul_kernel.cc b/ge/host_kernels/mul_kernel.cc index b01a5c79..e3657197 100644 --- a/ge/host_kernels/mul_kernel.cc +++ b/ge/host_kernels/mul_kernel.cc @@ -19,10 +19,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/math/math_util.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/pack_kernel.cc b/ge/host_kernels/pack_kernel.cc index bf7a2a1f..103f4029 100644 --- a/ge/host_kernels/pack_kernel.cc +++ b/ge/host_kernels/pack_kernel.cc @@ -18,10 +18,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/utils/formats_trans_utils.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" #include "host_kernels/kernel_utils.h" diff --git a/ge/host_kernels/permute_kernel.cc b/ge/host_kernels/permute_kernel.cc index 327c94f8..93d56415 100755 --- a/ge/host_kernels/permute_kernel.cc +++ b/ge/host_kernels/permute_kernel.cc @@ -19,11 +19,11 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "framework/common/debug/ge_log.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "graph/common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff 
--git a/ge/host_kernels/range_kernel.cc b/ge/host_kernels/range_kernel.cc index 97254fff..d8f8200a 100644 --- a/ge/host_kernels/range_kernel.cc +++ b/ge/host_kernels/range_kernel.cc @@ -19,10 +19,10 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/fp16_t.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/rank_kernel.cc b/ge/host_kernels/rank_kernel.cc index b246b976..9bc404f3 100755 --- a/ge/host_kernels/rank_kernel.cc +++ b/ge/host_kernels/rank_kernel.cc @@ -19,12 +19,12 @@ #include #include -#include "graph/types.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" +#include "external/graph/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "inc/kernel_factory.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" #include "framework/common/types.h" namespace { diff --git a/ge/host_kernels/reduce_prod_kernel.cc b/ge/host_kernels/reduce_prod_kernel.cc index 4837a921..efe48997 100644 --- a/ge/host_kernels/reduce_prod_kernel.cc +++ b/ge/host_kernels/reduce_prod_kernel.cc @@ -20,8 +20,8 @@ #include #include "common/math/math_util.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "host_kernels/kernel_utils.h" diff --git a/ge/host_kernels/reformat_kernel.cc b/ge/host_kernels/reformat_kernel.cc index c1942983..b841ed39 100644 --- a/ge/host_kernels/reformat_kernel.cc +++ b/ge/host_kernels/reformat_kernel.cc @@ -17,10 +17,10 @@ #include "host_kernels/reformat_kernel.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/ge/ge_util.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/reshape_kernel.cc b/ge/host_kernels/reshape_kernel.cc index 7c4f58f6..bead8468 100644 --- a/ge/host_kernels/reshape_kernel.cc +++ b/ge/host_kernels/reshape_kernel.cc @@ -16,9 +16,9 @@ #include "host_kernels/reshape_kernel.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/rsqrt_kernel.cc b/ge/host_kernels/rsqrt_kernel.cc index 74c78787..f1f74a99 100755 --- a/ge/host_kernels/rsqrt_kernel.cc +++ b/ge/host_kernels/rsqrt_kernel.cc @@ -19,10 +19,10 @@ #include -#include "common/debug/ge_log.h" -#include "common/debug/log.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" +#include 
"framework/common/debug/ge_log.h" +#include "framework/common/debug/log.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/size_kernel.cc b/ge/host_kernels/size_kernel.cc index caa5febc..9f7bc0ff 100644 --- a/ge/host_kernels/size_kernel.cc +++ b/ge/host_kernels/size_kernel.cc @@ -19,8 +19,8 @@ #include #include -#include "common/debug/log.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/debug/log.h" +#include "framework/common/op/ge_op_utils.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" @@ -28,7 +28,7 @@ #include "host_kernels/kernel_utils.h" #include "graph/passes/pass_utils.h" #include "inc/kernel_factory.h" -#include "omg/omg_inner_types.h" +#include "framework/omg/omg_inner_types.h" namespace ge { namespace { diff --git a/ge/host_kernels/slice_d_kernel.cc b/ge/host_kernels/slice_d_kernel.cc index b8572290..60caac38 100644 --- a/ge/host_kernels/slice_d_kernel.cc +++ b/ge/host_kernels/slice_d_kernel.cc @@ -19,8 +19,8 @@ #include #include "common/fp16_t.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/host_kernels/slice_kernel.cc b/ge/host_kernels/slice_kernel.cc index 6e398e96..0b1e7325 100644 --- a/ge/host_kernels/slice_kernel.cc +++ b/ge/host_kernels/slice_kernel.cc @@ -18,10 +18,10 @@ #include -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/type_utils.h" #include "host_kernels/kernel_utils.h" diff --git a/ge/host_kernels/squeeze_kernel.cc b/ge/host_kernels/squeeze_kernel.cc index 4a2c6725..852a46a1 100644 --- a/ge/host_kernels/squeeze_kernel.cc +++ b/ge/host_kernels/squeeze_kernel.cc @@ -16,9 +16,9 @@ #include "host_kernels/squeeze_kernel.h" -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/ssd_prior_box_kernel.cc b/ge/host_kernels/ssd_prior_box_kernel.cc index 3661fa9d..496cc185 100644 --- a/ge/host_kernels/ssd_prior_box_kernel.cc +++ b/ge/host_kernels/ssd_prior_box_kernel.cc @@ -23,7 +23,7 @@ #include "common/math/math_util.h" #include "common/math_util.h" -#include "common/types.h" +#include "framework/common/types.h" #include "framework/common/util.h" #include "graph/debug/ge_attr_define.h" #include "graph/passes/pass_utils.h" diff --git a/ge/host_kernels/sub_kernel.cc b/ge/host_kernels/sub_kernel.cc index deb36cb3..84c334b0 100644 --- a/ge/host_kernels/sub_kernel.cc +++ b/ge/host_kernels/sub_kernel.cc @@ -20,9 +20,9 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include 
"common/math/math_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/transdata_kernel.cc b/ge/host_kernels/transdata_kernel.cc index 2b16b075..a06db78b 100644 --- a/ge/host_kernels/transdata_kernel.cc +++ b/ge/host_kernels/transdata_kernel.cc @@ -19,13 +19,13 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" #include "common/fp16_t.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/common/bcast.h" diff --git a/ge/host_kernels/transpose_kernel.cc b/ge/host_kernels/transpose_kernel.cc index 03d112aa..9291ecf5 100755 --- a/ge/host_kernels/transpose_kernel.cc +++ b/ge/host_kernels/transpose_kernel.cc @@ -17,13 +17,13 @@ #include "host_kernels/transpose_kernel.h" #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/formats/format_transfers/format_transfer_transpose.h" #include "common/formats/formats.h" #include "common/formats/utils/formats_trans_utils.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "host_kernels/kernel_utils.h" diff --git a/ge/host_kernels/unpack_kernel.cc b/ge/host_kernels/unpack_kernel.cc index 1c28151f..a90e3616 100755 --- a/ge/host_kernels/unpack_kernel.cc +++ b/ge/host_kernels/unpack_kernel.cc @@ -15,10 +15,10 @@ */ #include "host_kernels/unpack_kernel.h" -#include "common/debug/ge_log.h" -#include "common/op/ge_op_utils.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/debug/ge_log.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/unsqueeze_kernel.cc b/ge/host_kernels/unsqueeze_kernel.cc index 4ceaba3f..d2b0d05f 100644 --- a/ge/host_kernels/unsqueeze_kernel.cc +++ b/ge/host_kernels/unsqueeze_kernel.cc @@ -16,9 +16,9 @@ #include "host_kernels/unsqueeze_kernel.h" #include -#include "common/ge_inner_error_codes.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "host_kernels/kernel_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/hybrid/common/npu_memory_allocator.cc b/ge/hybrid/common/npu_memory_allocator.cc index b66038d9..8a9aa0cc 100644 --- a/ge/hybrid/common/npu_memory_allocator.cc +++ b/ge/hybrid/common/npu_memory_allocator.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "npu_memory_allocator.h" +#include "hybrid/common/npu_memory_allocator.h" #include #include "framework/common/debug/log.h" #include "graph/manager/graph_mem_manager.h" diff --git a/ge/hybrid/common/npu_memory_allocator.h b/ge/hybrid/common/npu_memory_allocator.h index 55cb13ad..8df89108 100644 --- a/ge/hybrid/common/npu_memory_allocator.h +++ b/ge/hybrid/common/npu_memory_allocator.h @@ -23,7 +23,7 @@ #include #include #include "external/ge/ge_api_error_codes.h" -#include "memory/memory_api.h" +#include "framework/memory/memory_api.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/common/tensor_value.h b/ge/hybrid/common/tensor_value.h index 348e4e6d..c20074fd 100644 --- a/ge/hybrid/common/tensor_value.h +++ b/ge/hybrid/common/tensor_value.h @@ -20,7 +20,7 @@ #include #include #include -#include "memory/memory_api.h" +#include "framework/memory/memory_api.h" #include "framework/common/util.h" namespace ge { diff --git a/ge/hybrid/executor/hybrid_execution_context.cc b/ge/hybrid/executor/hybrid_execution_context.cc index 0f978bf8..2de9b1ce 100644 --- a/ge/hybrid/executor/hybrid_execution_context.cc +++ b/ge/hybrid/executor/hybrid_execution_context.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "hybrid_execution_context.h" +#include "hybrid/executor/hybrid_execution_context.h" #include namespace ge { diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index 930412e3..1519a880 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -19,7 +19,7 @@ #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" #include "graph/ge_context.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/graph_caching_allocator.h" #include "graph/manager/graph_mem_allocator.h" diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index 9bf70d26..58da451c 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "hybrid_model_executor.h" +#include "hybrid/executor/hybrid_model_executor.h" #include "graph/ge_context.h" #include "graph/runtime_inference_context.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc index c0bd5c7d..45e61138 100644 --- a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc +++ b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc @@ -1,4 +1,20 @@ -#include "hybrid_model_pipeline_executor.h" +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "hybrid/executor/hybrid_model_pipeline_executor.h"
 #include "common/math/math_util.h"
 #include "common/dump/dump_manager.h"
diff --git a/ge/hybrid/executor/hybrid_model_pipeline_executor.h b/ge/hybrid/executor/hybrid_model_pipeline_executor.h
index c59e1462..f694c4e4 100644
--- a/ge/hybrid/executor/hybrid_model_pipeline_executor.h
+++ b/ge/hybrid/executor/hybrid_model_pipeline_executor.h
@@ -1,3 +1,19 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #ifndef GE_HYBRID_EXECUTOR_HYBRID_MODEL_PIPELINE_EXECUTOR_H_
 #define GE_HYBRID_EXECUTOR_HYBRID_MODEL_PIPELINE_EXECUTOR_H_
@@ -6,7 +22,7 @@
 #include "hybrid/executor/hybrid_execution_context.h"
 #include "hybrid/executor/rt_callback_manager.h"
 #include "hybrid/executor/subgraph_executor.h"
-#include "hybrid_model_executor.h"
+#include "hybrid/executor/hybrid_model_executor.h"
 namespace ge {
 namespace hybrid {
diff --git a/ge/hybrid/executor/hybrid_profiler.cc b/ge/hybrid/executor/hybrid_profiler.cc
index 384dc770..f9231a39 100644
--- a/ge/hybrid/executor/hybrid_profiler.cc
+++ b/ge/hybrid/executor/hybrid_profiler.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
-#include "hybrid_profiler.h"
+#include "hybrid/executor/hybrid_profiler.h"
 #include
 #include
 #include
diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h
index b80b60b0..727402f1 100644
--- a/ge/hybrid/executor/node_state.h
+++ b/ge/hybrid/executor/node_state.h
@@ -24,7 +24,7 @@
 #include "common/blocking_queue.h"
 #include "external/ge/ge_api_error_codes.h"
 #include "hybrid/model/node_item.h"
-#include "node_done_manager.h"
+#include "hybrid/executor/node_done_manager.h"
 namespace ge {
 namespace hybrid {
diff --git a/ge/hybrid/executor/rt_callback_manager.h b/ge/hybrid/executor/rt_callback_manager.h
index 9c062134..15b0dede 100644
--- a/ge/hybrid/executor/rt_callback_manager.h
+++ b/ge/hybrid/executor/rt_callback_manager.h
@@ -23,7 +23,7 @@
 #include
 #include "common/blocking_queue.h"
-#include "ge/ge_api_error_codes.h"
+#include "external/ge/ge_api_error_codes.h"
 #include "runtime/rt.h"
 namespace ge {
diff --git a/ge/hybrid/executor/subgraph_context.cc b/ge/hybrid/executor/subgraph_context.cc
index 5e97a9a2..4b748e3f 100644
--- a/ge/hybrid/executor/subgraph_context.cc
+++ b/ge/hybrid/executor/subgraph_context.cc
@@ -14,7 +14,7 @@
 * limitations under the License.
*/ -#include "subgraph_context.h" +#include "hybrid/executor/subgraph_context.h" #include "hybrid/executor/hybrid_model_executor.h" namespace ge { diff --git a/ge/hybrid/hybrid_davinci_model.cc b/ge/hybrid/hybrid_davinci_model.cc index 7368784c..c4500b6d 100755 --- a/ge/hybrid/hybrid_davinci_model.cc +++ b/ge/hybrid/hybrid_davinci_model.cc @@ -15,7 +15,7 @@ */ #include -#include "hybrid_davinci_model.h" +#include "hybrid/hybrid_davinci_model.h" #include "hybrid/model/hybrid_model.h" #include "hybrid/executor/hybrid_model_async_executor.h" #include "hybrid/node_executor/node_executor.h" diff --git a/ge/hybrid/hybrid_davinci_model_stub.cc b/ge/hybrid/hybrid_davinci_model_stub.cc index 67cd29b8..b8a2f242 100644 --- a/ge/hybrid/hybrid_davinci_model_stub.cc +++ b/ge/hybrid/hybrid_davinci_model_stub.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "hybrid_davinci_model.h" +#include "hybrid/hybrid_davinci_model.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/model/graph_item.cc b/ge/hybrid/model/graph_item.cc index c38e0a0d..ca23108d 100644 --- a/ge/hybrid/model/graph_item.cc +++ b/ge/hybrid/model/graph_item.cc @@ -15,7 +15,7 @@ */ #include "framework/common/util.h" -#include "graph_item.h" +#include "hybrid/model/graph_item.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/model/hybrid_model.cc b/ge/hybrid/model/hybrid_model.cc index 5e496c3b..e6ddbb8d 100644 --- a/ge/hybrid/model/hybrid_model.cc +++ b/ge/hybrid/model/hybrid_model.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "hybrid_model.h" +#include "hybrid/model/hybrid_model.h" #include #include "graph/debug/ge_attr_define.h" #include "graph/load/model_manager/model_utils.h" @@ -25,7 +25,7 @@ #include "hybrid/common/npu_memory_allocator.h" #include "hybrid/model/hybrid_model_builder.h" #include "hybrid/node_executor/node_executor.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 5337a0cf..d3f00253 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -17,7 +17,7 @@ #include "hybrid/model/hybrid_model_builder.h" #include #include "common/math/math_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/ge_context.h" #include "graph/build/memory/var_mem_assign_util.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc index cef06fc6..5c3d7db3 100644 --- a/ge/hybrid/model/node_item.cc +++ b/ge/hybrid/model/node_item.cc @@ -14,9 +14,9 @@ * limitations under the License. */ -#include "node_item.h" +#include "hybrid/model/node_item.h" #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "graph/common/omg_util.h" #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc index 7ebb9e39..c2ce24a4 100755 --- a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc +++ b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "aicore_node_executor.h" +#include "hybrid/node_executor/aicore/aicore_node_executor.h" #include "framework/common/taskdown_common.h" #include "hybrid/executor/hybrid_execution_context.h" #include "external/runtime/rt_error_codes.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index 76082cb3..bca79d7f 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -21,7 +21,7 @@ #include "hybrid/executor/hybrid_execution_context.h" #include "hybrid/node_executor/aicore/aicore_task_builder.h" #include "graph/load/model_manager/tbe_handle_store.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "single_op/task/build_task_utils.h" #include "single_op/task/tbe_task_builder.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index 3c8db8c9..b03bd9e4 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -19,7 +19,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "runtime/stream.h" #include "hybrid/common/tensor_value.h" #include "hybrid/node_executor/task_context.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_task_builder.cc b/ge/hybrid/node_executor/aicore/aicore_task_builder.cc index 114451b3..0ba71fe4 100755 --- a/ge/hybrid/node_executor/aicore/aicore_task_builder.cc +++ b/ge/hybrid/node_executor/aicore/aicore_task_builder.cc @@ -14,9 +14,9 @@ * limitations under the License. */ -#include "aicore_task_builder.h" -#include "common/debug/log.h" -#include "aicore_node_executor.h" +#include "hybrid/node_executor/aicore/aicore_task_builder.h" +#include "framework/common/debug/log.h" +#include "hybrid/node_executor/aicore/aicore_node_executor.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/node_executor/aicore/aicore_task_builder.h b/ge/hybrid/node_executor/aicore/aicore_task_builder.h index 6a472a21..e57538ba 100755 --- a/ge/hybrid/node_executor/aicore/aicore_task_builder.h +++ b/ge/hybrid/node_executor/aicore/aicore_task_builder.h @@ -19,7 +19,7 @@ #include #include -#include "aicore_op_task.h" +#include "hybrid/node_executor/aicore/aicore_op_task.h" #include "framework/common/debug/ge_log.h" #include "graph/utils/attr_utils.h" #include "graph/op_kernel_bin.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_task_compiler.cc b/ge/hybrid/node_executor/aicore/aicore_task_compiler.cc index 0cdea5d5..43563bb6 100755 --- a/ge/hybrid/node_executor/aicore/aicore_task_compiler.cc +++ b/ge/hybrid/node_executor/aicore/aicore_task_compiler.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "aicore_task_compiler.h" +#include "hybrid/node_executor/aicore/aicore_task_compiler.h" #include "framework/common/debug/log.h" #include "graph/debug/ge_attr_define.h" #include "opskernel_manager/ops_kernel_builder_manager.h" diff --git a/ge/hybrid/node_executor/aicore/aicore_task_compiler.h b/ge/hybrid/node_executor/aicore/aicore_task_compiler.h index 4cb4dc58..2778aeb0 100755 --- a/ge/hybrid/node_executor/aicore/aicore_task_compiler.h +++ b/ge/hybrid/node_executor/aicore/aicore_task_compiler.h @@ -19,7 +19,7 @@ #include #include "opskernel_manager/ops_kernel_manager.h" -#include "aicore_node_executor.h" +#include "hybrid/node_executor/aicore/aicore_node_executor.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h index 7577d486..14bc8fcc 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h @@ -20,7 +20,7 @@ #include "external/graph/types.h" #include "cce/aicpu_engine_struct.h" #include "hybrid/node_executor/node_executor.h" -#include "aicpu_ext_info.h" +#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 8b839849..4db223e0 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -20,7 +20,7 @@ #include "framework/common/fmk_error_codes.h" #include "common/dump/dump_manager.h" #include "common/ge/ge_util.h" -#include "graph/attr_value.h" +#include "external/graph/attr_value.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/load/model_manager/model_utils.h" diff --git a/ge/hybrid/node_executor/controlop/control_op_executor.cc b/ge/hybrid/node_executor/controlop/control_op_executor.cc index d55607ff..fa44d761 100644 --- a/ge/hybrid/node_executor/controlop/control_op_executor.cc +++ b/ge/hybrid/node_executor/controlop/control_op_executor.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "control_op_executor.h" +#include "hybrid/node_executor/controlop/control_op_executor.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" #include "hybrid/executor/hybrid_execution_context.h" diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc index 72092cd8..522d0649 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc @@ -17,11 +17,11 @@ #include "hybrid/node_executor/hccl/hccl_node_executor.h" #include "common/ge/plugin_manager.h" #include "common/math/math_util.h" -#include "graph/attr_value.h" +#include "external/graph/attr_value.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/util/hcom_util.h" #include "graph/utils/type_utils.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "hybrid/executor/hybrid_execution_context.h" #include "hccl/hcom.h" diff --git a/ge/hybrid/node_executor/node_executor.h b/ge/hybrid/node_executor/node_executor.h index fffd4e7d..ad4a9296 100644 --- a/ge/hybrid/node_executor/node_executor.h +++ b/ge/hybrid/node_executor/node_executor.h @@ -20,7 +20,7 @@ #include "external/ge/ge_api_error_codes.h" #include "common/opskernel/ops_kernel_builder.h" #include "graph/node.h" -#include "task_context.h" +#include "hybrid/node_executor/task_context.h" namespace ge { const uint32_t MEMORY_ALIGN_RATIO = 2; diff --git a/ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc b/ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc index 28a5dea1..ad1f7e61 100755 --- a/ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc +++ b/ge/hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "partitioned_call_node_executor.h" +#include "hybrid/node_executor/partitioned_call/partitioned_call_node_executor.h" #include "graph/utils/node_utils.h" namespace ge { diff --git a/ge/hybrid/node_executor/rts/rts_node_executor.cc b/ge/hybrid/node_executor/rts/rts_node_executor.cc index 3ad791b6..5cd971df 100644 --- a/ge/hybrid/node_executor/rts/rts_node_executor.cc +++ b/ge/hybrid/node_executor/rts/rts_node_executor.cc @@ -17,9 +17,9 @@ #include "hybrid/node_executor/rts/rts_node_executor.h" #include "hybrid/node_executor/rts/rts_task_factory.h" -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" -#include "common/types.h" +#include "framework/common/types.h" #include "graph/common/omg_util.h" #include "graph/utils/tensor_utils.h" #include "hybrid/model/hybrid_model.h" diff --git a/ge/hybrid/node_executor/rts/rts_node_task.cc b/ge/hybrid/node_executor/rts/rts_node_task.cc index 104196ee..9af54815 100644 --- a/ge/hybrid/node_executor/rts/rts_node_task.cc +++ b/ge/hybrid/node_executor/rts/rts_node_task.cc @@ -22,7 +22,7 @@ #include "graph/utils/type_utils.h" #include "graph/utils/node_utils.h" #include "common/ge/ge_util.h" -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" namespace { constexpr uint8_t kSwitchPredIndex = 0; diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index fe580c1e..78ccb54a 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ b/ge/hybrid/node_executor/task_context.cc @@ -14,11 +14,11 @@ * limitations under the License. 
*/ -#include "task_context.h" +#include "hybrid/node_executor/task_context.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/log.h" #include "graph/utils/tensor_utils.h" -#include "graph/types.h" +#include "external/graph/types.h" #include "graph/debug/ge_attr_define.h" #include "hybrid/executor/hybrid_execution_context.h" #include "hybrid/executor/subgraph_executor.h" diff --git a/ge/inc/graph_pass.h b/ge/inc/graph_pass.h index 642b94ea..a9cc7a32 100644 --- a/ge/inc/graph_pass.h +++ b/ge/inc/graph_pass.h @@ -20,9 +20,9 @@ #include #include -#include "common/op/attr_value_util.h" -#include "common/op/ge_op_utils.h" -#include "common/types.h" +#include "framework/common/op/attr_value_util.h" +#include "framework/common/op/ge_op_utils.h" +#include "framework/common/types.h" #include "framework/common/debug/ge_log.h" #include "graph/compute_graph.h" #include "graph/utils/attr_utils.h" diff --git a/ge/inc/kernel.h b/ge/inc/kernel.h index 84af5234..a83776a9 100644 --- a/ge/inc/kernel.h +++ b/ge/inc/kernel.h @@ -19,9 +19,9 @@ #include -#include "common/op/ge_op_utils.h" +#include "framework/common/op/ge_op_utils.h" #include "graph/compute_graph.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/op_desc.h" using std::vector; diff --git a/ge/inc/kernel_factory.h b/ge/inc/kernel_factory.h index 61455836..e532b894 100644 --- a/ge/inc/kernel_factory.h +++ b/ge/inc/kernel_factory.h @@ -24,7 +24,7 @@ #include "common/ge/ge_util.h" #include "framework/common/debug/ge_log.h" -#include "graph/graph.h" +#include "external/graph/graph.h" using std::string; diff --git a/ge/inc/pass.h b/ge/inc/pass.h index 9f8519e1..56f77fef 100644 --- a/ge/inc/pass.h +++ b/ge/inc/pass.h @@ -19,7 +19,7 @@ #include -#include "common/fmk_error_codes.h" +#include "framework/common/fmk_error_codes.h" namespace ge { /// diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index 2374e75f..b34871a9 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -33,7 +33,7 @@ #include "framework/common/util.h" #include "framework/omg/ge_init.h" #include "analyzer/analyzer.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "ge_local_engine/engine/host_cpu_engine.h" #include "graph/common/ge_call_wrapper.h" #include "graph/ge_context.h" diff --git a/ge/init/gelib.h b/ge/init/gelib.h index ed6fe5d4..eb367578 100644 --- a/ge/init/gelib.h +++ b/ge/init/gelib.h @@ -23,8 +23,8 @@ #include "engine_manager/dnnengine_manager.h" #include "opskernel_manager/ops_kernel_manager.h" #include "session/session_manager.h" -#include "common/ge_inner_error_codes.h" -#include "common/ge_types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "framework/common/ge_types.h" using std::string; using std::map; diff --git a/ge/ir_build/attr_options/attr_options.h b/ge/ir_build/attr_options/attr_options.h index 7c0f4f4f..b1b794c0 100644 --- a/ge/ir_build/attr_options/attr_options.h +++ b/ge/ir_build/attr_options/attr_options.h @@ -1,29 +1,29 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ATTR_OPTIONS_H_
-#define ATTR_OPTIONS_H_
-
-#include
-#include "graph/compute_graph.h"
-#include "graph/ge_error_codes.h"
-
-namespace ge {
-bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name);
-
-graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path);
-graphStatus WeightCompressFunc(ComputeGraphPtr &graph, const std::string &cfg_path);
-} // namespace
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ATTR_OPTIONS_H_
+#define ATTR_OPTIONS_H_
+
+#include
+#include "graph/compute_graph.h"
+#include "external/graph/ge_error_codes.h"
+
+namespace ge {
+bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name);
+
+graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path);
+graphStatus WeightCompressFunc(ComputeGraphPtr &graph, const std::string &cfg_path);
+} // namespace
 #endif // ATTR_OPTIONS_H_
\ No newline at end of file
diff --git a/ge/ir_build/attr_options/keep_dtype_option.cc b/ge/ir_build/attr_options/keep_dtype_option.cc
index dfdd0df3..9da08cc0 100644
--- a/ge/ir_build/attr_options/keep_dtype_option.cc
+++ b/ge/ir_build/attr_options/keep_dtype_option.cc
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include "attr_options.h"
+#include "ir_build/attr_options/attr_options.h"
 #include
 #include
 #include
diff --git a/ge/ir_build/attr_options/utils.cc b/ge/ir_build/attr_options/utils.cc
index f0b559ec..ed63ffe3 100644
--- a/ge/ir_build/attr_options/utils.cc
+++ b/ge/ir_build/attr_options/utils.cc
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include "attr_options.h"
+#include "ir_build/attr_options/attr_options.h"
 #include
 #include "graph/debug/ge_attr_define.h"
 #include "common/util/error_manager/error_manager.h"
diff --git a/ge/ir_build/attr_options/weight_compress_option.cc b/ge/ir_build/attr_options/weight_compress_option.cc
index 3c057d04..b59c6adc 100644
--- a/ge/ir_build/attr_options/weight_compress_option.cc
+++ b/ge/ir_build/attr_options/weight_compress_option.cc
@@ -13,7 +13,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ -#include "attr_options.h" +#include "ir_build/attr_options/attr_options.h" #include #include #include diff --git a/ge/ir_build/ge_ir_build.cc b/ge/ir_build/ge_ir_build.cc index befffa93..ea521f5b 100644 --- a/ge/ir_build/ge_ir_build.cc +++ b/ge/ir_build/ge_ir_build.cc @@ -25,8 +25,8 @@ #include "framework/common/util.h" #include "framework/omg/omg_inner_types.h" #include "framework/omg/omg_inner_types.h" -#include "ge/ge_api_types.h" -#include "generator/ge_generator.h" +#include "external/ge/ge_api_types.h" +#include "framework/generator/ge_generator.h" #include "graph/compute_graph.h" #include "graph/ge_tensor.h" #include "graph/utils/type_utils.h" diff --git a/ge/ir_build/option_utils.cc b/ge/ir_build/option_utils.cc index cecc2588..e2665eac 100755 --- a/ge/ir_build/option_utils.cc +++ b/ge/ir_build/option_utils.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "option_utils.h" +#include "ir_build/option_utils.h" #include "common/util/error_manager/error_manager.h" #include "external/ge/ge_api_types.h" #include "framework/common/string_util.h" diff --git a/ge/model/ge_model.cc b/ge/model/ge_model.cc index bcccc6f8..1bf35afc 100755 --- a/ge/model/ge_model.cc +++ b/ge/model/ge_model.cc @@ -16,7 +16,7 @@ #include "model/ge_model.h" #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/attr_utils.h" diff --git a/ge/model/ge_model.h b/ge/model/ge_model.h index 08db8cc3..6356c621 100755 --- a/ge/model/ge_model.h +++ b/ge/model/ge_model.h @@ -26,7 +26,7 @@ #include "framework/common/debug/log.h" #include "framework/common/fmk_error_codes.h" #include "graph/buffer.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "proto/task.pb.h" namespace ge { diff --git a/ge/model/ge_root_model.cc b/ge/model/ge_root_model.cc index 68f868dd..b6a1e175 100644 --- a/ge/model/ge_root_model.cc +++ b/ge/model/ge_root_model.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "ge_root_model.h" +#include "model/ge_root_model.h" #include "graph/debug/ge_attr_define.h" namespace ge { void GeRootModel::SetSubgraphInstanceNameToModel(string instance_name, GeModelPtr ge_model) { diff --git a/ge/offline/CMakeLists.txt b/ge/offline/CMakeLists.txt index a520652f..3a320226 100644 --- a/ge/offline/CMakeLists.txt +++ b/ge/offline/CMakeLists.txt @@ -30,25 +30,17 @@ target_compile_definitions(atc_atc.bin PRIVATE target_include_directories(atc_atc.bin PRIVATE ${CMAKE_CURRENT_LIST_DIR} - ${GE_CODE_DIR} ${GE_CODE_DIR}/ge ${GE_CODE_DIR}/inc/external - ${GE_CODE_DIR}/common/inc/external - ${GE_CODE_DIR}/common/inc/external/graph ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc - ${METADEF_DIR}/inc/graph - ${METADEF_DIR}/inc/register ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/external/register ${PARSER_DIR} ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/common #### blue zone #### ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain @@ -107,25 +99,17 @@ target_compile_definitions(fwk_atc.bin PRIVATE target_include_directories(fwk_atc.bin PRIVATE ${CMAKE_CURRENT_LIST_DIR} - ${GE_CODE_DIR} ${GE_CODE_DIR}/ge ${GE_CODE_DIR}/inc/external - ${GE_CODE_DIR}/common/inc/external - ${GE_CODE_DIR}/common/inc/external/graph ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework ${METADEF_DIR}/inc - ${METADEF_DIR}/inc/graph - ${METADEF_DIR}/inc/register ${METADEF_DIR}/inc/external - ${METADEF_DIR}/inc/external/graph - ${METADEF_DIR}/inc/external/register ${PARSER_DIR} ${CMAKE_BINARY_DIR} ${CMAKE_BINARY_DIR}/proto/graphengine_protos #### yellow zone #### ${GE_CODE_DIR}/../inc - ${GE_CODE_DIR}/../inc/common #### blue zone #### ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain diff --git a/ge/offline/main.cc b/ge/offline/main.cc index a1ae476b..80a71b7f 100755 --- a/ge/offline/main.cc +++ b/ge/offline/main.cc @@ -23,26 +23,26 @@ #include #include #include -#include "common/gflags_util.h" -#include "common/util.h" +#include "framework/common/gflags_util.h" +#include "framework/common/util.h" #include "common/util/error_manager/error_manager.h" #include "framework/common/debug/ge_log.h" -#include "ge/ge_api.h" -#include "generator/ge_generator.h" +#include "external/ge/ge_api.h" +#include "framework/generator/ge_generator.h" #include "graph/anchor.h" #include "graph/debug/ge_attr_define.h" -#include "graph/graph.h" +#include "external/graph/graph.h" #include "graph/op_desc.h" #include "graph/utils/graph_utils.h" #include "graph/utils/type_utils.h" #include "init/gelib.h" #include "ir_build/option_utils.h" -#include "omg/omg.h" -#include "omg/parser/parser_factory.h" -#include "omg/parser/parser_inner_ctx.h" +#include "framework/omg/omg.h" +#include "framework/omg/parser/parser_factory.h" +#include "framework/omg/parser/parser_inner_ctx.h" #include "parser/common/register_tbe.h" #include "register/op_registry.h" -#include "single_op_parser.h" +#include "offline/single_op_parser.h" #include "external/ge/ge_ir_build.h" using domi::BuildMode; diff --git a/ge/offline/single_op_parser.cc b/ge/offline/single_op_parser.cc index dac2e15c..6bc5cb3d 100644 --- a/ge/offline/single_op_parser.cc +++ b/ge/offline/single_op_parser.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "single_op_parser.h" +#include "offline/single_op_parser.h" #include #include @@ -24,7 +24,7 @@ #include "framework/common/debug/ge_log.h" #include "common/util/error_manager/error_manager.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/common/util.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/offline/single_op_parser.h b/ge/offline/single_op_parser.h index 11f5512e..25699552 100644 --- a/ge/offline/single_op_parser.h +++ b/ge/offline/single_op_parser.h @@ -21,8 +21,8 @@ #include -#include "ge/ge_api_error_codes.h" -#include "graph/types.h" +#include "external/ge/ge_api_error_codes.h" +#include "external/graph/types.h" #include "graph/ge_attr_value.h" #include "graph/op_desc.h" diff --git a/ge/opskernel_manager/ops_kernel_builder_manager.cc b/ge/opskernel_manager/ops_kernel_builder_manager.cc index 04262e1b..33ffddf5 100644 --- a/ge/opskernel_manager/ops_kernel_builder_manager.cc +++ b/ge/opskernel_manager/ops_kernel_builder_manager.cc @@ -15,7 +15,7 @@ */ #include "init/gelib.h" -#include "ops_kernel_builder_manager.h" +#include "opskernel_manager/ops_kernel_builder_manager.h" #include "register/ops_kernel_builder_registry.h" namespace ge { diff --git a/ge/opskernel_manager/ops_kernel_manager.cc b/ge/opskernel_manager/ops_kernel_manager.cc index ac5e9153..fc7bbdc2 100644 --- a/ge/opskernel_manager/ops_kernel_manager.cc +++ b/ge/opskernel_manager/ops_kernel_manager.cc @@ -24,9 +24,9 @@ #include #include #include -#include "../init/gelib.h" +#include "init/gelib.h" #include "framework/common/debug/ge_log.h" -#include "ge/ge_api.h" +#include "external/ge/ge_api.h" #include "proto/optimizer_priority.pb.h" namespace { diff --git a/ge/opskernel_manager/ops_kernel_manager.h b/ge/opskernel_manager/ops_kernel_manager.h index 19d703e3..5a72dc50 100644 --- a/ge/opskernel_manager/ops_kernel_manager.h +++ b/ge/opskernel_manager/ops_kernel_manager.h @@ -23,15 +23,15 @@ #include #include -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/ge/plugin_manager.h" #include "common/ge/op_tiling_manager.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "common/opskernel/ops_kernel_info_store.h" #include "common/optimizer/graph_optimizer.h" #include "graph/optimize/graph_optimize.h" #include "framework/common/ge_inner_error_codes.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "runtime/base.h" using std::string; diff --git a/ge/plugin/engine/CMakeLists.txt b/ge/plugin/engine/CMakeLists.txt index b4ea9c52..b8628ad1 100644 --- a/ge/plugin/engine/CMakeLists.txt +++ b/ge/plugin/engine/CMakeLists.txt @@ -24,9 +24,8 @@ target_compile_definitions(engine PRIVATE target_include_directories(engine PRIVATE ${GE_CODE_DIR}/ge - ${GE_CODE_DIR}/inc/ + ${GE_CODE_DIR}/inc ${GE_CODE_DIR}/inc/framework - ${GE_CODE_DIR}/inc/framework/common ${GE_CODE_DIR}/inc/external ${METADEF_DIR}/inc ${METADEF_DIR}/inc/external diff --git a/ge/plugin/engine/dnnengines.h b/ge/plugin/engine/dnnengines.h index 0633c104..829c83f1 100644 --- a/ge/plugin/engine/dnnengines.h +++ b/ge/plugin/engine/dnnengines.h @@ -21,7 +21,7 @@ #include #include -#include "engine/dnnengine.h" +#include "framework/engine/dnnengine.h" #include "plugin/engine/engine_manage.h" namespace ge { diff --git a/ge/plugin/engine/engine_manage.h b/ge/plugin/engine/engine_manage.h index 7eb88805..a047e5de 100644 --- 
a/ge/plugin/engine/engine_manage.h +++ b/ge/plugin/engine/engine_manage.h @@ -36,7 +36,7 @@ #include #include -#include "engine/dnnengine.h" +#include "framework/engine/dnnengine.h" namespace ge { using DNNEnginePtr = std::shared_ptr; diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index 8248eecf..aabbe19c 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -24,7 +24,7 @@ #include "adx_datadump_server.h" #include "common/dump/dump_properties.h" #include "common/dump/dump_manager.h" -#include "common/util.h" +#include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" diff --git a/ge/session/inner_session.h b/ge/session/inner_session.h index a2ec35df..35fe4692 100644 --- a/ge/session/inner_session.h +++ b/ge/session/inner_session.h @@ -21,7 +21,7 @@ #include #include #include "framework/common/ge_types.h" -#include "ge/ge_api_types.h" +#include "external/ge/ge_api_types.h" #include "graph/manager/graph_manager.h" namespace ge { diff --git a/ge/session/omg.cc b/ge/session/omg.cc index 878b0b39..f7f3def7 100755 --- a/ge/session/omg.cc +++ b/ge/session/omg.cc @@ -14,21 +14,21 @@ * limitations under the License. */ -#include "omg/omg.h" +#include "framework/omg/omg.h" #include #include #include #include "common/auth/file_saver.h" -#include "common/debug/log.h" +#include "framework/common/debug/log.h" #include "common/debug/memory_dumper.h" #include "common/ge/ge_util.h" -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "common/model_parser/model_parser.h" #include "common/model_saver.h" #include "common/properties_manager.h" -#include "common/string_util.h" -#include "common/types.h" -#include "common/util.h" +#include "framework/common/string_util.h" +#include "framework/common/types.h" +#include "framework/common/util.h" #include "common/util/error_manager/error_manager.h" #include "framework/common/debug/ge_log.h" #include "framework/omg/parser/parser_inner_ctx.h" @@ -39,10 +39,10 @@ #include "graph/optimize/common/params.h" #include "graph/utils/type_utils.h" #include "ir_build/option_utils.h" -#include "omg/omg_inner_types.h" -#include "omg/parser/model_parser.h" -#include "omg/parser/parser_factory.h" -#include "omg/parser/weights_parser.h" +#include "framework/omg/omg_inner_types.h" +#include "framework/omg/parser/model_parser.h" +#include "framework/omg/parser/parser_factory.h" +#include "framework/omg/parser/weights_parser.h" #include "parser/common/pre_checker.h" #include "parser/common/convert/pb2json.h" #include "proto/ge_ir.pb.h" diff --git a/ge/session/session_manager.h b/ge/session/session_manager.h index 17152b0a..4c3429c2 100644 --- a/ge/session/session_manager.h +++ b/ge/session/session_manager.h @@ -22,8 +22,8 @@ #include #include #include -#include "common/ge_inner_error_codes.h" -#include "ge/ge_api_types.h" +#include "framework/common/ge_inner_error_codes.h" +#include "external/ge/ge_api_types.h" #include "session/inner_session.h" #include "runtime/base.h" diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc index d09e8398..9df6d5dd 100755 --- a/ge/single_op/single_op.cc +++ b/ge/single_op/single_op.cc @@ -16,8 +16,8 @@ #include "single_op/single_op.h" -#include "common/fmk_types.h" -#include "common/ge_types.h" +#include "framework/common/fmk_types.h" +#include "framework/common/ge_types.h" #include "common/math/math_util.h" #include "common/profiling/profiling_manager.h" 
#include "framework/common/debug/ge_log.h" diff --git a/ge/single_op/single_op.h b/ge/single_op/single_op.h index deb4532e..7e05dd5f 100755 --- a/ge/single_op/single_op.h +++ b/ge/single_op/single_op.h @@ -23,10 +23,10 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "framework/executor/ge_executor.h" #include "runtime/stream.h" -#include "task/op_task.h" +#include "single_op/task/op_task.h" #include "cce/aicpu_engine_struct.h" #include "hybrid/executor/hybrid_model_executor.h" diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 90a6362c..eefa5165 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -28,10 +28,10 @@ #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" #include "runtime/rt.h" -#include "task/aicpu_task_builder.h" -#include "task/aicpu_kernel_task_builder.h" -#include "task/rts_kernel_task_builder.h" -#include "task/tbe_task_builder.h" +#include "single_op/task/aicpu_task_builder.h" +#include "single_op/task/aicpu_kernel_task_builder.h" +#include "single_op/task/rts_kernel_task_builder.h" +#include "single_op/task/tbe_task_builder.h" #include "hybrid/executor/hybrid_model_executor.h" #include "hybrid/node_executor/node_executor.h" diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index 529a442d..bf3ad050 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -23,7 +23,7 @@ #include #include -#include "common/helper/model_helper.h" +#include "framework/common/helper/model_helper.h" #include "single_op/single_op.h" #include "single_op/stream_resource.h" #include "single_op/task/op_task.h" diff --git a/ge/single_op/stream_resource.h b/ge/single_op/stream_resource.h index aecb38c8..8986634b 100755 --- a/ge/single_op/stream_resource.h +++ b/ge/single_op/stream_resource.h @@ -23,7 +23,7 @@ #include #include -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "runtime/stream.h" #include "single_op/single_op.h" diff --git a/ge/single_op/task/aicpu_kernel_task_builder.cc b/ge/single_op/task/aicpu_kernel_task_builder.cc index 18f13691..2f0856bf 100755 --- a/ge/single_op/task/aicpu_kernel_task_builder.cc +++ b/ge/single_op/task/aicpu_kernel_task_builder.cc @@ -17,7 +17,7 @@ #include "single_op/task/aicpu_kernel_task_builder.h" #include "framework/common/taskdown_common.h" #include "graph/load/model_manager/model_manager.h" -#include "build_task_utils.h" +#include "single_op/task/build_task_utils.h" namespace ge { AiCpuCCTaskBuilder::AiCpuCCTaskBuilder(const OpDescPtr &op_desc, const domi::KernelDef &kernel_def) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 66d70e7e..b189ab00 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -29,7 +29,7 @@ #include "framework/common/debug/log.h" #include "register/op_tiling.h" #include "runtime/rt.h" -#include "build_task_utils.h" +#include "single_op/task/build_task_utils.h" namespace ge { namespace { diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index ed6cf40f..2fbb4dc7 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -23,7 +23,7 @@ #include "common/dump/dump_op.h" #include "common/dump/dump_properties.h" -#include "common/ge_inner_error_codes.h" +#include "framework/common/ge_inner_error_codes.h" #include "graph/op_kernel_bin.h" #include "runtime/stream.h" #include 
"graph/node.h" diff --git a/ge/single_op/task/rts_kernel_task_builder.cc b/ge/single_op/task/rts_kernel_task_builder.cc index aad78fd9..07bcbd19 100644 --- a/ge/single_op/task/rts_kernel_task_builder.cc +++ b/ge/single_op/task/rts_kernel_task_builder.cc @@ -15,7 +15,7 @@ */ #include "single_op/task/rts_kernel_task_builder.h" -#include "build_task_utils.h" +#include "single_op/task/build_task_utils.h" namespace ge { namespace { diff --git a/inc/framework/common/profiling/ge_runner_profiling.h b/inc/framework/common/profiling/ge_runner_profiling.h index 011797a3..27e19bce 100644 --- a/inc/framework/common/profiling/ge_runner_profiling.h +++ b/inc/framework/common/profiling/ge_runner_profiling.h @@ -17,7 +17,7 @@ #ifndef INC_FRAMEWORK_COMMON_GE_RUNNER_PROFILING_H_ #define INC_FRAMEWORK_COMMON_GE_RUNNER_PROFILING_H_ -#include "profiling/ge_profiling.h" +#include "framework/common/profiling/ge_profiling.h" GE_FUNC_VISIBILITY bool IsInitialize(); From 109973df92d23d8bbbf88d29d5d0bd34a9bc6617 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 22 Jun 2021 11:21:13 +0800 Subject: [PATCH 064/226] Update submodule for proto --- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index f75dbad2..310610e5 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit f75dbad2f2249608080e482acc6d723e04fec3da +Subproject commit 310610e5392e01659d214ad380e9ed2c39f9f5a3 From af91789d4559ee3913dd07470bd918c84282e326 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E7=A3=8A?= Date: Mon, 21 Jun 2021 21:10:25 +0800 Subject: [PATCH 065/226] update protobuf to 3.13.0 --- cmake/external_libs/protobuf_static.cmake | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/external_libs/protobuf_static.cmake b/cmake/external_libs/protobuf_static.cmake index b8ff90bb..51f6ffbc 100755 --- a/cmake/external_libs/protobuf_static.cmake +++ b/cmake/external_libs/protobuf_static.cmake @@ -13,7 +13,7 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR endif() if(GE_PB_PKG) - set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.8.0.tar.gz") + set(REQ_URL "${GE_PB_PKG}/libs/protobuf/v3.13.0.tar.gz") else() if (ENABLE_GITEE) set(REQ_URL "https://gitee.com/mirrors/protobuf_source/repository/archive/v3.13.0.tar.gz") diff --git a/parser b/parser index db5ce472..79536a19 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit db5ce472de0086c3e2abdaab3b0685c1d2656c96 +Subproject commit 79536a196f89cf7a1f5852ff7304b9a7d7b12eff From beea153eb9be9a76e86233b192a2b25b65c5d8d3 Mon Sep 17 00:00:00 2001 From: liudingyan Date: Tue, 22 Jun 2021 12:53:55 +0800 Subject: [PATCH 066/226] atc test keepdtype --- ge/ir_build/attr_options/attr_options.h | 5 ++- ge/ir_build/attr_options/keep_dtype_option.cc | 35 +++++++++++----- ge/ir_build/attr_options/utils.cc | 39 +++++++++++++++++- tests/ut/ge/graph_ir/ge_ir_build_unittest.cc | 41 +++++++++++++++---- 4 files changed, 97 insertions(+), 23 deletions(-) diff --git a/ge/ir_build/attr_options/attr_options.h b/ge/ir_build/attr_options/attr_options.h index b1b794c0..9ea2b9a1 100644 --- a/ge/ir_build/attr_options/attr_options.h +++ b/ge/ir_build/attr_options/attr_options.h @@ -18,11 +18,12 @@ #include #include "graph/compute_graph.h" -#include "external/graph/ge_error_codes.h" +#include "graph/ge_error_codes.h" namespace ge { bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name); - +bool IsOpTypeEqual(const ge::NodePtr &node, const std::string &op_type); +bool IsContainOpType(const std::string &cfg_line, 
std::string &op_type); graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path); graphStatus WeightCompressFunc(ComputeGraphPtr &graph, const std::string &cfg_path); } // namespace diff --git a/ge/ir_build/attr_options/keep_dtype_option.cc b/ge/ir_build/attr_options/keep_dtype_option.cc index 9da08cc0..88f238c0 100644 --- a/ge/ir_build/attr_options/keep_dtype_option.cc +++ b/ge/ir_build/attr_options/keep_dtype_option.cc @@ -32,18 +32,24 @@ void KeepDtypeReportError(const std::vector &invalid_list, const st size_t list_size = invalid_list.size(); err_msg << "config file contains " << list_size; if (list_size == 1) { - err_msg << " operator not in the graph, op name:"; + err_msg << " operator not in the graph, "; } else { - err_msg << " operators not in the graph, op names:"; + err_msg << " operators not in the graph, "; } - + std::string cft_type; for (size_t i = 0; i < list_size; i++) { if (i == kMaxOpsNum) { err_msg << ".."; break; } - err_msg << invalid_list[i]; - if (i != list_size - 1) { + bool istype = IsContainOpType(invalid_list[i], cft_type); + if (!istype) { + err_msg << "op name:"; + } else { + err_msg << "op type:"; + } + err_msg << cft_type; + if (i != (list_size - 1)) { err_msg << " "; } } @@ -72,7 +78,7 @@ graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path) { return GRAPH_FAILED; } - std::string op_name; + std::string op_name, op_type; std::vector invalid_list; while (std::getline(ifs, op_name)) { if (op_name.empty()) { @@ -80,13 +86,20 @@ graphStatus KeepDtypeFunc(ComputeGraphPtr &graph, const std::string &cfg_path) { } op_name = StringUtils::Trim(op_name); bool is_find = false; - for (auto &node_ptr : graph->GetDirectNode()) { + bool is_type = IsContainOpType(op_name, op_type); + for (auto &node_ptr : graph->GetAllNodes()) { auto op_desc = node_ptr->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); - - if ((op_desc->GetName() == op_name) || IsOriginalOpFind(op_desc, op_name)) { - is_find = true; - (void)AttrUtils::SetInt(op_desc, ATTR_NAME_KEEP_DTYPE, 1); + if (is_type) { + if (IsOpTypeEqual(node_ptr, op_type)) { + is_find = true; + (void)AttrUtils::SetInt(op_desc, ATTR_NAME_KEEP_DTYPE, 1); + } + } else { + if (op_desc->GetName() == op_name || IsOriginalOpFind(op_desc, op_name)) { + is_find = true; + (void)AttrUtils::SetInt(op_desc, ATTR_NAME_KEEP_DTYPE, 1); + } } } if (!is_find) { diff --git a/ge/ir_build/attr_options/utils.cc b/ge/ir_build/attr_options/utils.cc index ed63ffe3..5398c220 100644 --- a/ge/ir_build/attr_options/utils.cc +++ b/ge/ir_build/attr_options/utils.cc @@ -16,9 +16,12 @@ #include "ir_build/attr_options/attr_options.h" #include #include "graph/debug/ge_attr_define.h" -#include "common/util/error_manager/error_manager.h" - +#include "framework/common/debug/ge_log.h" +#include "graph/common/omg_util.h" namespace ge { + namespace { + const std::string CFG_PRE_OPTYPE = "OpType::"; +} bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name) { std::vector original_op_names; if (!AttrUtils::GetListStr(op_desc, ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_op_names)) { @@ -33,4 +36,36 @@ bool IsOriginalOpFind(OpDescPtr &op_desc, const std::string &op_name) { return false; } + +bool IsOpTypeEqual(const ge::NodePtr &node, const std::string &op_type) { + if (op_type != node->GetOpDesc()->GetType()) { + return false; + } + std::string origin_type; + auto ret = GetOriginalType(node, origin_type); + if (ret != SUCCESS) { + GELOGW("[Get][OriginalType] from op:%s failed.", node->GetName().c_str()); + return false; + } 
+  if (op_type != origin_type) {
+    return false;
+  }
+  return true;
+}
+
+bool IsContainOpType(const std::string &cfg_line, std::string &op_type) {
+  op_type = cfg_line;
+  size_t pos = op_type.find(CFG_PRE_OPTYPE);
+  if (pos != std::string::npos) {
+    if (pos == 0) {
+      op_type = cfg_line.substr(CFG_PRE_OPTYPE.length());
+      return true;
+    } else {
+      GELOGW("[Check][Param] %s must be at the start of %s", CFG_PRE_OPTYPE.c_str(), cfg_line.c_str());
+    }
+    return false;
+  }
+  GELOGW("[Check][Param] %s does not contain an op type prefix", cfg_line.c_str());
+  return false;
+}
 } // namespace ge
\ No newline at end of file
diff --git a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
index 047c9e1d..197c9300 100644
--- a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
+++ b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
+#include
 #include "ir_build/option_utils.h"
 #include "graph/testcase/ge_graph/graph_builder_utils.h"
@@ -21,7 +21,7 @@
 #include "graph/utils/graph_utils.h"
 #include "ge/ge_ir_build.h"
 #include "graph/ops_stub.h"
-
+#include "ge/ir_build/attr_options/attr_options.h"
 #define protected public
 #define private public
@@ -70,6 +70,22 @@ static ComputeGraphPtr BuildComputeGraph() {
   return builder.GetGraph();
 }
+static ComputeGraphPtr BuildComputeGraph1() {
+  auto builder = ut::GraphBuilder("test");
+  auto data1 = builder.AddNode("input1", DATA, 1, 1, FORMAT_NCHW, DT_FLOAT, {1, 2, 3});
+  auto data2 = builder.AddNode("input2", DATA, 1, 1, FORMAT_NCHW, DT_FLOAT, {4, 10});
+  auto addn1 = builder.AddNode("addn1", AddNYes, 2, 1);
+  auto node1 = builder.AddNode("addd", "Mul", 2, 1);
+  auto node2 = builder.AddNode("ffm", "FrameworkOp", 2, 1);
+  auto netoutput = builder.AddNode("netoutput", NETOUTPUT, 1, 0);
+
+  builder.AddDataEdge(data1, 0, addn1, 0);
+  builder.AddDataEdge(data2, 0, addn1, 1);
+  builder.AddDataEdge(addn1, 0, netoutput, 0);
+
+  return builder.GetGraph();
+}
+
 // data not set attr index;
 // but because of op proto, register attr index.
so all data index is zero; static Graph BuildIrGraph() { @@ -89,10 +105,12 @@ static Graph BuildIrGraph1() { auto data1 = op::Data("data1").set_attr_index(0); auto data2 = op::Data("data2").set_attr_index(1); auto data3 = op::Data("data3"); - std::vector inputs {data1, data2, data3}; + auto data4 = op::Data("Test"); + std::vector inputs {data1, data2, data3, data4}; std::vector outputs; Graph graph("test_graph"); + graph.AddNodeByOp(Operator("gg", "Mul")); graph.SetInputs(inputs).SetOutputs(outputs); return graph; } @@ -373,9 +391,16 @@ TEST(UtestIrBuild, check_modify_mixlist_param) { EXPECT_EQ(ret, GRAPH_PARAM_INVALID); } -TEST(UtestIrCommon, check_dynamic_imagesize_input_shape_valid_format_empty) { - std::map> shape_map; - std::string dynamic_image_size = ""; - bool ret = CheckDynamicImagesizeInputShapeValid(shape_map, "123", dynamic_image_size); - EXPECT_EQ(ret, false); +TEST(UtestIrBuild, atc_cfg_optype_param) { + ComputeGraphPtr graph = BuildComputeGraph1(); + FILE *fp = fopen("./keep.txt", "w+"); + if (fp) { + fprintf(fp, "Test\n"); + fprintf(fp, "OpType::Mul\n"); + fprintf(fp, "Optype::Sub\n"); + fclose(fp); + } + auto ret = KeepDtypeFunc(graph, "./keep.txt"); + (void)remove("./keep.txt"); + EXPECT_EQ(ret, GRAPH_PARAM_INVALID); } \ No newline at end of file From fe77ec974ade91b5a41d713bf7ed0639ac92f176 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 22 Jun 2021 16:20:29 +0800 Subject: [PATCH 067/226] Fix BuildPartitionFrame failed --- ge/graph/partition/dynamic_shape_partition.cc | 25 ++++++++++--------- ge/graph/partition/dynamic_shape_partition.h | 4 ++- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/ge/graph/partition/dynamic_shape_partition.cc b/ge/graph/partition/dynamic_shape_partition.cc index 1db47498..8fc19ff2 100755 --- a/ge/graph/partition/dynamic_shape_partition.cc +++ b/ge/graph/partition/dynamic_shape_partition.cc @@ -284,9 +284,6 @@ Status DynamicShapePartitioner::InitClusters() { auto cluster = MakeShared(rank++, type, node, this); REQUIRE_NOT_NULL(cluster, "[New][Memory] for cluster failed."); node_2_cluster_[node] = cluster; - if (cluster->IsUnknownShape()) { - ordered_cluster_.push_back(cluster); - } int64_t group_index = -1; if (AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index)) { @@ -306,7 +303,7 @@ Status DynamicShapePartitioner::InitClusters() { return SUCCESS; } -Status DynamicShapePartitioner::TopologicalSortClusters() { +Status DynamicShapePartitioner::TopologicalSortClusters(const OrderedFilter &ordered_filter) { ordered_cluster_.clear(); // BFS topological sort clusters for known shape cluster std::queue ready_clusters; @@ -331,7 +328,7 @@ Status DynamicShapePartitioner::TopologicalSortClusters() { auto cluster = ready_clusters.front(); ready_clusters.pop(); cluster->UpdateRank(rank++); - if (cluster->IsKnownShape() || cluster->IsInputNode()) { + if (ordered_filter == nullptr || ordered_filter(cluster)) { ordered_cluster_.push_back(cluster); } for (const auto &out_cluster : cluster->Outputs()) { @@ -378,7 +375,6 @@ void DynamicShapePartitioner::MergeClustersControlFlow() { continue; } - bool is_unknown_cluster = cluster->IsUnknownShape(); for (++rit; rit != control_cluster.rend(); ++rit) { const auto &cluster_from = *rit; if (all_merged_clusters.count(cluster_from) > 0) { @@ -395,11 +391,6 @@ void DynamicShapePartitioner::MergeClustersControlFlow() { } } } - - if (!is_unknown_cluster && cluster->IsUnknownShape()) { - GELOGD("Add to ordered cluster: %s", cluster->DebugString().c_str()); - 
ordered_cluster_.push_back(cluster); - } } } @@ -475,9 +466,19 @@ void DynamicShapePartitioner::MergeClustersInputData() { } Status DynamicShapePartitioner::MergeClusters() { + const auto filter_known = [](const ClusterPtr &cluster) { + return cluster->IsKnownShape() || cluster->IsInputNode(); + }; + const auto filter_unknown = [](const ClusterPtr &cluster) { + return cluster->IsUnknownShape(); + }; + MergeClustersControlFlow(); + REQUIRE_SUCCESS(TopologicalSortClusters(filter_unknown), + "[TopologicalSort][Clusters] after merge control flow clusters failed."); MergeClustersUnknownShape(); - REQUIRE_SUCCESS(TopologicalSortClusters(), "[TopologicalSort][Clusters] after merge unknown shape clusters failed."); + REQUIRE_SUCCESS(TopologicalSortClusters(filter_known), + "[TopologicalSort][Clusters] after merge unknown shape clusters failed."); MergeClustersKnownShape(); MergeClustersInputData(); return SUCCESS; diff --git a/ge/graph/partition/dynamic_shape_partition.h b/ge/graph/partition/dynamic_shape_partition.h index 31146570..0eb282a2 100644 --- a/ge/graph/partition/dynamic_shape_partition.h +++ b/ge/graph/partition/dynamic_shape_partition.h @@ -111,6 +111,8 @@ class DynamicShapePartitioner { Status Partition(); + using OrderedFilter = std::function &cluster)>; + private: Status PartitionImpl(); // Collect nodes that satisfy the unknowshape rules: @@ -138,7 +140,7 @@ class DynamicShapePartitioner { // Merge clusters step3 void MergeClustersInputData(); // Topological sort clusters after merge unknown shape clusters. - Status TopologicalSortClusters(); + Status TopologicalSortClusters(const OrderedFilter &ordered_filter); // Deduplicate merged clusters void PruneUniqueClusters(); // Establish the input-output anchors for each partition of the cluster and record links to other clusters From 3df837167ca0a7fa738b10815e5e8180ac1b59e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Tue, 22 Jun 2021 15:03:25 +0800 Subject: [PATCH 068/226] opt_info --- CMakeLists.txt | 1 + ge/CMakeLists.txt | 8 ++ ge/ge_opt_info/ge_opt_info.cc | 58 +++++++++ ge/ge_opt_info/ge_opt_info.h | 31 +++++ ge/graph/manager/graph_manager.cc | 7 + tests/CMakeLists.txt | 1 + tests/depends/opt_info/CMakeLists.txt | 37 ++++++ tests/depends/opt_info/src/opt_info_stub.cc | 46 +++++++ tests/framework/cmake/graphengine.cmake | 2 + tests/st/testcase/test_ge_opt_info.cc | 123 ++++++++++++++++++ tests/ut/ge/CMakeLists.txt | 14 ++ .../ut/ge/ge_opt_info/ge_opt_info_unittest.cc | 82 ++++++++++++ third_party/fwkacllib/inc/opt_info/opt_info.h | 34 +++++ 13 files changed, 444 insertions(+) create mode 100644 ge/ge_opt_info/ge_opt_info.cc create mode 100644 ge/ge_opt_info/ge_opt_info.h create mode 100644 tests/depends/opt_info/CMakeLists.txt create mode 100644 tests/depends/opt_info/src/opt_info_stub.cc create mode 100644 tests/st/testcase/test_ge_opt_info.cc create mode 100644 tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc create mode 100644 third_party/fwkacllib/inc/opt_info/opt_info.h diff --git a/CMakeLists.txt b/CMakeLists.txt index e3cc1e32..41520b14 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,6 +95,7 @@ else () #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) + find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) if(PLATFORM STREQUAL "train") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 
2b9122da..5db2e7a9 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -434,6 +434,7 @@ set(TRAIN_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" + "ge_opt_info/ge_opt_info.cc" ) set(INFER_SRC_LIST @@ -711,6 +712,7 @@ set(INFER_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" + "ge_opt_info/ge_opt_info.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) @@ -765,11 +767,13 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external + ${GE_CODE_DIR}/../abl/licctrl #### blue zone ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_runner PRIVATE @@ -792,6 +796,7 @@ target_link_libraries(ge_runner PRIVATE runtime error_manager ascend_hal_stub + opt_feature -Wl,--as-needed json -lrt @@ -839,11 +844,13 @@ target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external + ${GE_CODE_DIR}/../abl/licctrl #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_compiler PRIVATE @@ -863,6 +870,7 @@ target_link_libraries(ge_compiler PRIVATE error_manager slog runtime_compile + opt_feature -Wl,--as-needed json -lrt diff --git a/ge/ge_opt_info/ge_opt_info.cc b/ge/ge_opt_info/ge_opt_info.cc new file mode 100644 index 00000000..8c1b84ab --- /dev/null +++ b/ge/ge_opt_info/ge_opt_info.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ge_opt_info/ge_opt_info.h" + +#include +#include +#include "graph/ge_local_context.h" +#include "ge/ge_api_types.h" +#include "common/debug/ge_log.h" +#include "opt_info.h" + +namespace ge { +Status GeOptInfo::SetOptInfo() { + std::string soc_ver; + graphStatus ret = GetThreadLocalContext().GetOption(SOC_VERSION, soc_ver); + if (ret != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get soc version failed."); + GELOGE(FAILED, "[Get][SocVersion]Get soc version failed."); + return FAILED; + } + GELOGD("Soc version:%s.", soc_ver.c_str()); + std::map opt_info; + // the first arg does not work at present. 
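+  // gelc::GetOptInfo fills opt_info with per-module switches keyed like
+  // "opt_module.fe" or "opt_module.op_tune", with values such as "all".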
+ if (gelc::GetOptInfo(gelc::kOffline, soc_ver, opt_info) != gelc::SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + GELOGE(FAILED, "[Get][OptInfo]Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + return FAILED; + } + // do nothing if get empty information + if (opt_info.empty()) { + GELOGI("Optional information is empty."); + return SUCCESS; + } + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + for (const auto &itr : opt_info) { + graph_options.emplace(itr.first, itr.second); + GELOGI("Get optional information success, key:%s, value:%s.", itr.first.c_str(), itr.second.c_str()); + } + GetThreadLocalContext().SetGraphOption(graph_options); + return SUCCESS; +} +} // namespace ge diff --git a/ge/ge_opt_info/ge_opt_info.h b/ge/ge_opt_info/ge_opt_info.h new file mode 100644 index 00000000..935dff25 --- /dev/null +++ b/ge/ge_opt_info/ge_opt_info.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GE_OPT_INFO_GE_OPT_INFO_H_ +#define GE_OPT_INFO_GE_OPT_INFO_H_ + +#include "ge/ge_api_error_codes.h" +#include "register/register_types.h" + +namespace ge { +class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { + public: + GeOptInfo() = default; + static Status SetOptInfo(); +}; +} // namespace ge + +#endif // GE_OPT_INFO_GE_OPT_INFO_H_ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index f36c1c0d..0b27fdf3 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -27,6 +27,7 @@ #include "common/math/math_util.h" #include "common/thread_pool.h" #include "common/dump/dump_manager.h" +#include "ge_opt_info/ge_opt_info.h" #include "analyzer/analyzer.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -1001,6 +1002,12 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector + c_sec +) + +target_include_directories(opt_feature_stub INTERFACE ${CMAKE_CURRENT_LIST_DIR}/src) diff --git a/tests/depends/opt_info/src/opt_info_stub.cc b/tests/depends/opt_info/src/opt_info_stub.cc new file mode 100644 index 00000000..df518c4b --- /dev/null +++ b/tests/depends/opt_info/src/opt_info_stub.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "opt_info.h" +#include +#include +#include +#include + +namespace gelc { +namespace { +const std::vector kSocVersions = {"Ascend910"}; +} + +void SetAllOptInfo(std::map &opt_infos) { + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + opt_infos.emplace("opt_module.rl_tune", "all"); + opt_infos.emplace("opt_module.aoe", "all"); +} + +Status GetOptInfo(WorkMode mode, const std::string &soc_ver, + std::map &opt_infos) { + if (std::find(kSocVersions.begin(), kSocVersions.end(), soc_ver)== kSocVersions.end()) { + SetAllOptInfo(opt_infos); + return SUCCESS; + } + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + return SUCCESS; +} +} // namespace gelc diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake index 81aa00cc..c4380016 100644 --- a/tests/framework/cmake/graphengine.cmake +++ b/tests/framework/cmake/graphengine.cmake @@ -103,6 +103,7 @@ list(APPEND INCLUDE_DIRECTORIES "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" + "${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info" "${GE_CODE_DIR}/tests/ut/ge" "${GE_CODE_DIR}/tests/ut/common" "${CMAKE_BINARY_DIR}" @@ -117,6 +118,7 @@ list(APPEND STUB_LIBS runtime_stub profiler_stub hccl_stub + opt_feature_stub error_manager_stub ascend_protobuf json diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc new file mode 100644 index 00000000..457473b1 --- /dev/null +++ b/tests/st/testcase/test_ge_opt_info.cc @@ -0,0 +1,123 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "easy_graph/graph/box.h" +#include "easy_graph/graph/node.h" +#include "easy_graph/builder/graph_dsl.h" +#include "easy_graph/builder/box_builder.h" +#include "easy_graph/layout/graph_layout.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" +#include "graph/graph.h" +#include "graph/compute_graph.h" +#include "framework/common/types.h" +#include "graph/debug/ge_attr_define.h" +#include "ge_graph_dsl/graph_dsl.h" +#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" +#define protected public +#define private public +#include "ge_opt_info/ge_opt_info.h" +#undef private +#undef protected + +namespace ge { +class STEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(STEST_opt_info, get_opt_info_all) { + std::map options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(STEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} +} // namespace ge diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index f1ede616..37906457 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -62,6 +62,7 @@ include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc) 
include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain) +include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info) include_directories(${GE_CODE_DIR}/tests/ut/ge) include_directories(${GE_CODE_DIR}/tests/ut/common) include_directories(${CMAKE_BINARY_DIR}) @@ -346,6 +347,7 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" "${GE_CODE_DIR}/ge/session/omg.cc" + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" ) set(COMMON_FORMAT_SRC_FILES @@ -453,6 +455,7 @@ set(GRAPH_EXECUTE_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc" + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.h" ) @@ -628,6 +631,10 @@ set(SINGLE_OP_SRC_FILES "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model.cc" ) +set(GE_OPT_INFO_SRC_FILES + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" +) + # test files set(COMMON_TEST_FILES "graph/passes/graph_builder_utils.cc" @@ -813,6 +820,10 @@ set(MULTI_PARTS_TEST_FILES "common/host_cpu_engine_unittest.cc" ) +set(GE_OPT_INFO_TEST_FILES + "ge_opt_info/ge_opt_info_unittest.cc" +) + set(GENERATOR_TEST_FILES "generator/ge_generator_unittest.cc" ) @@ -862,6 +873,7 @@ list(APPEND COMMON_SHARED_LIBRARIES mmpa_stub hccl_stub error_manager_stub + opt_feature_stub ascend_protobuf json ) @@ -1107,10 +1119,12 @@ target_link_libraries(ut_libge_multiparts_utest # libge_others_utest add_executable(ut_libge_others_utest + ${GE_OPT_INFO_SRC_FILES} ${COMMON_TEST_FILES} ${PASS_TEST_FILES} ${EXECUTE_TEST_FILES} ${OTHERS_TEST_FILES} + ${GE_OPT_INFO_TEST_FILES} ) target_compile_options(ut_libge_others_utest PRIVATE diff --git a/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc b/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc new file mode 100644 index 00000000..20c123e9 --- /dev/null +++ b/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#define protected public +#define private public +#include "ge_opt_info/ge_opt_info.h" +#include "graph/ge_local_context.h" +#include "external/ge/ge_api_types.h" +#undef private +#undef protected + +namespace ge { +class UTEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UTEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_all) { + std::map global_options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(global_options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_failed) { + std::map options; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::FAILED); +} + +} // namespace ge diff --git a/third_party/fwkacllib/inc/opt_info/opt_info.h b/third_party/fwkacllib/inc/opt_info/opt_info.h new file mode 100644 index 00000000..ea9bb529 --- /dev/null +++ b/third_party/fwkacllib/inc/opt_info/opt_info.h @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <map>
+#include <string>
+
+namespace gelc {
+using Status = uint32_t;
+using WorkMode = uint32_t;
+const Status SUCCESS = 0x0;
+const Status FAILED = 0xFFFFFFFF;
+const WorkMode kOffline = 0x0;
+const WorkMode kInline = 0x01;
+
+extern "C" {
+__attribute__((visibility ("default")))
+Status GetOptInfo(WorkMode mode, const std::string &soc_ver,
+                  std::map<std::string, std::string> &opt_info_map);
+}
+}  // namespace gelc
+
From 69a27208a039ef30f389a0d3ea8f7247a214c4e9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=B6=9B?=
Date: Tue, 22 Jun 2021 19:17:03 +0800
Subject: [PATCH 069/226] Revert 'Pull Request !1704 : remove updation of
 session_id'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ge/graph/manager/graph_manager.cc            |  2 +-
 ge/hybrid/model/hybrid_model_builder.cc      | 46 ++++---------------
 ge/hybrid/model/hybrid_model_builder.h       |  1 -
 ge/model/ge_root_model.h                     |  5 --
 .../executor/subgraph_executor_unittest.cc   |  3 --
 .../model/hybrid_model_builder_unittest.cc   | 26 ++---------
 6 files changed, 16 insertions(+), 67 deletions(-)

diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc
index f36c1c0d..b862a7d6 100755
--- a/ge/graph/manager/graph_manager.cc
+++ b/ge/graph/manager/graph_manager.cc
@@ -3132,10 +3132,10 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
     }
     // Avoid repeated prerun for graphs owning the same graph_id in online inference concurrency
     if (count > 1 && graph_node->GetBuildFlag()) {
+      graph_node->Lock();
       GELOGD("Avoid repeated prerun, graph_id:%u.", args.graph_id);
       // In online inference concurrency scenario, graph_node is allowed to be locked for 'count' times
       graph_node->SetSemSize(count);
-      graph_node->Lock();
      graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
        args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback }));
      GELOGI("[PreRunThread] Loop end.
Start to run with cached build model."); diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index c050875e..d3f00253 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -147,7 +147,6 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); hybrid_model_.model_name_ = ge_root_model_->GetModelName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); - GE_CHK_STATUS_RET(CopyGraph(), "[Invoke][CopyGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName()); @@ -175,8 +174,8 @@ Status HybridModelBuilder::BuildForSingleOp() { hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); auto ret = ge_root_model_->GetSubgraphInstanceNameToModel(); - const GeModelPtr ge_model = ret[hybrid_model_.root_graph_->GetName()]; - GE_CHK_STATUS_RET(IndexTaskDefs(hybrid_model_.root_graph_, ge_model), + const GeModelPtr ge_model = ret[ge_root_model_->GetRootGraph()->GetName()]; + GE_CHK_STATUS_RET(IndexTaskDefs(ge_root_model_->GetRootGraph(), ge_model), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName()); @@ -191,29 +190,6 @@ Status HybridModelBuilder::ValidateParams() { return SUCCESS; } -Status HybridModelBuilder::CopyGraph() { - GELOGD("Copy compute graph begin."); - auto root_graph = ge_root_model_->GetRootGraph(); - - ge_root_model_->IncreaseBuildTimes(); - std::string new_graph_name = ge_root_model_->GetRootGraph()->GetName() + "_" + - std::to_string(ge_root_model_->GetBuildTimes()); - ComputeGraphPtr new_root_graph = MakeShared(new_graph_name); - GE_CHECK_NOTNULL(new_root_graph); - int32_t depth = 0; - std::map node_old_2_new; - std::map op_desc_old_2_new; - graphStatus ret = GraphUtils::CopyComputeGraph(root_graph, new_root_graph, node_old_2_new, op_desc_old_2_new, depth); - if (ret != GRAPH_SUCCESS) { - GELOGE(GRAPH_FAILED, "Copy compute graph failed."); - return GRAPH_FAILED; - } - hybrid_model_.root_graph_ = new_root_graph; - - GELOGD("Copy compute graph[%s] success.", new_graph_name.c_str()); - return SUCCESS; -} - Status HybridModelBuilder::BuildNodeItem(const NodePtr &node, NodeItem &node_item) { auto op_desc = node->GetOpDesc(); GE_CHK_STATUS_RET(ParseForceInfershapeNodes(node, node_item), @@ -838,7 +814,7 @@ Status HybridModelBuilder::BuildOutputMapping(GraphItem &graph_item, } Status HybridModelBuilder::LoadGraph() { - auto root_graph = hybrid_model_.root_graph_; + auto root_graph = ge_root_model_->GetRootGraph(); if (!GetContext().GetHostExecFlag()) { std::shared_ptr merged_graph; GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", @@ -852,6 +828,7 @@ Status HybridModelBuilder::LoadGraph() { root_graph->GetAllNodesSize()); } + hybrid_model_.root_graph_ = root_graph; GE_CHK_STATUS_RET(RelinkNextIteration(), "[%s] Relink NextIteration failed", GetGraphName()); // Reset node id by topological order across all subgraphs int64_t index = 0; @@ -900,7 +877,6 @@ Status 
HybridModelBuilder::LoadGraph() { } for (auto &it : hybrid_model_.known_shape_sub_models_) { auto node_item = MutableNodeItem(it.first); - GE_CHECK_NOTNULL(node_item); AscendString graph_name; GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); @@ -1149,9 +1125,7 @@ Status HybridModelBuilder::InitWeights() { sub_weight_buffer->GetSize()); auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); if (subgraph != ge_root_model_->GetRootGraph()) { - subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_model.first); - } else { - subgraph = hybrid_model_.root_graph_; + subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first); } GE_CHECK_NOTNULL(subgraph); hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer)); @@ -1308,7 +1282,7 @@ Status HybridModelBuilder::IndexTaskDefs(const ComputeGraphPtr &sub_graph, const } Status HybridModelBuilder::IndexTaskDefs() { - const auto &root_graph = hybrid_model_.root_graph_; + const auto root_graph = ge_root_model_->GetRootGraph(); const auto &root_graph_name = root_graph->GetName(); if (SetOutputNameAttr(*root_graph) != SUCCESS) { GELOGW("Set output name attr failed."); @@ -1342,7 +1316,7 @@ Status HybridModelBuilder::IndexTaskDefs() { Status HybridModelBuilder::IndexSpecialNodes() { GELOGD("Start to index special nodes"); - const auto &root_graph = hybrid_model_.root_graph_; + const auto &root_graph = ge_root_model_->GetRootGraph(); for (auto &node : root_graph->GetAllNodes()) { GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); @@ -1497,7 +1471,7 @@ Status HybridModelBuilder::InitRuntimeParams() { runtime_param_.session_id = ret ? static_cast(value) : 0; ret = ge::AttrUtils::GetInt(first_model, ATTR_MODEL_TASK_GEN_VAR_ADDR, value); runtime_param_.logic_var_base = ret ? 
static_cast(value) : 0; - runtime_param_.graph_id = hybrid_model_.root_graph_->GetGraphID(); + runtime_param_.graph_id = ge_root_model_->GetRootGraph()->GetGraphID(); value = 0; for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) { (void) ge::AttrUtils::GetInt(it.second, ATTR_MODEL_VAR_SIZE, value); @@ -1634,7 +1608,7 @@ Status HybridModelBuilder::TransAllVarData() { } Status HybridModelBuilder::CopyVarData() { - GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(hybrid_model_.root_graph_, + GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(ge_root_model_->GetRootGraph(), runtime_param_.session_id, hybrid_model_.device_id_), "[Invoke][CopyVarData] failed."); @@ -1717,7 +1691,7 @@ Status HybridModelBuilder::LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem } Status HybridModelBuilder::RecoverGraphUnknownFlag() { - const auto &root_graph = hybrid_model_.root_graph_; + const auto &root_graph = ge_root_model_->GetRootGraph(); for (auto &sub_graph : root_graph->GetAllSubgraphs()) { GE_CHECK_NOTNULL(sub_graph); for (const auto &node : sub_graph->GetDirectNode()) { diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index 3ab43b7f..92974441 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -56,7 +56,6 @@ class HybridModelBuilder { Status BuildOutputMapping(GraphItem &partitioned_call, const NodeItem &node_item, bool is_root_graph); Status ValidateParams(); Status LoadGraph(); - Status CopyGraph(); Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model); Status LoadTask(NodeItem &node_item); Status LoadTasks(); diff --git a/ge/model/ge_root_model.h b/ge/model/ge_root_model.h index b6e3d081..9e8e116e 100755 --- a/ge/model/ge_root_model.h +++ b/ge/model/ge_root_model.h @@ -60,10 +60,6 @@ class GeRootModel { bool GetTrainFlag() const { return train_flag_; } - int32_t GetBuildTimes() const { return hybrid_build_times_; } - - void IncreaseBuildTimes() { hybrid_build_times_++; } - private: ComputeGraphPtr root_graph_ = nullptr; std::map subgraph_instance_name_to_model_; @@ -73,7 +69,6 @@ class GeRootModel { bool train_flag_ = false; std::string model_name_; bool is_specific_stream_ = false; - int32_t hybrid_build_times_ = 0; }; } // namespace ge using GeRootModelPtr = std::shared_ptr; diff --git a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc index 827705ae..2dc3b639 100644 --- a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc @@ -249,9 +249,6 @@ TEST_F(UtestSubgraphExecutor, cond_graph_schedule_tasks) { graph_context.callback_manager = std::unique_ptr(new CallbackManager()); ASSERT_EQ(graph_context.callback_manager->Init(), SUCCESS); - auto root_graph = hybrid_model.root_graph_; - switch_t = root_graph->FindNode("switch_t"); - switch_f = root_graph->FindNode("switch_f"); const auto node_it_t = hybrid_model.node_items_.find(switch_t); const auto node_it_f = hybrid_model.node_items_.find(switch_f); ASSERT_NE(hybrid_model.node_items_.end(), node_it_t); diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 95669b73..2ab82350 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -214,17 +214,11 @@ TEST_F(UtestHybridModelBuilder, normal_hybrid_model_build) { 
ASSERT_EQ(it->second->frame_index_, index); ASSERT_EQ(it->second->parent_frame_, -1); }; - auto root_graph = hybrid_model.root_graph_; - auto enter1_node = root_graph->FindNode("enter"); - auto active1_node = root_graph->FindNode("active1"); - auto active2_node = root_graph->FindNode("active2"); - auto active3_node = root_graph->FindNode("active3"); - auto output1_node = root_graph->FindNode("net_output"); - TestFrameGroup(enter1_node, control_group_index); - TestFrameGroup(active1_node, control_group_index); - TestFrameGroup(active2_node, control_group_index); - TestFrameGroup(active3_node, control_group_index); - TestFrameGroup(output1_node, -1); + TestFrameGroup(enter1, control_group_index); + TestFrameGroup(active1, control_group_index); + TestFrameGroup(active2, control_group_index); + TestFrameGroup(active3, control_group_index); + TestFrameGroup(output1, -1); engine_mapping.clear(); task_executor.clear(); @@ -352,14 +346,4 @@ EXPECT_EQ(hybrid_model_builder.InitVariableTensors(), SUCCESS); EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 1); HostMemManager::Instance().var_memory_base_map_.clear(); } - -TEST_F(UtestHybridModelBuilder, copy_graph_success) { -ComputeGraphPtr graph = std::make_shared("test"); -GeRootModelPtr ge_root_model = make_shared(graph); -HybridModel hybrid_model(ge_root_model); -HybridModelBuilder hybrid_model_builder(hybrid_model); - -Status st = hybrid_model_builder.CopyGraph(); -EXPECT_EQ(st, SUCCESS); -} } // namespace ge From ad3e70748e8cfebdc8fa141af8d8cfd6a8cbd1aa Mon Sep 17 00:00:00 2001 From: chuxing Date: Sat, 19 Jun 2021 16:41:25 +0800 Subject: [PATCH 070/226] Init hccl node executor on-demand --- ge/hybrid/model/hybrid_model_builder.cc | 22 ++++ ge/hybrid/model/hybrid_model_builder.h | 1 + ge/hybrid/node_executor/node_executor.cc | 80 +++++++------- ge/hybrid/node_executor/node_executor.h | 9 +- tests/ut/ge/CMakeLists.txt | 2 + .../model/hybrid_model_builder_unittest.cc | 27 +++++ .../node_executor/node_executor_unittest.cc | 103 ++++++++++++++++++ 7 files changed, 200 insertions(+), 44 deletions(-) create mode 100644 tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index d3f00253..bb405605 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -1227,6 +1227,28 @@ Status HybridModelBuilder::LoadGeModel(ComputeGraph &sub_graph, const GeModelPtr hybrid_model_.known_shape_sub_models_.emplace(parent_node, ge_model); } + GE_CHK_STATUS_RET_NOLOG(InitHcclExecutorOnDemand(ge_model)); + return SUCCESS; +} + +Status HybridModelBuilder::InitHcclExecutorOnDemand(const GeModelPtr &ge_model) { + if (NodeExecutorManager::GetInstance().IsExecutorInitialized(NodeExecutorManager::ExecutorType::HCCL)) { + return SUCCESS; + } + + // HCCL tasks in known-shaped subgraph which resides in a dynamic root graph + // still depends on the initialization of the HcclExecutor + auto tasks = ge_model->GetModelTaskDefPtr()->task(); + for (int i = 0; i < tasks.size(); ++i) { + const domi::TaskDef &task_def = tasks[i]; + auto task_type = static_cast(task_def.type()); + if (task_type == RT_MODEL_TASK_HCCL) { + const NodeExecutor *unused = nullptr; + GE_CHK_STATUS_RET_NOLOG(NodeExecutorManager::GetInstance() + .GetOrCreateExecutor(NodeExecutorManager::ExecutorType::HCCL, &unused)); + return SUCCESS; + } + } return SUCCESS; } diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h 
index 92974441..9c1eb187 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -57,6 +57,7 @@ class HybridModelBuilder { Status ValidateParams(); Status LoadGraph(); Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model); + static Status InitHcclExecutorOnDemand(const GeModelPtr &ge_model); Status LoadTask(NodeItem &node_item); Status LoadTasks(); Status IdentifyVariableOutputs(NodeItem &node_item, const ComputeGraphPtr &subgraph); diff --git a/ge/hybrid/node_executor/node_executor.cc b/ge/hybrid/node_executor/node_executor.cc index 5f3d6e45..9e9354d9 100755 --- a/ge/hybrid/node_executor/node_executor.cc +++ b/ge/hybrid/node_executor/node_executor.cc @@ -58,8 +58,8 @@ Status NodeExecutor::CompileTask(const HybridModel &model, const NodePtr &node, } Status NodeExecutorManager::EnsureInitialized() { - GE_CHK_STATUS_RET(InitializeExecutors()); std::lock_guard lk(mu_); + ++ref_count_; if (initialized_) { return SUCCESS; } @@ -115,17 +115,14 @@ NodeExecutorManager::ExecutorType NodeExecutorManager::ResolveExecutorType(Node return it->second; } -Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) const { +Status NodeExecutorManager::GetExecutor(Node &node, const NodeExecutor **executor) { auto executor_type = ResolveExecutorType(node); + GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast(executor_type)); const auto it = executors_.find(executor_type); if (it == executors_.end()) { - REPORT_INNER_ERROR("E19999", "Failed to get executor by type: %d.", static_cast(executor_type)); - GELOGE(INTERNAL_ERROR, "[Check][ExecutorType]Failed to get executor by type: %d.", - static_cast(executor_type)); - return INTERNAL_ERROR; + return GetOrCreateExecutor(executor_type, executor); } - GELOGD("[%s] Set node executor by type: %d.", node.GetName().c_str(), static_cast(executor_type)); *executor = it->second.get(); return SUCCESS; } @@ -178,51 +175,55 @@ Status NodeExecutorManager::CalcOpRunningParam(Node &node) const { return OpsKernelBuilderManager::Instance().CalcOpRunningParam(node); } -Status NodeExecutorManager::InitializeExecutors() { +bool NodeExecutorManager::IsExecutorInitialized(NodeExecutorManager::ExecutorType executor_type) { + std::lock_guard lk(mu_); + return executors_.find(executor_type) != executors_.end(); +} + +Status NodeExecutorManager::GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **out_executor) { std::lock_guard lk(mu_); - if (executor_initialized_) { - ++ref_count_; - GELOGI("Executor is already initialized. 
add ref count to [%d]", ref_count_); + const auto executor_it = executors_.find(executor_type); + if (executor_it != executors_.end()) { + *out_executor = executor_it->second.get(); return SUCCESS; } - GELOGI("Start to Initialize NodeExecutors"); - for (auto &it : builders_) { - auto engine_type = it.first; - auto build_fn = it.second; - GE_CHECK_NOTNULL(build_fn); - auto executor = std::unique_ptr(build_fn()); - if (executor == nullptr) { - REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for engine type = %d", - static_cast(engine_type)); - GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast(engine_type)); - return INTERNAL_ERROR; - } + GELOGI("Start to Initialize NodeExecutor, type = %d", static_cast(executor_type)); + auto it = builders_.find(executor_type); + if (it == builders_.end()) { + REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", + static_cast(executor_type)); + GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for executor type = %d", static_cast(executor_type)); + return INTERNAL_ERROR; + } - GELOGD("Executor of engine type = %d was created successfully", static_cast(engine_type)); - auto ret = executor->Initialize(); - if (ret != SUCCESS) { - REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast(engine_type)); - GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast(engine_type)); - for (auto &executor_it : executors_) { - executor_it.second->Finalize(); - } - executors_.clear(); - return ret; - } + auto build_fn = it->second; + GE_CHECK_NOTNULL(build_fn); + auto executor = std::unique_ptr(build_fn()); + if (executor == nullptr) { + REPORT_CALL_ERROR("E19999", "Create NodeExecutor failed for executor type = %d", + static_cast(executor_type)); + GELOGE(INTERNAL_ERROR, "[Create][NodeExecutor] failed for engine type = %d", static_cast(executor_type)); + return INTERNAL_ERROR; + } - executors_.emplace(engine_type, std::move(executor)); + GELOGD("Executor of engine type = %d was created successfully", static_cast(executor_type)); + auto ret = executor->Initialize(); + if (ret != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Initialize NodeExecutor failed for type = %d", static_cast(executor_type)); + GELOGE(ret, "[Initialize][NodeExecutor] failed for type = %d", static_cast(executor_type)); + return ret; } - ++ref_count_; - executor_initialized_ = true; - GELOGI("Initializing NodeExecutors successfully."); + *out_executor = executor.get(); + executors_.emplace(executor_type, std::move(executor)); + GELOGI("Initializing NodeExecutor successfully, type = %d", static_cast(executor_type)); return SUCCESS; } void NodeExecutorManager::FinalizeExecutors() { std::lock_guard lk(mu_); - if (!executor_initialized_) { + if (ref_count_ <= 0) { GELOGD("No need for finalizing for not initialized."); return; } @@ -237,7 +238,6 @@ void NodeExecutorManager::FinalizeExecutors() { it.second->Finalize(); } executors_.clear(); - executor_initialized_ = false; GELOGD("Done invoking Finalize successfully."); } diff --git a/ge/hybrid/node_executor/node_executor.h b/ge/hybrid/node_executor/node_executor.h index ad4a9296..0e4a8464 100644 --- a/ge/hybrid/node_executor/node_executor.h +++ b/ge/hybrid/node_executor/node_executor.h @@ -179,8 +179,6 @@ class NodeExecutorManager { */ Status EnsureInitialized(); - Status InitializeExecutors(); - void FinalizeExecutors(); /** @@ -196,7 +194,7 @@ class NodeExecutorManager { * @param executor executor * @return SUCCESS on success, error 
code otherwise */ - Status GetExecutor(Node &node, const NodeExecutor **executor) const; + Status GetExecutor(Node &node, const NodeExecutor **executor); /** * Resolve executor type by node @@ -205,13 +203,16 @@ class NodeExecutorManager { */ ExecutorType ResolveExecutorType(Node &node) const; + Status GetOrCreateExecutor(ExecutorType executor_type, const NodeExecutor **executor); + + bool IsExecutorInitialized(ExecutorType executor_type); + private: std::map> executors_; std::map> builders_; std::map engine_mapping_; std::mutex mu_; bool initialized_ = false; - bool executor_initialized_ = false; int ref_count_ = 0; }; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index f1ede616..06b3e0f2 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -840,6 +840,7 @@ set(HYBRID_TEST_FILES "hybrid/executor/subgraph_executor_unittest.cc" "hybrid/executor/worker/execution_engine_unittest.cc" "hybrid/model/hybrid_model_builder_unittest.cc" + "hybrid/node_executor/node_executor_unittest.cc" "hybrid/node_executor/rts/rts_node_task_unittest.cc" "hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc" "hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc" @@ -847,6 +848,7 @@ set(HYBRID_TEST_FILES "hybrid/executor/hybrid_model_async_executor_unittest.cc" "hybrid/executor/hybrid_model_pipeline_executor_unittest.cc" "hybrid/node_executor/aicore/aicore_task_compiler_unittest.cc" + ) set(OTHERS_TEST_FILES diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 2ab82350..5567aca2 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -346,4 +346,31 @@ EXPECT_EQ(hybrid_model_builder.InitVariableTensors(), SUCCESS); EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 1); HostMemManager::Instance().var_memory_base_map_.clear(); } + +TEST_F(UtestHybridModelBuilder, TestInitHcclExecutorOnDemand) { + NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL); + // build aicore task + domi::ModelTaskDef model_task_def; + std::shared_ptr model_task_def_ptr = make_shared(model_task_def); + GeModelPtr ge_model = make_shared(); + ge_model->SetModelTaskDef(model_task_def_ptr); + + // No hccl task + domi::TaskDef *task_def = model_task_def_ptr->add_task(); + task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC); + ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS); + + // get executor failed due to no builder + task_def = model_task_def_ptr->add_task(); + task_def->set_type(RT_MODEL_TASK_HCCL); + ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), INTERNAL_ERROR); + + // get executor success + REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::HCCL, NodeExecutor); + ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS); + + // repeat get, do not access builder + NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL); + ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS); +} } // namespace ge diff --git a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc new file mode 100644 index 00000000..8a1240d3 --- /dev/null +++ b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc @@ -0,0 +1,103 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#define private public +#define protected public +#include "hybrid/node_executor/node_executor.h" +#undef protected +#undef private + +using namespace std; +using namespace testing; + +namespace ge { +using namespace hybrid; + +namespace { + bool finalized = false; +} + +class NodeExecutorTest : public testing::Test { + protected: + void SetUp() {} + void TearDown() { } +}; + +class FailureNodeExecutor : public NodeExecutor { + public: + Status Initialize() override { + return INTERNAL_ERROR; + } +}; + +class SuccessNodeExecutor : public NodeExecutor { + public: + Status Initialize() override { + initialized = true; + finalized = false; + return SUCCESS; + } + + Status Finalize() override { + finalized = true; + } + + bool initialized = false; +}; + +REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICORE, FailureNodeExecutor); +REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, SuccessNodeExecutor); + +TEST_F(NodeExecutorTest, TestGetOrCreateExecutor) { + auto &manager = NodeExecutorManager::GetInstance(); + const NodeExecutor *executor = nullptr; + Status ret = SUCCESS; + // no builder + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::RESERVED, &executor); + ASSERT_EQ(ret, INTERNAL_ERROR); + // initialize failure + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICORE, &executor); + ASSERT_EQ(ret, INTERNAL_ERROR); + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); +} + +TEST_F(NodeExecutorTest, TestInitAndFinalize) { + auto &manager = NodeExecutorManager::GetInstance(); + manager.FinalizeExecutors(); + manager.EnsureInitialized(); + manager.EnsureInitialized(); + const NodeExecutor *executor = nullptr; + auto ret = manager.GetOrCreateExecutor(NodeExecutorManager::ExecutorType::AICPU_TF, &executor); + ASSERT_EQ(ret, SUCCESS); + ASSERT_TRUE(executor != nullptr); + ASSERT_TRUE(((SuccessNodeExecutor*)executor)->initialized); + manager.FinalizeExecutors(); + ASSERT_FALSE(manager.executors_.empty()); + manager.FinalizeExecutors(); + ASSERT_TRUE(manager.executors_.empty()); + ASSERT_TRUE(finalized); +} +} // namespace ge From f0942201572d6430d1d6ca4b808fe72f65577210 Mon Sep 17 00:00:00 2001 From: lianghao Date: Mon, 21 Jun 2021 21:51:33 +0800 Subject: [PATCH 071/226] train_graph_flag --- ge/graph/manager/graph_manager.cc | 23 +++++++++---------- ge/graph/manager/graph_manager.h | 2 +- ge/graph/passes/global_step_insert_pass.cc | 11 --------- ge/ir_build/ge_ir_build.cc | 1 + .../buffer_pool_mem_assigner_unittest.cc | 5 ++++ .../global_step_insert_pass_unittest.cc | 7 +----- 6 files changed, 19 insertions(+), 
30 deletions(-)

diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc
index f36c1c0d..01a2e502 100755
--- a/ge/graph/manager/graph_manager.cc
+++ b/ge/graph/manager/graph_manager.cc
@@ -122,6 +122,7 @@ const char *const kVectorEngine = "VectorEngine";
 const char *const kAIcoreEngine = "AIcoreEngine";
 const int32_t kDynamicDimsTypeIsGetNext = 0;
 const int32_t kDynamicDimsTypeIsData = 1;
+const int32_t kBase = 10;
 const char *const kGetNextName = "IteratorV2";
 const uint32_t kInitGraphCount = 1;
 const uint32_t kNotAdded = 0;
@@ -1788,7 +1789,7 @@ Status GraphManager::ParseOptions(const std::map &opti
                   return GE_GRAPH_OPTIONS_INVALID);
   // ge.graphType
-  ret = ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
+  ret = ParseTrainGraphFlag(options_.train_graph_flag);
   GE_IF_BOOL_EXEC(ret != SUCCESS,
                   GELOGE(GE_GRAPH_OPTIONS_INVALID, "[Parse][TrainGraphFlag] Key:ge.runFlag value is invalid");
                   return GE_GRAPH_OPTIONS_INVALID);
@@ -1833,19 +1834,17 @@ Status GraphManager::ParseOptions(const std::map &opti
   return SUCCESS;
 }
-Status GraphManager::ParseTrainGraphFlag(const bool &run_flag, bool &train_flag) {
-  std::shared_ptr ge_instance_ptr = ge::GELib::GetInstance();
-  if (ge_instance_ptr == nullptr) {
-    GELOGW("[Initialize] set train_graph_flag to 0 when GE is not initialized or finalized");
-    train_flag = false;
-  } else if (!ge_instance_ptr->isTrainMode()) {
-    train_flag = false;
-  } else {  // ge_instance_ptr->isTrainMode() is true
-    train_flag = true;
-    if (!run_flag) {
-      GELOGW("Key:ge.runFlag, its value %d is invalid, it must be 1 when GElib::is_train_mode_ flag is 1", run_flag);
+// OPTION_GRAPH_RUN_MODE is supposed to be a session-level option, but it used to be set at the global level.
+// If it cannot be parsed from the session options, it falls back to the global options via GetContext().
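+// For example, ge_ir_build.cc (changed below) now seeds OPTION_GRAPH_RUN_MODE with "0",
+// so offline builds resolve train_flag to false unless a session option overrides it.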
+Status GraphManager::ParseTrainGraphFlag(bool &train_flag) { + train_flag = false; + string run_mode; + if (GetContext().GetOption(ge::OPTION_GRAPH_RUN_MODE, run_mode) == SUCCESS && !run_mode.empty()) { + if (GraphRunMode(std::strtol(run_mode.c_str(), nullptr, kBase)) >= TRAIN) { + train_flag = true; } } + GELOGI("Is train flag: %d.", train_flag); return SUCCESS; } diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index 93ce354a..3475da6d 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -292,7 +292,7 @@ class GraphManager { static Status ParseParallelNum(const std::string ¶llel_num, const std::string &key, int &num); - static Status ParseTrainGraphFlag(const bool &run_flag, bool &train_flag); + static Status ParseTrainGraphFlag(bool &train_flag); static bool IsPerfLevelInvalid(int32_t perf_level); diff --git a/ge/graph/passes/global_step_insert_pass.cc b/ge/graph/passes/global_step_insert_pass.cc index f27641fc..297e4ee2 100755 --- a/ge/graph/passes/global_step_insert_pass.cc +++ b/ge/graph/passes/global_step_insert_pass.cc @@ -28,10 +28,6 @@ #include "graph/passes/pass_utils.h" #include "graph/ge_context.h" -namespace { -const char *const kFlagOff = "0"; -} // namespace - namespace ge { NodePtr GlobalStepInsertPass::InsertOp(ComputeGraphPtr &compute_graph, const string &node_type, @@ -80,13 +76,6 @@ NodePtr GlobalStepInsertPass::InsertOp(ComputeGraphPtr &compute_graph, } Status GlobalStepInsertPass::Run(ComputeGraphPtr compute_graph) { - // run_flag off means offline, no need insert global step node which type is variable - std::string run_flag; - if (ge::GetContext().GetOption(ge::RUN_FLAG, run_flag) == GRAPH_SUCCESS && run_flag == kFlagOff) { - GELOGI("compute_graph [%u] [%s] skip insert global step", compute_graph->GetGraphID(), - compute_graph->GetName().c_str()); - return SUCCESS; - } NodePtr output_node = compute_graph->FindFirstNodeMatchType(NETOUTPUT); if (output_node == nullptr) { GELOGD("Node type %s can't be found in graph %u", NETOUTPUT, compute_graph->GetGraphID()); diff --git a/ge/ir_build/ge_ir_build.cc b/ge/ir_build/ge_ir_build.cc index ea521f5b..a7671a74 100644 --- a/ge/ir_build/ge_ir_build.cc +++ b/ge/ir_build/ge_ir_build.cc @@ -574,6 +574,7 @@ graphStatus Impl::Init(const Graph &graph, const std::map(string(ge::RUN_FLAG), to_string(0))); options_.insert(std::pair(string(ge::TRAIN_FLAG), to_string(0))); options_.insert(std::pair(string(ge::SAVE_ORIGINAL_MODEL), to_string(0))); + options_.insert(std::pair(string(ge::OPTION_GRAPH_RUN_MODE), to_string(0))); // print ge option map ge::PrintOptionMap(options_, "ge option"); diff --git a/tests/ut/ge/graph/build/buffer_pool_mem_assigner_unittest.cc b/tests/ut/ge/graph/build/buffer_pool_mem_assigner_unittest.cc index 96283250..05141785 100644 --- a/tests/ut/ge/graph/build/buffer_pool_mem_assigner_unittest.cc +++ b/tests/ut/ge/graph/build/buffer_pool_mem_assigner_unittest.cc @@ -29,6 +29,7 @@ #include "graph/build/memory/buffer_pool_mem_assigner.h" #include "graph/build/memory/graph_mem_assigner.h" #include "graph/build/stream_allocator.h" +#include "graph/ge_local_context.h" #undef protected #undef private @@ -260,6 +261,10 @@ TEST_F(UtestBufferPoolMemAssignerTest, buffer_pool_serial_graph_assign_success) } TEST_F(UtestBufferPoolMemAssignerTest, buffer_pool_subgraph_with_inner_dependency_assign_success) { + std::string build_mode; + std::map options_map; + options_map.insert({ge::OPTION_GRAPH_RUN_MODE, "1"}); + 
ge::GetThreadLocalContext().SetGraphOption(options_map); ut::BufferPoolGraphBuilder builder("SubgraphWithInnerDependency"); ge::ComputeGraphPtr graph = builder.BuildSubgraphWithInnerDependency(); BufferPoolMemoryPass buffer_pool_mem_pass; diff --git a/tests/ut/ge/graph/passes/global_step_insert_pass_unittest.cc b/tests/ut/ge/graph/passes/global_step_insert_pass_unittest.cc index 9da2565d..cc9a4077 100644 --- a/tests/ut/ge/graph/passes/global_step_insert_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/global_step_insert_pass_unittest.cc @@ -34,7 +34,6 @@ #include "graph/tuning_utils.h" #include "graph_builder_utils.h" #include "graph/ge_context.h" -#include "graph/ge_local_context.h" #include "inc/pass_manager.h" #undef protected #undef private @@ -62,13 +61,9 @@ static ComputeGraphPtr BuildGraph1() { TEST_F(UtestGlobalStepInsertPass, skip_insert) { auto graph = BuildGraph1(); - std::string build_mode; - std::map options_map; - options_map.insert({ge::RUN_FLAG, "0"}); - ge::GetThreadLocalContext().SetGraphOption(options_map); GlobalStepInsertPass pass; Status status = pass.Run(graph); EXPECT_EQ(status, SUCCESS); NodePtr found_node = graph->FindNode(NODE_NAME_GLOBAL_STEP); - EXPECT_EQ(found_node, nullptr); + EXPECT_NE(found_node, nullptr); } From 17a37ca7507a9b6923e99771735069cd264aa7cf Mon Sep 17 00:00:00 2001 From: lichun Date: Wed, 23 Jun 2021 14:28:45 +0800 Subject: [PATCH 072/226] add atc_params: check_report for ConvertModelToJson --- ge/offline/main.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/ge/offline/main.cc b/ge/offline/main.cc index a78ff392..6caed1b7 100755 --- a/ge/offline/main.cc +++ b/ge/offline/main.cc @@ -847,6 +847,7 @@ domi::Status GenerateInfershapeJson() { ge::Graph graph; std::map atc_params; atc_params.insert(std::pair("input_format", FLAGS_input_format)); + atc_params.insert(std::pair("check_report", FLAGS_check_report)); ret = ParseGraph(graph, atc_params, FLAGS_om.c_str(), FLAGS_weight.c_str(), (domi::FrameworkType) FLAGS_framework, "", FLAGS_target.c_str(), (ge::RunMode) FLAGS_mode, false); if (ret != ge::SUCCESS) { From f595a577dd4f4e69e1d0a6d305bff17c4374bba5 Mon Sep 17 00:00:00 2001 From: lianghuikang <505519763@qq.com> Date: Wed, 23 Jun 2021 09:31:54 +0800 Subject: [PATCH 073/226] add op_precision_mode option and support op_debug_level = 4 --- ge/ir_build/ge_ir_build.cc | 69 +++++++++++++------ ge/offline/main.cc | 36 ++++++++-- ge/session/inner_session.cc | 12 ++++ inc/external/ge/ge_api_types.h | 5 ++ tests/ut/ge/graph_ir/ge_ir_build_unittest.cc | 37 ++++++++++ tests/ut/ge/session/ge_api_unittest.cc | 2 +- tests/ut/ge/session/inner_session_unittest.cc | 10 +++ 7 files changed, 144 insertions(+), 27 deletions(-) diff --git a/ge/ir_build/ge_ir_build.cc b/ge/ir_build/ge_ir_build.cc index ea521f5b..052af2f6 100644 --- a/ge/ir_build/ge_ir_build.cc +++ b/ge/ir_build/ge_ir_build.cc @@ -263,6 +263,7 @@ class Impl { omg_context_.user_attr_index_valid = false; }; ~Impl() { (void)generator_.Finalize(); }; + graphStatus CheckBuildModeAndBuildStep(); graphStatus GetSupportedOptions(const std::map &in, std::map &out); graphStatus CheckOptions(const std::map &options); @@ -451,6 +452,37 @@ graphStatus Impl::UpdateDataOpAttr(const Graph &graph) { return GRAPH_SUCCESS; } +graphStatus Impl::CheckBuildModeAndBuildStep() { + std::string build_mode; + auto it = options_.find(BUILD_MODE); + if (it != options_.end() && !(it->second.empty())) { + if (build_mode_options.find(it->second) == build_mode_options.end()) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", 
"value", "reason"}), + std::vector({BUILD_MODE, it->second, "value is unsupported. Please check!"})); + GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildMode]:%s is unsupported. Please check!", it->second.c_str()); + return GRAPH_PARAM_INVALID; + } + build_mode = it->second; + } + it = options_.find(BUILD_STEP); + if (it != options_.end() && !(it->second.empty())) { + if (build_step_options.find(it->second) == build_step_options.end()) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({BUILD_STEP, it->second, "value is unsupported. Please check!"})); + GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildStep]:%s is unsupported. Please check!", it->second.c_str()); + return GRAPH_PARAM_INVALID; + } + } else { + if (build_mode == BUILD_MODE_TUNING) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({BUILD_MODE, it->second, "tuning must specify build step. Please check!"})); + GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildMode] tuning must specify build step. Please check!"); + return GRAPH_PARAM_INVALID; + } + } + return GRAPH_SUCCESS; +} + graphStatus Impl::GetSupportedOptions(const std::map &in, std::map &out) { for (auto &ele : in) { @@ -475,29 +507,12 @@ graphStatus Impl::CheckOptions(const std::map &options } // Check options build_mode and build_step. - std::string build_mode; - auto it = options_.find(BUILD_MODE); - if (it != options_.end() && !(it->second.empty())) { - if (build_mode_options.find(it->second) == build_mode_options.end()) { - GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildMode]:%s is unsupported. Please check!", it->second.c_str()); - return GRAPH_PARAM_INVALID; - } - build_mode = it->second; - } - it = options_.find(BUILD_STEP); - if (it != options_.end() && !(it->second.empty())) { - if (build_step_options.find(it->second) == build_step_options.end()) { - GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildStep]:%s is unsupported. Please check!", it->second.c_str()); - return GRAPH_PARAM_INVALID; - } - } else { - if (build_mode == BUILD_MODE_TUNING) { - GELOGE(GRAPH_PARAM_INVALID, "[Check][BuildMode] tuning must specify build step. 
Please check!"); - return GRAPH_PARAM_INVALID; - } + ret = CheckBuildModeAndBuildStep(); + if (ret != GRAPH_SUCCESS) { + return ret; } // Check option EXEC_DISABLE_REUSED_MEMORY - it = options_.find(ge::ir_option::EXEC_DISABLE_REUSED_MEMORY); + auto it = options_.find(ge::ir_option::EXEC_DISABLE_REUSED_MEMORY); if (it != options_.end() && (CheckDisableReuseMemoryParamValid(it->second) != GRAPH_SUCCESS)) { return GRAPH_PARAM_INVALID; } @@ -505,6 +520,18 @@ graphStatus Impl::CheckOptions(const std::map &options if (ge::CheckModifyMixlistParamValid(options_) != GRAPH_SUCCESS) { return GRAPH_PARAM_INVALID; } + // Check option OP_PRECISION_MODE + it = options_.find(ge::ir_option::OP_PRECISION_MODE); + if (it != options_.end() && !it->second.empty() && !ge::CheckInputPathValid(it->second)) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ge::ir_option::OP_PRECISION_MODE, it->second, "path is not found"})); + GELOGE(GRAPH_PARAM_INVALID, "[Check][OP_PRECISION_MODE] %s not found", it->second.c_str()); + return GRAPH_PARAM_INVALID; + } + if (it != options_.end()) { + GELOGI("Option set successfully, option_key=%s, option_value=%s", + ge::ir_option::OP_PRECISION_MODE, it->second.c_str()); + } // Check Input Format if (options_.find(kInputFormat) != options_.end()) { return CheckInputFormat(options_[kInputFormat]); diff --git a/ge/offline/main.cc b/ge/offline/main.cc index a78ff392..54bded4b 100755 --- a/ge/offline/main.cc +++ b/ge/offline/main.cc @@ -106,10 +106,14 @@ DEFINE_string(out_nodes, "", "Optional; output nodes designated by users." "Format: \"node_name1:0;node_name1:1;node_name2:0\""); +DEFINE_string(op_precision_mode, "", "Optional; operator precision mode configuration file path"); + DEFINE_string(precision_mode, "force_fp16", "Optional; precision mode." "Support force_fp16, force_fp32, allow_mix_precision, allow_fp32_to_fp16, must_keep_origin_dtype."); +DEFINE_string(modify_mixlist, "", "Optional; operator mixed precision configuration file path"); + DEFINE_string(keep_dtype, "", "Optional; config file to specify the precision used by the operator during compilation."); @@ -192,8 +196,11 @@ DEFINE_string(log, "null", "Optional; generate atc log. Support debug, info, war DEFINE_string(dump_mode, "0", "Optional; generate infershape json,only support 1 , 0."); -DEFINE_int32(op_debug_level, 0, "Optional; configure debug level of compiler. 0(default): close debug;" - "1: open TBE compiler, export ccec file and TBE instruction mapping file; 2: open ccec compiler"); +DEFINE_int32(op_debug_level, 0, "Optional; configure debug level of compiler. 
0(default): close debug; "
+    "1: open TBE compiler, export ccec file and TBE instruction mapping file; 2: open ccec compiler; "
+    "3: disable debug, and keep generating kernel file (.o and .json); 4: disable debug, "
+    "keep generating kernel file (.o and .json) and generate the operator CCE file (.cce) "
+    "and the UB fusion computing description file (.json)");
 DEFINE_string(enable_scope_fusion_passes, "", "Optional; validate the non-general scope fusion pass,"
     "multiple names can be set and separated by ','.");
 DEFINE_string(debug_dir, "", "Optional; the path to save the intermediate files of operator compilation");
@@ -210,8 +217,6 @@ DEFINE_string(display_model_info, "0", "Optional; display model info");

 DEFINE_string(device_id, "0", "Optional; user device id");

-DEFINE_string(modify_mixlist, "", "Optional; operator mixed precision configuration file path");
-
 class GFlagUtils {
  public:
  /**
@@ -298,8 +303,10 @@ class GFlagUtils {
       "\"l1_optimize\", \"off_optimize\"\n"
       " --mdl_bank_path Set the path of the custom repository generated after model tuning.\n"
       "\n[Operator Tuning]\n"
+      " --op_precision_mode Set the path of operator precision mode configuration file (.ini)\n"
       " --precision_mode precision mode, support force_fp16(default), force_fp32, allow_mix_precision, "
       "allow_fp32_to_fp16, must_keep_origin_dtype.\n"
+      " --modify_mixlist Set the path of operator mixed precision configuration file.\n"
       " --keep_dtype Retains the precision of certain operators in inference "
       "scenarios by using a configuration file.\n"
       " --auto_tune_mode Set tune mode. E.g.: \"GA,RL\", support configure multiple, split by ,\n"
@@ -315,7 +322,8 @@ class GFlagUtils {
       " 2: Enable TBE pipe_all, generate the operator CCE file and Python-CCE mapping file "
       "(.json), and enable the CCE compiler -O0-g.\n"
       " 3: Disable debug, and keep generating kernel file (.o and .json)\n"
-      " --modify_mixlist Set the path of operator mixed precision configuration file.\n"
+      " 4: Disable debug, keep generating kernel file (.o and .json) and generate the "
+      "operator CCE file (.cce) and the UB fusion computing description file (.json)\n"
       "\n[Debug]\n"
       " --save_original_model Control whether to output original model. E.g.: true: output original model\n"
       " --log Generate log with level.
Support debug, info, warning, error, null\n" @@ -365,6 +373,14 @@ class GFlagUtils { FLAGS_op_select_implmode) != ge::SUCCESS, ret = ge::FAILED, "[Check][ImplMode]check optypelist_for_implmode and op_select_implmode failed!"); + if (!FLAGS_op_precision_mode.empty() && !ge::CheckInputPathValid(FLAGS_op_precision_mode, "--op_precision_mode")) { + ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, + {"op_precision_mode", FLAGS_op_precision_mode.c_str(), + "path is not found"}); + GELOGE(ge::FAILED, "[Check][op_precision_mode] %s not found", FLAGS_op_precision_mode.c_str()); + ret = ge::FAILED; + } + if (ge::CheckModifyMixlistParamValid(FLAGS_precision_mode, FLAGS_modify_mixlist) != ge::SUCCESS) { ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, {"modify_mixlist", FLAGS_modify_mixlist.c_str(), @@ -1049,6 +1065,7 @@ static void SetEnvForSingleOp(std::map &options) { options.emplace(ge::RUN_FLAG, flag_off); options.emplace(ge::OPTION_GRAPH_RUN_MODE, flag_off); options.emplace(ge::SINGLE_OP_FLAG, flag_on); + options.emplace(ge::OP_PRECISION_MODE, FLAGS_op_precision_mode); options.emplace(ge::PRECISION_MODE, FLAGS_precision_mode); options.emplace(ge::SOC_VERSION, FLAGS_soc_version); options.emplace(ge::CORE_TYPE, FLAGS_core_type); @@ -1076,6 +1093,14 @@ domi::Status GenerateSingleOp(const std::string& json_file_path) { ge::CheckImplmodeParamValid(FLAGS_optypelist_for_implmode, FLAGS_op_select_implmode) != ge::SUCCESS, return ge::FAILED, "[Check][ImplmodeParam] fail for input optypelist_for_implmode and op_select_implmode."); + if (!FLAGS_op_precision_mode.empty() && !ge::CheckInputPathValid(FLAGS_op_precision_mode, "--op_precision_mode")) { + ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, + {"op_precision_mode", FLAGS_op_precision_mode.c_str(), + "path is not found"}); + GELOGE(ge::FAILED, "[Check][op_precision_mode] %s not found", FLAGS_op_precision_mode.c_str()); + return ge::FAILED; + } + if (ge::CheckModifyMixlistParamValid(FLAGS_precision_mode, FLAGS_modify_mixlist) != ge::SUCCESS) { ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, {"modify_mixlist", FLAGS_modify_mixlist.c_str(), @@ -1159,6 +1184,7 @@ domi::Status GenerateOmModel() { options.insert(std::pair(string(ge::CALIBRATION_CONF_FILE), FLAGS_cal_conf)); options.insert(std::pair(string(ge::OUTPUT_NODE_NAME), FLAGS_out_nodes)); options.insert(std::pair(string(ge::INSERT_OP_FILE), FLAGS_insert_op_conf)); + options.insert(std::pair(string(ge::OP_PRECISION_MODE), FLAGS_op_precision_mode)); options.insert(std::pair(string(ge::PRECISION_MODE), FLAGS_precision_mode)); options.insert(std::pair(string(ge::TUNE_DEVICE_IDS), FLAGS_device_id)); diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index aabbe19c..54e62d32 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -82,6 +82,18 @@ Status InnerSession::Initialize() { return ret; } + //Check option OP_PRECISION_MODE + auto iter = all_options.find(ge::OP_PRECISION_MODE); + if (iter != all_options.end() && !iter->second.empty() && !ge::CheckInputPathValid(iter->second)) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ge::OP_PRECISION_MODE, iter->second, "path is not found"})); + GELOGE(PARAM_INVALID, "[Check][OP_PRECISION_MODE] %s not found", iter->second.c_str()); + return FAILED; + } + if (iter != all_options.end()) { + GELOGI("Option 
set successfully, option_key=%s, option_value=%s", + ge::OP_PRECISION_MODE.c_str(), iter->second.c_str()); + } // Check option modify_mixlist if (ge::CheckModifyMixlistParamValid(all_options) != ge::SUCCESS) { return FAILED; diff --git a/inc/external/ge/ge_api_types.h b/inc/external/ge/ge_api_types.h index fbd6c020..6f5bbfbf 100644 --- a/inc/external/ge/ge_api_types.h +++ b/inc/external/ge/ge_api_types.h @@ -113,6 +113,7 @@ const char *const INPUT_FP16_NODES = "ge.INPUT_NODES_SET_FP16"; const char *const OP_DEBUG_LEVEL = "ge.opDebugLevel"; const char *const PERFORMANCE_MODE = "ge.performance_mode"; const char *const MODIFY_MIXLIST = "ge.exec.modify_mixlist"; +const char *const OP_PRECISION_MODE = "ge.exec.op_precision_mode"; } // namespace configure_option // Configure stream num by Session constructor options param, // its value should be int32_t type, default value is "1" @@ -326,6 +327,8 @@ const std::string PERFORMANCE_MODE = "ge.performance_mode"; const std::string MODIFY_MIXLIST = "ge.exec.modify_mixlist"; +const std::string OP_PRECISION_MODE = "ge.exec.op_precision_mode"; + // Graph run mode enum GraphRunMode { PREDICTION = 0, TRAIN }; @@ -405,6 +408,7 @@ static const char *const OP_BANK_UPDATE = ge::OP_BANK_UPDATE_FLAG.c_str(); static const char *const OP_DEBUG_LEVEL = ge::OP_DEBUG_LEVEL.c_str(); static const char *const PERFORMANCE_MODE = ge::PERFORMANCE_MODE.c_str(); static const char *const MODIFY_MIXLIST = ge::MODIFY_MIXLIST.c_str(); +static const char *const OP_PRECISION_MODE = ge::OP_PRECISION_MODE.c_str(); // for interface: aclgrphBuildModel #ifdef __GNUC__ @@ -416,6 +420,7 @@ const std::set ir_builder_suppported_options = {INPUT_FORMAT, DYNAMIC_IMAGE_SIZE, DYNAMIC_DIMS, INSERT_OP_FILE, + OP_PRECISION_MODE, PRECISION_MODE, TUNE_DEVICE_IDS, EXEC_DISABLE_REUSED_MEMORY, diff --git a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc index 197c9300..60f33ed3 100644 --- a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc +++ b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc @@ -391,6 +391,43 @@ TEST(UtestIrBuild, check_modify_mixlist_param) { EXPECT_EQ(ret, GRAPH_PARAM_INVALID); } +TEST(UtestIrBuild, check_op_precision_mode_param) { + Graph graph = BuildIrGraph1(); + const std::map build_options = { + {"ge.exec.op_precision_mode", "./op_precision_mode.ini"} + }; + ModelBufferData model; + + auto ret = aclgrphBuildModel(graph, build_options, model); + EXPECT_EQ(ret, GRAPH_PARAM_INVALID); +} + +TEST(UtestIrBuild, check_build_model_and_build_step) { + Graph graph_1 = BuildIrGraph1(); + const std::map build_options_1 = { + {"ge.buildMode", "xxx"} + }; + ModelBufferData model_1; + auto ret_1 = aclgrphBuildModel(graph_1, build_options_1, model_1); + EXPECT_NE(ret_1, GRAPH_SUCCESS); + + Graph graph_2 = BuildIrGraph1(); + const std::map build_options_2 = { + {"ge.buildStep", "xxx"} + }; + ModelBufferData model_2; + auto ret_2 = aclgrphBuildModel(graph_2, build_options_2, model_2); + EXPECT_NE(ret_2, GRAPH_SUCCESS); + + Graph graph_3 = BuildIrGraph1(); + const std::map build_options_3 = { + {"ge.buildMode", "tuning"} + }; + ModelBufferData model_3; + auto ret_3 = aclgrphBuildModel(graph_3, build_options_3, model_3); + EXPECT_NE(ret_3, GRAPH_SUCCESS); +} + TEST(UtestIrBuild, atc_cfg_optype_param) { ComputeGraphPtr graph = BuildComputeGraph1(); FILE *fp = fopen("./keep.txt", "w+"); diff --git a/tests/ut/ge/session/ge_api_unittest.cc b/tests/ut/ge/session/ge_api_unittest.cc index 2cabc4a3..9a7058f3 100644 --- a/tests/ut/ge/session/ge_api_unittest.cc +++ 
b/tests/ut/ge/session/ge_api_unittest.cc @@ -64,7 +64,7 @@ TEST_F(UtestGeApi, build_graph_success) { ASSERT_NE(ret, SUCCESS); } -TEST_F(UtestGeApi, ge_initialize) { +TEST_F(UtestGeApi, ge_initialize_modify_mixlist) { std::map options = { {ge::MODIFY_MIXLIST, "/mixlist.json"} }; diff --git a/tests/ut/ge/session/inner_session_unittest.cc b/tests/ut/ge/session/inner_session_unittest.cc index ecad56d6..0d20f06a 100644 --- a/tests/ut/ge/session/inner_session_unittest.cc +++ b/tests/ut/ge/session/inner_session_unittest.cc @@ -53,4 +53,14 @@ TEST_F(Utest_Inner_session, initialize) { auto ret = inner_session.Initialize(); EXPECT_NE(ret, ge::SUCCESS); } + +TEST_F(Utest_Inner_session, check_op_precision_mode) { + std::map options = { + {ge::OP_PRECISION_MODE, "./op_precision_mode.ini"} + }; + uint64_t session_id = 1; + InnerSession inner_session(session_id, options); + auto ret = inner_session.Initialize(); + EXPECT_NE(ret, ge::SUCCESS); +} } // namespace ge From 48be801e80aaac9e2b06d1c209fb92adb07064b4 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 23 Jun 2021 16:49:10 +0800 Subject: [PATCH 074/226] Fix mem leak and recursive depth protection --- ge/common/ge/tbe_plugin_manager.cc | 14 +++++++ ge/session/omg.cc | 13 ++++++ ge/single_op/single_op_model.cc | 8 ++-- ge/single_op/single_op_model.h | 2 +- ge/single_op/task/aicpu_task_builder.cc | 8 ++-- ge/single_op/task/aicpu_task_builder.h | 4 +- ge/single_op/task/op_task.cc | 4 +- ge/single_op/task/op_task.h | 1 - tests/ut/ge/CMakeLists.txt | 1 + .../ge/common/tbe_plugin_manager_unittest.cc | 40 +++++++++++++++++++ tests/ut/ge/session/omg_omg_unittest.cc | 9 +++++ .../ge/single_op/single_op_model_unittest.cc | 24 +++++++++++ 12 files changed, 112 insertions(+), 16 deletions(-) create mode 100644 tests/ut/ge/common/tbe_plugin_manager_unittest.cc diff --git a/ge/common/ge/tbe_plugin_manager.cc b/ge/common/ge/tbe_plugin_manager.cc index 94ba8a9a..c876e300 100755 --- a/ge/common/ge/tbe_plugin_manager.cc +++ b/ge/common/ge/tbe_plugin_manager.cc @@ -105,17 +105,29 @@ void TBEPluginManager::ProcessSoFullName(vector &file_list, string &caff } void TBEPluginManager::FindParserSo(const string &path, vector &file_list, string &caffe_parser_path) { + static uint32_t temp_depth = 0; + static const uint32_t max_recursive_depth = 20; // For recursive depth protection + + temp_depth++; + if (temp_depth >= max_recursive_depth) { + GELOGW("Recursive depth is become %u, Please check input!", temp_depth); + temp_depth--; + return; + } + // Path, change to absolute path string real_path = RealPath(path.c_str()); // Plugin path does not exist if (real_path.empty()) { GELOGW("RealPath is empty."); + temp_depth--; return; } INT32 is_dir = mmIsDir(real_path.c_str()); // Lib plugin path not exist if (is_dir != EN_OK) { GELOGW("%s is not a dir. errmsg:%s", real_path.c_str(), strerror(errno)); + temp_depth--; return; } @@ -123,6 +135,7 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis auto ret = mmScandir(real_path.c_str(), &entries, nullptr, nullptr); if (ret < EN_OK) { GELOGW("scan dir failed. 
path = %s, ret = %d, errmsg = %s", real_path.c_str(), ret, strerror(errno)); + temp_depth--; return; } for (int i = 0; i < ret; ++i) { @@ -142,6 +155,7 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis } } mmScandirFree(entries, ret); + temp_depth--; } void TBEPluginManager::GetPluginSoFileList(const string &path, vector &file_list, string &caffe_parser_path) { diff --git a/ge/session/omg.cc b/ge/session/omg.cc index f7f3def7..8d826043 100755 --- a/ge/session/omg.cc +++ b/ge/session/omg.cc @@ -221,14 +221,25 @@ static Status ParseOutputFp16NodesFormat(const string &is_output_fp16) { } void FindParserSo(const string &path, vector &file_list, string &caffe_parser_path) { + static uint32_t temp_depth = 0; + static const uint32_t max_recursive_depth = 20; // For recursive depth protection + + temp_depth++; + if (temp_depth >= max_recursive_depth) { + GELOGW("Recursive depth is become %u, Please check input!", temp_depth); + temp_depth--; + return; + } // path, Change to absolute path string real_path = RealPath(path.c_str()); if (real_path.empty()) { // plugin path does not exist + temp_depth--; return; } struct stat stat_buf; if ((stat(real_path.c_str(), &stat_buf) != 0) || (!S_ISDIR(stat_buf.st_mode))) { GELOGI("The path %s is not a directory.", real_path.c_str()); + temp_depth--; return; } @@ -237,6 +248,7 @@ void FindParserSo(const string &path, vector &file_list, string &caffe_p if (nullptr == dir) { // plugin path does not exist GELOGW("Open directory %s failed.", path.c_str()); + temp_depth--; return; } @@ -260,6 +272,7 @@ void FindParserSo(const string &path, vector &file_list, string &caffe_p FindParserSo(full_name, file_list, caffe_parser_path); } closedir(dir); + temp_depth--; return; } diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index eefa5165..1d00127a 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -380,7 +380,7 @@ Status SingleOpModel::BuildTaskList(StreamResource *stream_resource, SingleOp &s uint64_t singleop_kernel_id = aicpu_kernel_id++; GELOGI("Build singleOp TfTask, kernel_id = %lu", singleop_kernel_id); GE_CHK_STATUS_RET_NOLOG( - BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, false, depend_compute_flag, singleop_kernel_id)); + BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, depend_compute_flag, singleop_kernel_id)); aicpu_task->SetModelArgs(model_name_, model_id_); ParseArgTable(aicpu_task, single_op); single_op.tasks_.emplace_back(aicpu_task); @@ -458,7 +458,7 @@ Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask * } Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, - bool dynamic_flag, bool& depend_compute_flag, uint64_t kernel_id) { + bool& depend_compute_flag, uint64_t kernel_id) { auto iter = op_list_.find(kernel_def.op_index()); if (iter == op_list_.end()) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, @@ -476,7 +476,7 @@ Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiC return ACL_ERROR_GE_MEMORY_ALLOCATION; } auto builder = AiCpuTaskBuilder(iter->second->GetOpDesc(), kernel_def); - auto ret = builder.BuildTask(*aicpu_task, model_params_, dynamic_flag, kernel_id); + auto ret = builder.BuildTask(*aicpu_task, model_params_, kernel_id); if (ret != SUCCESS) { GELOGE(ret, "[Build][Task] failed, kernel_id:%lu.", kernel_id); return ret; @@ -631,7 +631,7 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, bool depend_compute_flag = false; 
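// [Aside] The recursion guards added to both FindParserSo copies earlier in this patch
// pair a manual temp_depth++ with a temp_depth-- before every early return; forgetting a
// single decrement would permanently consume recursion budget. A scope guard makes the
// pairing automatic. Minimal illustrative sketch only -- DepthGuard and
// FindSoFilesRecursively are hypothetical names, not GE code:

#include <cstdint>

class DepthGuard {
 public:
  explicit DepthGuard(uint32_t &depth) : depth_(depth) { ++depth_; }
  ~DepthGuard() { --depth_; }  // runs on every return path, early or not
  bool Exceeded(uint32_t max_depth) const { return depth_ >= max_depth; }

 private:
  uint32_t &depth_;
};

void FindSoFilesRecursively(const char *path) {
  static uint32_t depth = 0;
  DepthGuard guard(depth);
  if (guard.Exceeded(20U)) {  // same limit as max_recursive_depth in the patch
    return;                   // no manual decrement needed here
  }
  // ... resolve `path`, scan the directory, and recurse into subdirectories ...
  (void)path;
}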
uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id); - GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, true, + GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, depend_compute_flag, dynamic_singleop_kernel_id)); if (depend_compute_flag) { if (i >= tasks.size() - 1) { diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index bf3ad050..747d99e9 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -70,7 +70,7 @@ class SingleOpModel { Status BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &dynamic_single_op); Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task); Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, - bool dynamic_flag, bool& depend_compute_flag, uint64_t kernel_id); + bool& depend_compute_flag, uint64_t kernel_id); Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id); Status BuildModelTaskKernel(StreamResource *stream_resource, const domi::TaskDef &task_def, DynamicSingleOp &single_op); diff --git a/ge/single_op/task/aicpu_task_builder.cc b/ge/single_op/task/aicpu_task_builder.cc index 805b1306..1b945280 100755 --- a/ge/single_op/task/aicpu_task_builder.cc +++ b/ge/single_op/task/aicpu_task_builder.cc @@ -63,7 +63,7 @@ namespace ge { return SUCCESS; } - Status AiCpuTaskBuilder::InitWorkspaceAndIO(AiCpuTask &task, const SingleOpModelParam ¶m, bool dynamic_flag) { + Status AiCpuTaskBuilder::InitWorkspaceAndIO(AiCpuTask &task, const SingleOpModelParam ¶m) { if (kernel_def_.args_size() > sizeof(STR_FWK_OP_KERNEL)) { GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size]sizeof STR_FWK_OP_KERNEL is: %lu, but args_size is: %d", sizeof(STR_FWK_OP_KERNEL), kernel_def_.args_size()); @@ -83,9 +83,8 @@ namespace ge { return SUCCESS; } - Status AiCpuTaskBuilder::BuildTask(ge::AiCpuTask &task, const SingleOpModelParam ¶m, - bool dynamic_flag, uint64_t kernel_id) { - GE_CHK_STATUS_RET_NOLOG(InitWorkspaceAndIO(task, param, dynamic_flag)); + Status AiCpuTaskBuilder::BuildTask(ge::AiCpuTask &task, const SingleOpModelParam ¶m, uint64_t kernel_id) { + GE_CHK_STATUS_RET_NOLOG(InitWorkspaceAndIO(task, param)); STR_FWK_OP_KERNEL fwk_op_kernel = {0}; auto ret = SetFmkOpKernel(task.io_addr_, task.workspace_addr_, fwk_op_kernel); @@ -124,7 +123,6 @@ namespace ge { task.arg_size_ = sizeof(STR_FWK_OP_KERNEL); task.op_type_ = op_desc_->GetName(); task.task_info_ = kernel_def_.task_info(); - task.dynamic_flag_ = dynamic_flag; task.kernel_id_ = kernel_id; auto debug_info = BuildTaskUtils::GetTaskInfo(op_desc_); diff --git a/ge/single_op/task/aicpu_task_builder.h b/ge/single_op/task/aicpu_task_builder.h index fe9c9bc2..eca91254 100755 --- a/ge/single_op/task/aicpu_task_builder.h +++ b/ge/single_op/task/aicpu_task_builder.h @@ -29,12 +29,12 @@ namespace ge { AiCpuTaskBuilder(const OpDescPtr &op_desc, const domi::KernelExDef &kernel_def); ~AiCpuTaskBuilder() = default; - Status BuildTask(AiCpuTask &task, const SingleOpModelParam ¶m, bool dynamic_flag, uint64_t kernel_id); + Status BuildTask(AiCpuTask &task, const SingleOpModelParam ¶m, uint64_t kernel_id); private: static Status SetKernelArgs(void **args, STR_FWK_OP_KERNEL &kernel); Status SetFmkOpKernel(void *io_addr, void *ws_addr, STR_FWK_OP_KERNEL &kernel); - Status InitWorkspaceAndIO(AiCpuTask &task, const SingleOpModelParam ¶m, bool dynamic_flag); 
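// [Aside] With dynamic_flag dropped from this signature, AiCpuTask stops tracking
// conditional ownership of workspace_addr_: the op_task.cc hunk further below removes the
// `if (dynamic_flag_)` guard and frees workspace_addr_ unconditionally in ~AiCpuTask.
// That is safe provided workspace_addr_ stays null until allocated and FreeHbm tolerates
// a null pointer -- an assumption about FreeHbm's behavior, not something this patch shows.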
+ Status InitWorkspaceAndIO(AiCpuTask &task, const SingleOpModelParam ¶m); const OpDescPtr op_desc_; const domi::KernelExDef &kernel_def_; diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index db2fdfeb..b6a78f9e 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -621,9 +621,7 @@ Status AiCpuBaseTask::UpdateIoAddr(const vector &inputs, const vecto AiCpuTask::~AiCpuTask() { FreeHbm(args_); FreeHbm(io_addr_); - if (dynamic_flag_) { - FreeHbm(workspace_addr_); - } + FreeHbm(workspace_addr_); FreeHbm(copy_workspace_buf_); FreeHbm(copy_ioaddr_dev_); FreeHbm(copy_input_release_flag_dev_); diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 2fbb4dc7..19320bc0 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -192,7 +192,6 @@ class AiCpuTask : public AiCpuBaseTask { // host addr std::vector io_addr_host_; - bool dynamic_flag_ = false; // for copy task void *copy_task_args_buf_ = nullptr; void *copy_workspace_buf_ = nullptr; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index cf573343..e3aecf80 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -818,6 +818,7 @@ set(MULTI_PARTS_TEST_FILES "session/inner_session_unittest.cc" "session/session_manager_unittest.cc" "common/host_cpu_engine_unittest.cc" + "common/tbe_plugin_manager_unittest.cc" ) set(GE_OPT_INFO_TEST_FILES diff --git a/tests/ut/ge/common/tbe_plugin_manager_unittest.cc b/tests/ut/ge/common/tbe_plugin_manager_unittest.cc new file mode 100644 index 00000000..16c1650b --- /dev/null +++ b/tests/ut/ge/common/tbe_plugin_manager_unittest.cc @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#define protected public +#define private public +#include "common/ge/tbe_plugin_manager.h" +#undef private +#undef protected + +namespace ge { +class UtestTBEPluginManager: public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UtestTBEPluginManager, CheckFindParserSo) { + string path = ""; + vector file_list = {}; + string caffe_parser_path = ""; + TBEPluginManager::Instance().FindParserSo(path, file_list, caffe_parser_path); + path = "/lib64"; + TBEPluginManager::Instance().FindParserSo(path, file_list, caffe_parser_path); +} +} // namespace ge diff --git a/tests/ut/ge/session/omg_omg_unittest.cc b/tests/ut/ge/session/omg_omg_unittest.cc index 334df319..6176b7c0 100644 --- a/tests/ut/ge/session/omg_omg_unittest.cc +++ b/tests/ut/ge/session/omg_omg_unittest.cc @@ -48,4 +48,13 @@ TEST_F(UtestOmg, display_model_info_success) { attr_def->mutable_list()->add_i(4); PrintModelInfo(&model_def, 1); } + +TEST_F(UtestOmg, find_parser_so) { + string path = ""; + vector file_list = {}; + string caffe_parser_path = ""; + FindParserSo(path, file_list, caffe_parser_path); + path = "/lib64"; + FindParserSo(path, file_list, caffe_parser_path); +} } // namespace ge diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index 1975f9f4..e4a53340 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -310,3 +310,27 @@ TEST_F(UtestSingleOpModel, BuildTaskList) { MemcpyAsyncTask mem_task; ASSERT_EQ(mem_task.LaunchKernel(0), SUCCESS); } + +TEST_F(UtestSingleOpModel, build_aicpu_task) { + ComputeGraphPtr graph = make_shared("single_op"); + GeModelPtr ge_model = make_shared(); + ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph)); + shared_ptr model_task_def = make_shared(); + ge_model->SetModelTaskDef(model_task_def); + + domi::TaskDef *task_def = model_task_def->add_task(); + task_def->set_type(RT_MODEL_TASK_KERNEL_EX); + + string model_data_str = "123456789"; + SingleOpModel model("model", model_data_str.c_str(), model_data_str.size()); + std::mutex stream_mu; + rtStream_t stream = nullptr; + rtStreamCreate(&stream, 0); + DynamicSingleOp single_op(0, &stream_mu, stream); + model.model_helper_.model_ = ge_model; + auto op_desc = std::make_shared("add", "Add"); + NodePtr node = graph->AddNode(op_desc); + model.op_list_[0] = node; + StreamResource *res = new (std::nothrow) StreamResource(1); + ASSERT_EQ(model.BuildTaskListForDynamicOp(res, single_op), SUCCESS); +} From 0ed42e96f711bbe5287f77e1f04bbed02266d7f2 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 23 Jun 2021 17:29:54 +0800 Subject: [PATCH 075/226] Fix mem leak and recursive depth protection --- ge/common/ge/tbe_plugin_manager.cc | 1 + ge/hybrid/node_executor/task_context.cc | 2 ++ 2 files changed, 3 insertions(+) diff --git a/ge/common/ge/tbe_plugin_manager.cc b/ge/common/ge/tbe_plugin_manager.cc index c876e300..6a461a6d 100755 --- a/ge/common/ge/tbe_plugin_manager.cc +++ b/ge/common/ge/tbe_plugin_manager.cc @@ -156,6 +156,7 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis } mmScandirFree(entries, ret); temp_depth--; + return; } void TBEPluginManager::GetPluginSoFileList(const string &path, vector &file_list, string &caffe_parser_path) { diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index 78ccb54a..c0464c87 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ 
b/ge/hybrid/node_executor/task_context.cc @@ -43,6 +43,7 @@ TaskContext::~TaskContext() { output_tensor->Destroy(); } } + ReleaseWorkspace(); } void TaskContext::ReleaseWorkspace() { @@ -50,6 +51,7 @@ void TaskContext::ReleaseWorkspace() { for (auto ws_addr : workspaces_) { execution_context_->allocator->Deallocate(ws_addr); } + workspaces_.clear(); } std::unique_ptr TaskContext::Create(NodeState *node_state, SubgraphContext *subgraph_context) { From 350855d3dca3921f48d42553a5e1db923bd400a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=B6=9B?= Date: Wed, 23 Jun 2021 09:51:02 +0000 Subject: [PATCH 076/226] update third_party/fwkacllib/inc/runtime/rt_model.h. --- third_party/fwkacllib/inc/runtime/rt_model.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/third_party/fwkacllib/inc/runtime/rt_model.h b/third_party/fwkacllib/inc/runtime/rt_model.h index 74539222..30b8f053 100644 --- a/third_party/fwkacllib/inc/runtime/rt_model.h +++ b/third_party/fwkacllib/inc/runtime/rt_model.h @@ -50,8 +50,9 @@ typedef enum tagModelTaskType { RT_MODEL_TASK_STREAM_LABEL_SWITCH_BY_INDEX, RT_MODEL_TASK_STREAM_LABEL_GOTO, RT_MODEL_TASK_MODEL_EXIT, - RT_MODEL_TASK_FFTS_TASK, RT_MODEL_TASK_ALL_KERNEL, + RT_MODEL_TASK_PROFILER_TRACE_EX, + RT_MODEL_TASK_FFTS_TASK, } rtModelTaskType_t; typedef enum tagModelStreamType { From d30366d2c96061538a7f6fed2830b94f0a7e3403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=B6=9B?= Date: Wed, 23 Jun 2021 17:53:02 +0800 Subject: [PATCH 077/226] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!1840=20:=20opt=20info'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CMakeLists.txt | 1 - ge/CMakeLists.txt | 8 -- ge/ge_opt_info/ge_opt_info.cc | 58 --------- ge/ge_opt_info/ge_opt_info.h | 31 ----- ge/graph/manager/graph_manager.cc | 7 - tests/CMakeLists.txt | 1 - tests/depends/opt_info/CMakeLists.txt | 37 ------ tests/depends/opt_info/src/opt_info_stub.cc | 46 ------- tests/framework/cmake/graphengine.cmake | 2 - tests/st/testcase/test_ge_opt_info.cc | 123 ------------------ tests/ut/ge/CMakeLists.txt | 14 -- .../ut/ge/ge_opt_info/ge_opt_info_unittest.cc | 82 ------------ third_party/fwkacllib/inc/opt_info/opt_info.h | 34 ----- 13 files changed, 444 deletions(-) delete mode 100644 ge/ge_opt_info/ge_opt_info.cc delete mode 100644 ge/ge_opt_info/ge_opt_info.h delete mode 100644 tests/depends/opt_info/CMakeLists.txt delete mode 100644 tests/depends/opt_info/src/opt_info_stub.cc delete mode 100644 tests/st/testcase/test_ge_opt_info.cc delete mode 100644 tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc delete mode 100644 third_party/fwkacllib/inc/opt_info/opt_info.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 41520b14..e3cc1e32 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,7 +95,6 @@ else () #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) - find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) if(PLATFORM STREQUAL "train") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 5db2e7a9..2b9122da 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -434,7 +434,6 @@ set(TRAIN_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" - "ge_opt_info/ge_opt_info.cc" ) set(INFER_SRC_LIST @@ -712,7 +711,6 
@@ set(INFER_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" - "ge_opt_info/ge_opt_info.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) @@ -767,13 +765,11 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external - ${GE_CODE_DIR}/../abl/licctrl #### blue zone ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain - ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_runner PRIVATE @@ -796,7 +792,6 @@ target_link_libraries(ge_runner PRIVATE runtime error_manager ascend_hal_stub - opt_feature -Wl,--as-needed json -lrt @@ -844,13 +839,11 @@ target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external - ${GE_CODE_DIR}/../abl/licctrl #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain - ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_compiler PRIVATE @@ -870,7 +863,6 @@ target_link_libraries(ge_compiler PRIVATE error_manager slog runtime_compile - opt_feature -Wl,--as-needed json -lrt diff --git a/ge/ge_opt_info/ge_opt_info.cc b/ge/ge_opt_info/ge_opt_info.cc deleted file mode 100644 index 8c1b84ab..00000000 --- a/ge/ge_opt_info/ge_opt_info.cc +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ge_opt_info/ge_opt_info.h" - -#include -#include -#include "graph/ge_local_context.h" -#include "ge/ge_api_types.h" -#include "common/debug/ge_log.h" -#include "opt_info.h" - -namespace ge { -Status GeOptInfo::SetOptInfo() { - std::string soc_ver; - graphStatus ret = GetThreadLocalContext().GetOption(SOC_VERSION, soc_ver); - if (ret != GRAPH_SUCCESS) { - REPORT_CALL_ERROR("E19999", "Get soc version failed."); - GELOGE(FAILED, "[Get][SocVersion]Get soc version failed."); - return FAILED; - } - GELOGD("Soc version:%s.", soc_ver.c_str()); - std::map opt_info; - // the first arg does not work at present. 
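// [Aside] The remainder of the deleted SetOptInfo below merges the tuned opt_info map
// into the current graph options with std::map::emplace. emplace never overwrites an
// existing key, so options already present (e.g. set by the user) keep their values.
// Standalone demonstration, not GE code:

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> graph_options{{"opt_module.fe", "user_value"}};
  graph_options.emplace("opt_module.fe", "all");    // key exists: left untouched
  graph_options.emplace("opt_module.pass", "all");  // new key: inserted
  assert(graph_options["opt_module.fe"] == "user_value");
  assert(graph_options["opt_module.pass"] == "all");
  return 0;
}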
- if (gelc::GetOptInfo(gelc::kOffline, soc_ver, opt_info) != gelc::SUCCESS) { - REPORT_CALL_ERROR("E19999", "Get optional information failed, is_offline:%d, soc version:%s", - gelc::kOffline, soc_ver.c_str()); - GELOGE(FAILED, "[Get][OptInfo]Get optional information failed, is_offline:%d, soc version:%s", - gelc::kOffline, soc_ver.c_str()); - return FAILED; - } - // do nothing if get empty information - if (opt_info.empty()) { - GELOGI("Optional information is empty."); - return SUCCESS; - } - std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); - for (const auto &itr : opt_info) { - graph_options.emplace(itr.first, itr.second); - GELOGI("Get optional information success, key:%s, value:%s.", itr.first.c_str(), itr.second.c_str()); - } - GetThreadLocalContext().SetGraphOption(graph_options); - return SUCCESS; -} -} // namespace ge diff --git a/ge/ge_opt_info/ge_opt_info.h b/ge/ge_opt_info/ge_opt_info.h deleted file mode 100644 index 935dff25..00000000 --- a/ge/ge_opt_info/ge_opt_info.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GE_OPT_INFO_GE_OPT_INFO_H_ -#define GE_OPT_INFO_GE_OPT_INFO_H_ - -#include "ge/ge_api_error_codes.h" -#include "register/register_types.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { - public: - GeOptInfo() = default; - static Status SetOptInfo(); -}; -} // namespace ge - -#endif // GE_OPT_INFO_GE_OPT_INFO_H_ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 0a4633ad..b862a7d6 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -27,7 +27,6 @@ #include "common/math/math_util.h" #include "common/thread_pool.h" #include "common/dump/dump_manager.h" -#include "ge_opt_info/ge_opt_info.h" #include "analyzer/analyzer.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -1002,12 +1001,6 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector - c_sec -) - -target_include_directories(opt_feature_stub INTERFACE ${CMAKE_CURRENT_LIST_DIR}/src) diff --git a/tests/depends/opt_info/src/opt_info_stub.cc b/tests/depends/opt_info/src/opt_info_stub.cc deleted file mode 100644 index df518c4b..00000000 --- a/tests/depends/opt_info/src/opt_info_stub.cc +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "opt_info.h" -#include -#include -#include -#include - -namespace gelc { -namespace { -const std::vector kSocVersions = {"Ascend910"}; -} - -void SetAllOptInfo(std::map &opt_infos) { - opt_infos.emplace("opt_module.fe", "all"); - opt_infos.emplace("opt_module.pass", "all"); - opt_infos.emplace("opt_module.op_tune", "all"); - opt_infos.emplace("opt_module.rl_tune", "all"); - opt_infos.emplace("opt_module.aoe", "all"); -} - -Status GetOptInfo(WorkMode mode, const std::string &soc_ver, - std::map &opt_infos) { - if (std::find(kSocVersions.begin(), kSocVersions.end(), soc_ver)== kSocVersions.end()) { - SetAllOptInfo(opt_infos); - return SUCCESS; - } - opt_infos.emplace("opt_module.fe", "all"); - opt_infos.emplace("opt_module.pass", "all"); - opt_infos.emplace("opt_module.op_tune", "all"); - return SUCCESS; -} -} // namespace gelc diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake index c4380016..81aa00cc 100644 --- a/tests/framework/cmake/graphengine.cmake +++ b/tests/framework/cmake/graphengine.cmake @@ -103,7 +103,6 @@ list(APPEND INCLUDE_DIRECTORIES "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" - "${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info" "${GE_CODE_DIR}/tests/ut/ge" "${GE_CODE_DIR}/tests/ut/common" "${CMAKE_BINARY_DIR}" @@ -118,7 +117,6 @@ list(APPEND STUB_LIBS runtime_stub profiler_stub hccl_stub - opt_feature_stub error_manager_stub ascend_protobuf json diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc deleted file mode 100644 index 457473b1..00000000 --- a/tests/st/testcase/test_ge_opt_info.cc +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include "easy_graph/graph/box.h" -#include "easy_graph/graph/node.h" -#include "easy_graph/builder/graph_dsl.h" -#include "easy_graph/builder/box_builder.h" -#include "easy_graph/layout/graph_layout.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" -#include "graph/graph.h" -#include "graph/compute_graph.h" -#include "framework/common/types.h" -#include "graph/debug/ge_attr_define.h" -#include "ge_graph_dsl/graph_dsl.h" -#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" -#define protected public -#define private public -#include "ge_opt_info/ge_opt_info.h" -#undef private -#undef protected - -namespace ge { -class STEST_opt_info : public testing::Test { - protected: - void SetUp() {} - void TearDown() {} -}; - -TEST_F(STEST_opt_info, get_opt_info_all) { - std::map options = {{ge::SOC_VERSION, "Ascend310"}}; - GetThreadLocalContext().SetGlobalOption(options); - - /// data1 data2 - /// \ / - /// add - // build graph - DEF_GRAPH(g1) { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - CHAIN(NODE("data2", DATA)->NODE("add")); - }); - - auto graph = ToGeGraph(g1); - - // new session & add graph - Session session(options); - auto ret = session.AddGraph(1, graph, options); - EXPECT_EQ(ret, SUCCESS); - // build input tensor - std::vector inputs; - // build_graph through session - ret = session.BuildGraph(1, inputs); - EXPECT_EQ(ret, SUCCESS); - - std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.rl_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.aoe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(STEST_opt_info, get_opt_info_success) { - std::map options = {{ge::SOC_VERSION, "Ascend910"}}; - GetThreadLocalContext().SetGlobalOption(options); - - /// data1 data2 - /// \ / - /// add - // build graph - DEF_GRAPH(g1) { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - CHAIN(NODE("data2", DATA)->NODE("add")); - }); - - auto graph = ToGeGraph(g1); - - // new session & add graph - Session session(options); - auto ret = session.AddGraph(1, graph, options); - EXPECT_EQ(ret, SUCCESS); - // build input tensor - std::vector inputs; - // build_graph through session - ret = session.BuildGraph(1, inputs); - EXPECT_EQ(ret, SUCCESS); - - std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} -} // namespace ge diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index cf573343..06b3e0f2 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -62,7 +62,6 @@ include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc) 
include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain) -include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info) include_directories(${GE_CODE_DIR}/tests/ut/ge) include_directories(${GE_CODE_DIR}/tests/ut/common) include_directories(${CMAKE_BINARY_DIR}) @@ -347,7 +346,6 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" "${GE_CODE_DIR}/ge/session/omg.cc" - "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" ) set(COMMON_FORMAT_SRC_FILES @@ -455,7 +453,6 @@ set(GRAPH_EXECUTE_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc" - "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.h" ) @@ -631,10 +628,6 @@ set(SINGLE_OP_SRC_FILES "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model.cc" ) -set(GE_OPT_INFO_SRC_FILES - "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" -) - # test files set(COMMON_TEST_FILES "graph/passes/graph_builder_utils.cc" @@ -820,10 +813,6 @@ set(MULTI_PARTS_TEST_FILES "common/host_cpu_engine_unittest.cc" ) -set(GE_OPT_INFO_TEST_FILES - "ge_opt_info/ge_opt_info_unittest.cc" -) - set(GENERATOR_TEST_FILES "generator/ge_generator_unittest.cc" ) @@ -875,7 +864,6 @@ list(APPEND COMMON_SHARED_LIBRARIES mmpa_stub hccl_stub error_manager_stub - opt_feature_stub ascend_protobuf json ) @@ -1121,12 +1109,10 @@ target_link_libraries(ut_libge_multiparts_utest # libge_others_utest add_executable(ut_libge_others_utest - ${GE_OPT_INFO_SRC_FILES} ${COMMON_TEST_FILES} ${PASS_TEST_FILES} ${EXECUTE_TEST_FILES} ${OTHERS_TEST_FILES} - ${GE_OPT_INFO_TEST_FILES} ) target_compile_options(ut_libge_others_utest PRIVATE diff --git a/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc b/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc deleted file mode 100644 index 20c123e9..00000000 --- a/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include - -#define protected public -#define private public -#include "ge_opt_info/ge_opt_info.h" -#include "graph/ge_local_context.h" -#include "external/ge/ge_api_types.h" -#undef private -#undef protected - -namespace ge { -class UTEST_opt_info : public testing::Test { - protected: - void SetUp() {} - void TearDown() {} -}; - -TEST_F(UTEST_opt_info, get_opt_info_success) { - std::map options = {{ge::SOC_VERSION, "Ascend910"}}; - GetThreadLocalContext().SetGlobalOption(options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::SUCCESS); - std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(UTEST_opt_info, get_opt_info_all) { - std::map global_options = {{ge::SOC_VERSION, "Ascend310"}}; - GetThreadLocalContext().SetGlobalOption(global_options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::SUCCESS); - std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); - auto itr = graph_options.find("opt_module.fe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.pass"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.op_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.rl_tune"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); - itr = graph_options.find("opt_module.aoe"); - EXPECT_NE(itr, graph_options.end()); - EXPECT_EQ(itr->second, "all"); -} - -TEST_F(UTEST_opt_info, get_opt_info_failed) { - std::map options; - GetThreadLocalContext().SetGlobalOption(options); - auto ret = GeOptInfo::SetOptInfo(); - EXPECT_EQ(ret, ge::FAILED); -} - -} // namespace ge diff --git a/third_party/fwkacllib/inc/opt_info/opt_info.h b/third_party/fwkacllib/inc/opt_info/opt_info.h deleted file mode 100644 index ea9bb529..00000000 --- a/third_party/fwkacllib/inc/opt_info/opt_info.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include - -namespace gelc { -using Status = uint32_t; -using WorkMode = uint32_t; -const Status SUCCESS = 0x0; -const Status FAILED = 0xFFFFFFFF; -const WorkMode kOffline = 0x0; -const WorkMode kInline = 0x01; - -extern "C" { -__attribute__((visibility ("default"))) -Status GetOptInfo(WorkMode mode, const std::string &soc_ver, - std::map &opt_info_map); -} -} // namespace gelc - From e7279c1a6c9a5df56da7af51de325bcd4e70c204 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 23 Jun 2021 18:00:27 +0800 Subject: [PATCH 078/226] Fix ut. --- ge/common/ge/tbe_plugin_manager.cc | 1 - ge/single_op/single_op_model.cc | 13 ++++--------- ge/single_op/single_op_model.h | 3 +-- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/ge/common/ge/tbe_plugin_manager.cc b/ge/common/ge/tbe_plugin_manager.cc index 6a461a6d..c876e300 100755 --- a/ge/common/ge/tbe_plugin_manager.cc +++ b/ge/common/ge/tbe_plugin_manager.cc @@ -156,7 +156,6 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis } mmScandirFree(entries, ret); temp_depth--; - return; } void TBEPluginManager::GetPluginSoFileList(const string &path, vector &file_list, string &caffe_parser_path) { diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 1d00127a..08a0fcbc 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -376,11 +376,10 @@ Status SingleOpModel::BuildTaskList(StreamResource *stream_resource, SingleOp &s } else if (task_type == RT_MODEL_TASK_KERNEL_EX) { GELOGD("Building AICPU_TF task"); AiCpuTask *aicpu_task = nullptr; - bool depend_compute_flag = false; uint64_t singleop_kernel_id = aicpu_kernel_id++; GELOGI("Build singleOp TfTask, kernel_id = %lu", singleop_kernel_id); GE_CHK_STATUS_RET_NOLOG( - BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, depend_compute_flag, singleop_kernel_id)); + BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, singleop_kernel_id)); aicpu_task->SetModelArgs(model_name_, model_id_); ParseArgTable(aicpu_task, single_op); single_op.tasks_.emplace_back(aicpu_task); @@ -457,8 +456,7 @@ Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask * return SUCCESS; } -Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, - bool& depend_compute_flag, uint64_t kernel_id) { +Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id) { auto iter = op_list_.find(kernel_def.op_index()); if (iter == op_list_.end()) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, @@ -481,7 +479,6 @@ Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiC GELOGE(ret, "[Build][Task] failed, kernel_id:%lu.", kernel_id); return ret; } - depend_compute_flag = (aicpu_task->GetUnknownType() == DEPEND_COMPUTE); *task = aicpu_task.release(); return SUCCESS; @@ -628,12 +625,10 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, } GELOGD("Building AICPU_TF task"); AiCpuTask *aicpu_task = nullptr; - bool depend_compute_flag = false; uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id); - GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, - depend_compute_flag, dynamic_singleop_kernel_id)); - if (depend_compute_flag) { + GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, dynamic_singleop_kernel_id)); + if (aicpu_task->GetUnknownType() 
== DEPEND_COMPUTE) { if (i >= tasks.size() - 1) { GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the fourth operator was not found."); REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found."); diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index 747d99e9..b7f6b42a 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -69,8 +69,7 @@ class SingleOpModel { Status BuildTaskList(StreamResource *stream_resource, SingleOp &single_op); Status BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &dynamic_single_op); Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task); - Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, - bool& depend_compute_flag, uint64_t kernel_id); + Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id); Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id); Status BuildModelTaskKernel(StreamResource *stream_resource, const domi::TaskDef &task_def, DynamicSingleOp &single_op); From 3996f5df109662f59e7eca969ff9ce9392a8badf Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 23 Jun 2021 19:32:49 +0800 Subject: [PATCH 079/226] Fix bug of recursive depth protection. --- ge/common/ge/tbe_plugin_manager.cc | 16 +++++----------- ge/common/ge/tbe_plugin_manager.h | 3 ++- ge/session/omg.cc | 16 +++++----------- inc/framework/omg/omg.h | 3 ++- 4 files changed, 14 insertions(+), 24 deletions(-) diff --git a/ge/common/ge/tbe_plugin_manager.cc b/ge/common/ge/tbe_plugin_manager.cc index c876e300..70c1ab94 100755 --- a/ge/common/ge/tbe_plugin_manager.cc +++ b/ge/common/ge/tbe_plugin_manager.cc @@ -104,14 +104,12 @@ void TBEPluginManager::ProcessSoFullName(vector &file_list, string &caff } } -void TBEPluginManager::FindParserSo(const string &path, vector &file_list, string &caffe_parser_path) { - static uint32_t temp_depth = 0; +void TBEPluginManager::FindParserSo(const string &path, vector &file_list, + string &caffe_parser_path, uint32_t recursive_depth) { static const uint32_t max_recursive_depth = 20; // For recursive depth protection - temp_depth++; - if (temp_depth >= max_recursive_depth) { - GELOGW("Recursive depth is become %u, Please check input!", temp_depth); - temp_depth--; + if (recursive_depth >= max_recursive_depth) { + GELOGW("Recursive depth is become %u, Please check input!", recursive_depth); return; } @@ -120,14 +118,12 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis // Plugin path does not exist if (real_path.empty()) { GELOGW("RealPath is empty."); - temp_depth--; return; } INT32 is_dir = mmIsDir(real_path.c_str()); // Lib plugin path not exist if (is_dir != EN_OK) { GELOGW("%s is not a dir. errmsg:%s", real_path.c_str(), strerror(errno)); - temp_depth--; return; } @@ -135,7 +131,6 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis auto ret = mmScandir(real_path.c_str(), &entries, nullptr, nullptr); if (ret < EN_OK) { GELOGW("scan dir failed. 
path = %s, ret = %d, errmsg = %s", real_path.c_str(), ret, strerror(errno)); - temp_depth--; return; } for (int i = 0; i < ret; ++i) { @@ -151,11 +146,10 @@ void TBEPluginManager::FindParserSo(const string &path, vector &file_lis ProcessSoFullName(file_list, caffe_parser_path, full_name, caffe_parser_so_suff, aicpu_so_suff, aicpu_host_so_suff); } else { - FindParserSo(full_name, file_list, caffe_parser_path); + FindParserSo(full_name, file_list, caffe_parser_path, recursive_depth + 1); } } mmScandirFree(entries, ret); - temp_depth--; } void TBEPluginManager::GetPluginSoFileList(const string &path, vector &file_list, string &caffe_parser_path) { diff --git a/ge/common/ge/tbe_plugin_manager.h b/ge/common/ge/tbe_plugin_manager.h index 4bd8c6e3..eada3e64 100755 --- a/ge/common/ge/tbe_plugin_manager.h +++ b/ge/common/ge/tbe_plugin_manager.h @@ -57,7 +57,8 @@ class TBEPluginManager { static void ProcessSoFullName(vector &file_list, string &caffe_parser_path, string &full_name, const string &caffe_parser_so_suff, const string &aicpu_so_suff, const string &aicpu_host_so_suff); - static void FindParserSo(const string &path, vector &file_list, string &caffe_parser_path); + static void FindParserSo(const string &path, vector &file_list, string &caffe_parser_path, + uint32_t recursive_depth = 0); static void GetPluginSoFileList(const string &path, vector &file_list, string &caffe_parser_path); static void GetCustomOpPath(std::string &customop_path); void LoadCustomOpLib(); diff --git a/ge/session/omg.cc b/ge/session/omg.cc index 8d826043..a2ee176f 100755 --- a/ge/session/omg.cc +++ b/ge/session/omg.cc @@ -220,26 +220,22 @@ static Status ParseOutputFp16NodesFormat(const string &is_output_fp16) { return SUCCESS; } -void FindParserSo(const string &path, vector &file_list, string &caffe_parser_path) { - static uint32_t temp_depth = 0; +void FindParserSo(const string &path, vector &file_list, + string &caffe_parser_path, uint32_t recursive_depth) { static const uint32_t max_recursive_depth = 20; // For recursive depth protection - temp_depth++; - if (temp_depth >= max_recursive_depth) { - GELOGW("Recursive depth is become %u, Please check input!", temp_depth); - temp_depth--; + if (recursive_depth >= max_recursive_depth) { + GELOGW("Recursive depth is become %u, Please check input!", recursive_depth); return; } // path, Change to absolute path string real_path = RealPath(path.c_str()); if (real_path.empty()) { // plugin path does not exist - temp_depth--; return; } struct stat stat_buf; if ((stat(real_path.c_str(), &stat_buf) != 0) || (!S_ISDIR(stat_buf.st_mode))) { GELOGI("The path %s is not a directory.", real_path.c_str()); - temp_depth--; return; } @@ -248,7 +244,6 @@ void FindParserSo(const string &path, vector &file_list, string &caffe_p if (nullptr == dir) { // plugin path does not exist GELOGW("Open directory %s failed.", path.c_str()); - temp_depth--; return; } @@ -269,10 +264,9 @@ void FindParserSo(const string &path, vector &file_list, string &caffe_p continue; } - FindParserSo(full_name, file_list, caffe_parser_path); + FindParserSo(full_name, file_list, caffe_parser_path, recursive_depth + 1); } closedir(dir); - temp_depth--; return; } diff --git a/inc/framework/omg/omg.h b/inc/framework/omg/omg.h index a0cdb449..1c39d203 100644 --- a/inc/framework/omg/omg.h +++ b/inc/framework/omg/omg.h @@ -91,7 +91,8 @@ GE_FUNC_VISIBILITY Status ConvertFwkModelToJson(domi::FrameworkType framework, c GE_FUNC_VISIBILITY void GetGroupName(ge::proto::ModelDef &model); -GE_FUNC_VISIBILITY void FindParserSo(const 
string &path, vector &fileList, string &caffe_parser_path); +GE_FUNC_VISIBILITY void FindParserSo(const string &path, vector &fileList, string &caffe_parser_path, + uint32_t recursive_depth = 0); GE_FUNC_VISIBILITY Status DumpInfershapeJson(const ge::Graph &graph, const char *json_file); From 3440d44062a01b1fd871e5ce069047b279c5afaf Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 23 Jun 2021 20:31:18 +0800 Subject: [PATCH 080/226] Fix multi control from one node --- ge/ge_local_engine/engine/host_cpu_engine.cc | 9 +++------ ge/hybrid/model/hybrid_model_builder.cc | 4 ---- ge/hybrid/model/node_item.cc | 15 ++++++--------- ge/hybrid/model/node_item.h | 2 +- .../node_executor/hccl/hccl_node_executor.cc | 1 + ge/hybrid/node_executor/rts/rts_node_executor.cc | 7 +------ 6 files changed, 12 insertions(+), 26 deletions(-) diff --git a/ge/ge_local_engine/engine/host_cpu_engine.cc b/ge/ge_local_engine/engine/host_cpu_engine.cc index 488a5ee8..d9b67736 100755 --- a/ge/ge_local_engine/engine/host_cpu_engine.cc +++ b/ge/ge_local_engine/engine/host_cpu_engine.cc @@ -14,14 +14,14 @@ * limitations under the License. */ #include "ge_local_engine/engine/host_cpu_engine.h" -#include "graph/common/omg_util.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/tensor_adapter.h" +#include "graph/utils/node_utils.h" +#include "graph/utils/type_utils.h" #include "register/op_kernel_registry.h" #include "register/host_cpu_context.h" #include "common/ge/ge_util.h" #include "common/ge/plugin_manager.h" -#include "graph/utils/type_utils.h" #include "common/fp16_t.h" #include "common/math/math_util.h" @@ -123,10 +123,7 @@ bool HostCpuEngine::CheckSupported(const string &op_type) { } Status HostCpuEngine::FindOpKernel(const ge::NodePtr &node, std::unique_ptr &op_kernel) { - std::string op_type; - auto status = GetOriginalType(node, op_type); - GE_CHK_BOOL_EXEC_NOLOG(status == SUCCESS, return status); - + const std::string op_type = NodeUtils::GetNodeType(node); auto kernel = OpKernelRegistry::GetInstance().CreateHostCpuOp(op_type); if (kernel == nullptr) { GELOGD("Op of type %s is not supported by host cpu engine", op_type.c_str()); diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index c050875e..f6de6ef0 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -289,10 +289,6 @@ Status HybridModelBuilder::GetOrCreateNodeItem(const NodePtr &node, NodeItem **n return SUCCESS; } - if (node->GetType() == MEMCPYASYNC) { // Convert MemcpyAsync to Identity. 
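  // The in-graph type rewrite below is dropped; patch 085 later in this series maps
  // MEMCPYASYNC directly to IdentityNodeTask in the RTS executor, so the copy
  // behaviour is kept without mutating the OpDesc.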
- node->GetOpDesc()->SetType(IDENTITY); - } - std::unique_ptr new_node; GE_CHK_STATUS_RET(NodeItem::Create(node, new_node), "[Invoke][Create] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET_NOLOG(NodeExecutorManager::GetInstance().GetExecutor(*node, &new_node->node_executor)); diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc index 5c3d7db3..250562ce 100644 --- a/ge/hybrid/model/node_item.cc +++ b/ge/hybrid/model/node_item.cc @@ -15,9 +15,7 @@ */ #include "hybrid/model/node_item.h" -#include -#include "framework/common/debug/log.h" -#include "graph/common/omg_util.h" + #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" #include "hybrid/executor/worker/shape_inference_engine.h" @@ -98,8 +96,7 @@ Status ParseFusedSubgraph(NodeItem &node_item) { GE_CHECK_NOTNULL(node); auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(node, node_type)); + const std::string node_type = NodeUtils::GetNodeType(node); if (node_type == DATA) { GE_CHK_GRAPH_STATUS_RET(ParseInputMapping(*node, *op_desc, *fused_subgraph)); } else if (node_type == kNodeTypeRetVal) { @@ -409,8 +406,8 @@ void NodeItem::SetDataSend(NodeItem *node_item, int anchor_index) { void NodeItem::SetCtrlSend(NodeItem *node_item, uint32_t switch_index) { if (switch_index < switch_groups_.size()) { - std::vector &switch_group = switch_groups_[switch_index]; - switch_group.emplace_back(node_item); + auto &switch_group = switch_groups_[switch_index]; + switch_group.emplace(node_item); } else { ctrl_send_.insert(node_item); } @@ -433,8 +430,8 @@ void NodeItem::SetMergeCtrl(NodeItem *node_item, uint32_t merge_index) { } // this is StreamMerge node, node_item is StreamActive node. - std::vector &switch_group = switch_groups_[merge_index]; - switch_group.emplace_back(node_item); + auto &switch_group = switch_groups_[merge_index]; + switch_group.emplace(node_item); node_item->ctrl_send_.emplace(this); GELOGI("Node[%s] will control node[%s]", node_item->NodeName().c_str(), NodeName().c_str()); diff --git a/ge/hybrid/model/node_item.h b/ge/hybrid/model/node_item.h index ec66f094..12775b00 100644 --- a/ge/hybrid/model/node_item.h +++ b/ge/hybrid/model/node_item.h @@ -155,7 +155,7 @@ struct NodeItem { std::map data_recv_; // Recv data notify from std::set ctrl_send_; // Send ctrl notify to std::set ctrl_recv_; // Recv ctrl notify from - std::vector> switch_groups_; // Send ctrl notify to + std::vector> switch_groups_; // Send ctrl notify to std::shared_ptr kernel_task; std::unique_ptr fused_subgraph; diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc index b8819a42..3f887819 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc @@ -342,6 +342,7 @@ Status RdmaNodeTask::ExecuteAsync(TaskContext &context, std::function do GE_CHK_RT_RET(rtEventDestroy(evt)); } GELOGI("rdma callback success."); + return SUCCESS; }; HcclResult hccl_ret = HcomExecEnqueueRemoteAccess(context.GetNodeItem().NodeType(), addr_infos, callback); diff --git a/ge/hybrid/node_executor/rts/rts_node_executor.cc b/ge/hybrid/node_executor/rts/rts_node_executor.cc index 5cd971df..d52f56b9 100644 --- a/ge/hybrid/node_executor/rts/rts_node_executor.cc +++ b/ge/hybrid/node_executor/rts/rts_node_executor.cc @@ -17,13 +17,9 @@ #include "hybrid/node_executor/rts/rts_node_executor.h" #include "hybrid/node_executor/rts/rts_task_factory.h" 
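// A pattern repeated across this patch: the fallible two-step original-type lookup
//   std::string node_type;
//   GE_CHK_STATUS_RET(GetOriginalType(node, node_type), "Get original type failed.");
// collapses into the single non-failing accessor
//   const std::string node_type = NodeUtils::GetNodeType(node);
// Sketch of the before/after only; this assumes NodeUtils::GetNodeType resolves the
// original type of framework ops internally, which is what lets the omg_util helper go.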
-#include "framework/common/debug/log.h" #include "common/ge/ge_util.h" -#include "framework/common/types.h" -#include "graph/common/omg_util.h" #include "graph/utils/tensor_utils.h" #include "hybrid/model/hybrid_model.h" -#include "runtime/rt.h" namespace ge { namespace hybrid { @@ -133,8 +129,7 @@ Status ProfilingTraceNodeTask::ExecuteAsync(TaskContext &context, std::function< Status RtsNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node, shared_ptr &task) const { GE_CHECK_NOTNULL(node); GELOGD("[%s] Load for local task.", node->GetName().c_str()); - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(node, node_type), "Get original type failed."); + const std::string node_type = NodeUtils::GetNodeType(node); RtsNodeTaskPtr rts_task = RtsTaskFactory::GetInstance().Create(node_type); if (rts_task == nullptr) { GELOGE(UNSUPPORTED, "[%s] Unsupported RTS op type: %s", node->GetName().c_str(), node_type.c_str()); From 538394ffc5a60fe2b4c6f71de3033190e617173c Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 23 Jun 2021 20:53:47 +0800 Subject: [PATCH 081/226] Fix Guard for variable release --- ge/graph/load/model_manager/model_manager.cc | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 2cb31074..8bcaa23f 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -1394,9 +1394,19 @@ Status ModelManager::LaunchKernelCustAicpuSo(const string &kernel_name) { return SUCCESS; } + rtStream_t stream = nullptr; vector allocated_mem; + std::function callback = [&]() { + for (auto mem : allocated_mem) { + GE_CHK_RT(rtFree(mem)); + } + if (stream != nullptr) { + GE_CHK_RT(rtStreamDestroy(stream)); + } + }; + GE_MAKE_GUARD(release, callback); + rtError_t status; - rtStream_t stream = nullptr; vector v_cust_so; void *args = nullptr; @@ -1471,13 +1481,6 @@ Status ModelManager::LaunchKernelCustAicpuSo(const string &kernel_name) { GELOGE(RT_FAILED, "[Call][RtStreamSynchronize] fail, ret = 0x%X", status); return RT_ERROR_TO_GE_STATUS(status); } - std::function callback = [&]() { - for (auto mem : allocated_mem) { - GE_CHK_RT(rtFree(mem)); - } - GE_CHK_RT(rtStreamDestroy(stream)); - }; - GE_MAKE_GUARD(release, callback); GELOGI("Cpu kernel launch task success."); return SUCCESS; } From 8a1ec9739945e9292a6af47980370147d3931014 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 23 Jun 2021 21:03:49 +0800 Subject: [PATCH 082/226] Fix st. 
--- ge/session/omg.cc | 11 ++--------- inc/framework/omg/omg.h | 3 +-- tests/ut/ge/session/omg_omg_unittest.cc | 9 --------- 3 files changed, 3 insertions(+), 20 deletions(-) diff --git a/ge/session/omg.cc b/ge/session/omg.cc index a2ee176f..f7f3def7 100755 --- a/ge/session/omg.cc +++ b/ge/session/omg.cc @@ -220,14 +220,7 @@ static Status ParseOutputFp16NodesFormat(const string &is_output_fp16) { return SUCCESS; } -void FindParserSo(const string &path, vector &file_list, - string &caffe_parser_path, uint32_t recursive_depth) { - static const uint32_t max_recursive_depth = 20; // For recursive depth protection - - if (recursive_depth >= max_recursive_depth) { - GELOGW("Recursive depth is become %u, Please check input!", recursive_depth); - return; - } +void FindParserSo(const string &path, vector &file_list, string &caffe_parser_path) { // path, Change to absolute path string real_path = RealPath(path.c_str()); if (real_path.empty()) { // plugin path does not exist @@ -264,7 +257,7 @@ void FindParserSo(const string &path, vector &file_list, continue; } - FindParserSo(full_name, file_list, caffe_parser_path, recursive_depth + 1); + FindParserSo(full_name, file_list, caffe_parser_path); } closedir(dir); return; diff --git a/inc/framework/omg/omg.h b/inc/framework/omg/omg.h index 1c39d203..a0cdb449 100644 --- a/inc/framework/omg/omg.h +++ b/inc/framework/omg/omg.h @@ -91,8 +91,7 @@ GE_FUNC_VISIBILITY Status ConvertFwkModelToJson(domi::FrameworkType framework, c GE_FUNC_VISIBILITY void GetGroupName(ge::proto::ModelDef &model); -GE_FUNC_VISIBILITY void FindParserSo(const string &path, vector &fileList, string &caffe_parser_path, - uint32_t recursive_depth = 0); +GE_FUNC_VISIBILITY void FindParserSo(const string &path, vector &fileList, string &caffe_parser_path); GE_FUNC_VISIBILITY Status DumpInfershapeJson(const ge::Graph &graph, const char *json_file); diff --git a/tests/ut/ge/session/omg_omg_unittest.cc b/tests/ut/ge/session/omg_omg_unittest.cc index 6176b7c0..334df319 100644 --- a/tests/ut/ge/session/omg_omg_unittest.cc +++ b/tests/ut/ge/session/omg_omg_unittest.cc @@ -48,13 +48,4 @@ TEST_F(UtestOmg, display_model_info_success) { attr_def->mutable_list()->add_i(4); PrintModelInfo(&model_def, 1); } - -TEST_F(UtestOmg, find_parser_so) { - string path = ""; - vector file_list = {}; - string caffe_parser_path = ""; - FindParserSo(path, file_list, caffe_parser_path); - path = "/lib64"; - FindParserSo(path, file_list, caffe_parser_path); -} } // namespace ge From 55a5e8019df6c0e53c474c27e91a045d5492002c Mon Sep 17 00:00:00 2001 From: lianghao Date: Tue, 22 Jun 2021 21:49:01 +0800 Subject: [PATCH 083/226] FillKernel --- ge/host_kernels/fill_kernel.cc | 8 +++++ .../folding_kernel/fill_kernel_unittest.cc | 36 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/ge/host_kernels/fill_kernel.cc b/ge/host_kernels/fill_kernel.cc index e41c5bf3..ac46101b 100644 --- a/ge/host_kernels/fill_kernel.cc +++ b/ge/host_kernels/fill_kernel.cc @@ -45,6 +45,7 @@ Status FillKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vectorGetName().c_str()); GE_CHECK_NOTNULL(input.at(kFillDimsInputIndex)); GE_CHECK_NOTNULL(input.at(kFillDataInputIndex)); @@ -57,6 +58,13 @@ Status FillKernel::Compute(const ge::OpDescPtr op_desc_ptr, const std::vectorGetOutputDescPtr(0); + GE_CHECK_NOTNULL(output_desc); + if (output_desc->GetShape().IsUnknownShape()) { + GELOGD("Output is unknown shape, [%s] skip FillKernel.", op_desc_ptr->GetName().c_str()); + return NOT_CHANGED; + } + GeTensorPtr output_ptr; output_ptr = 
MakeShared(op_desc_ptr->GetOutputDesc(0)); if (output_ptr == nullptr) { diff --git a/tests/ut/ge/graph/passes/folding_kernel/fill_kernel_unittest.cc b/tests/ut/ge/graph/passes/folding_kernel/fill_kernel_unittest.cc index f58d6d9b..c0cce260 100644 --- a/tests/ut/ge/graph/passes/folding_kernel/fill_kernel_unittest.cc +++ b/tests/ut/ge/graph/passes/folding_kernel/fill_kernel_unittest.cc @@ -64,6 +64,7 @@ class UtestGraphPassesFoldingKernelFillKernel : public testing::Test { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -124,6 +125,7 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillBoolShape2And3) { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -230,6 +232,7 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillDimsHaveNegativeNumber) { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -284,6 +287,7 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillDimsTypeNotSupport) { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -310,6 +314,7 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillDimsOverflow) { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -336,6 +341,7 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillDimsMulDataTypeOverflow) { op_desc_ptr->AddInputDesc(dims_tensor_desc); op_desc_ptr->AddInputDesc(value_tensor_desc); + op_desc_ptr->AddOutputDesc(dims_tensor_desc); std::vector input = {dim_tensor, value_tensor}; std::vector outputs; @@ -343,3 +349,33 @@ TEST_F(UtestGraphPassesFoldingKernelFillKernel, FillDimsMulDataTypeOverflow) { EXPECT_EQ(PARAM_INVALID, status); } + +TEST_F(UtestGraphPassesFoldingKernelFillKernel, OutputdescUnknown) { + ge::OpDescPtr op_dims = std::make_shared(); + vector dims_vec = {2}; + vector dims_value_vec = {2, 3}; + GeTensorDesc dims_tensor_desc(GeShape(dims_vec), FORMAT_NCHW, DT_INT32); + GeTensorPtr dim_tensor = std::make_shared(dims_tensor_desc, (uint8_t *) dims_value_vec.data(), + dims_value_vec.size() * sizeof(int32_t)); + OpDescUtils::SetWeights(op_dims, dim_tensor); + + ge::OpDescPtr op_value = std::make_shared(); + vector data_vec = {1}; + GeTensorDesc value_tensor_desc(GeShape(), FORMAT_NCHW, DT_BOOL); + GeTensorPtr value_tensor = + std::make_shared(value_tensor_desc, (uint8_t *) data_vec.data(), data_vec.size() * sizeof(bool)); + OpDescUtils::SetWeights(op_value, value_tensor); + + op_desc_ptr->AddInputDesc(dims_tensor_desc); + op_desc_ptr->AddInputDesc(value_tensor_desc); + + vector out_vec = {-1, -1}; + GeTensorDesc out_tensor_desc(GeShape(out_vec), FORMAT_NCHW, DT_INT32); + op_desc_ptr->AddOutputDesc(out_tensor_desc); + + std::vector input = {dim_tensor, value_tensor}; + std::vector outputs; + Status status = kernel->Compute(op_desc_ptr, input, outputs); + + EXPECT_EQ(NOT_CHANGED, status); +} \ No newline at end of file From 
a55b872fc5de15af2c50e082a2f11b2d2d9fc38d Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 24 Jun 2021 09:32:14 +0800 Subject: [PATCH 084/226] UT for LaunchKernelCustAicpuSo --- ge/graph/load/model_manager/model_manager.cc | 4 +++- .../ut/ge/graph/load/model_manager_unittest.cc | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 8bcaa23f..3c31014d 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -1378,7 +1378,9 @@ Status ModelManager::LoadCustAicpuSo(const OpDescPtr &op_desc, const string &so_ Status ModelManager::LaunchKernelCustAicpuSo(const string &kernel_name) { GELOGD("Aicpu kernel launch task in, kernel name %s.", kernel_name.c_str()); std::lock_guard lock(cust_aicpu_mutex_); - if (cust_aicpu_so_.size() == 0) return SUCCESS; + if (cust_aicpu_so_.empty()) { + return SUCCESS; + } // get current context rtContext_t rt_cur_ctx = nullptr; auto rt_error = rtCtxGetCurrent(&rt_cur_ctx); diff --git a/tests/ut/ge/graph/load/model_manager_unittest.cc b/tests/ut/ge/graph/load/model_manager_unittest.cc index a3545b33..d9e4eabd 100644 --- a/tests/ut/ge/graph/load/model_manager_unittest.cc +++ b/tests/ut/ge/graph/load/model_manager_unittest.cc @@ -438,4 +438,22 @@ TEST_F(UtestModelManagerModelManager, test_data_input_tensor) { auto ret = mm.DataInputTensor(model_id,inputs); EXPECT_EQ(PARAM_INVALID, ret); // HybridDavinciModel::impl_ is null. } + +TEST_F(UtestModelManagerModelManager, test_launch_kernel_cust_aicpu) { + ModelManager mm; + + // cust_aicpu_so_ is empty. + EXPECT_EQ(mm.LaunchKernelCustAicpuSo("empty_cust_aicpu"), SUCCESS); + + // deleteCustOp after Launch will deleted. 
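+  // That is: the kernel bin cached under the current context's resource id
+  // (rtCtxGetCurrent is stubbed in UT, hence the fixed resource_id of 1 below)
+  // should be erased from cust_aicpu_so_ once LaunchKernelCustAicpuSo has run.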
+ uintptr_t resource_id = 1; // for rtCtxGetCurrent stub + std::vector kernel_bin(256); + auto &cust_resource_001 = mm.cust_aicpu_so_[resource_id]; + auto tbe_kernel = std::shared_ptr(new OpKernelBin("deleteCustOp", std::move(kernel_bin))); + auto &cust_opkernel_001 = cust_resource_001["deleteCustOp"] = tbe_kernel; + + EXPECT_FALSE(mm.cust_aicpu_so_.empty()); + EXPECT_EQ(mm.LaunchKernelCustAicpuSo("deleteCustOp"), SUCCESS); + EXPECT_TRUE(mm.cust_aicpu_so_.empty()); +} } // namespace ge From 4fd937eb056b6a82094428d0b31a4feb3cb1f19d Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 24 Jun 2021 15:49:11 +0800 Subject: [PATCH 085/226] Replace MemcpyAsyncNodeTask --- .../node_executor/rts/rts_node_executor.cc | 1 + ge/hybrid/node_executor/rts/rts_node_task.cc | 29 ------------------- ge/hybrid/node_executor/rts/rts_node_task.h | 5 ---- 3 files changed, 1 insertion(+), 34 deletions(-) diff --git a/ge/hybrid/node_executor/rts/rts_node_executor.cc b/ge/hybrid/node_executor/rts/rts_node_executor.cc index d52f56b9..e3058ee3 100644 --- a/ge/hybrid/node_executor/rts/rts_node_executor.cc +++ b/ge/hybrid/node_executor/rts/rts_node_executor.cc @@ -29,6 +29,7 @@ REGISTER_RTS_TASK_CREATOR(IDENTITY, IdentityNodeTask); REGISTER_RTS_TASK_CREATOR(IDENTITYN, IdentityNNodeTask); REGISTER_RTS_TASK_CREATOR(READVARIABLEOP, ReadVariableOpNodeTask); REGISTER_RTS_TASK_CREATOR(PROFILINGTRAININGTRACE, ProfilingTraceNodeTask); +REGISTER_RTS_TASK_CREATOR(MEMCPYASYNC, IdentityNodeTask); Status IdentityNodeTask::DoCopyTensor(TaskContext &context, int index) { auto input_desc = context.MutableInputDesc(index); diff --git a/ge/hybrid/node_executor/rts/rts_node_task.cc b/ge/hybrid/node_executor/rts/rts_node_task.cc index 9af54815..7b95f98a 100644 --- a/ge/hybrid/node_executor/rts/rts_node_task.cc +++ b/ge/hybrid/node_executor/rts/rts_node_task.cc @@ -43,7 +43,6 @@ namespace hybrid { REGISTER_RTS_TASK_CREATOR(STREAMACTIVE, StreamActiveNodeTask); REGISTER_RTS_TASK_CREATOR(STREAMSWITCH, StreamSwitchNodeTask); REGISTER_RTS_TASK_CREATOR(STREAMMERGE, StreamMergeNodeTask); -REGISTER_RTS_TASK_CREATOR(MEMCPYASYNC, MemcpyAsyncNodeTask); REGISTER_RTS_TASK_CREATOR(ENTER, PassThroughNodeTask); REGISTER_RTS_TASK_CREATOR(REFENTER, PassThroughNodeTask); @@ -168,34 +167,6 @@ Status StreamMergeNodeTask::ExecuteAsync(TaskContext &task_context, std::functio return SUCCESS; } -Status MemcpyAsyncNodeTask::ExecuteAsync(TaskContext &task_context, std::function done_callback) { - GELOGD("[%s] Start to execute.", task_context.GetNodeName()); - auto input_desc = task_context.MutableInputDesc(0); - GE_CHECK_NOTNULL(input_desc); - int64_t copy_size = 0; - GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorSizeInBytes(*input_desc, copy_size)); - // copy_size would not be negative since GetTensorSizeInBytes returned successfully. 
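// This bespoke task is removed in favour of IdentityNodeTask, which MEMCPYASYNC is
// re-pointed at via REGISTER_RTS_TASK_CREATOR in rts_node_executor.cc (see above).
// Presumably IdentityNodeTask::DoCopyTensor performs the equivalent sized
// device-to-device copy, making this duplicate rtMemcpyAsync path redundant.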
- if (copy_size > 0) { - const auto in_v = task_context.MutableInput(0); - const auto out_v = task_context.MutableOutput(0); - GE_CHECK_NOTNULL(in_v); - GE_CHECK_NOTNULL(out_v); - GELOGD("[%s] input size: %zu, output size: %zu, copy size: %ld", task_context.GetNodeName(), - in_v->GetSize(), out_v->GetSize(), copy_size); - GE_CHK_RT_RET(rtMemcpyAsync(out_v->MutableData(), out_v->GetSize(), in_v->GetData(), copy_size, - RT_MEMCPY_DEVICE_TO_DEVICE, task_context.GetStream())); - } else { - GELOGW("[%s] invalid copy size: %ld", task_context.GetNodeName(), copy_size); - } - - if (done_callback) { - GE_CHK_STATUS_RET(task_context.RegisterCallback(done_callback)); - } - - GELOGD("[%s] Done executing successfully.", task_context.GetNodeName()); - return SUCCESS; -} - Status PassThroughNodeTask::ExecuteAsync(TaskContext &task_context, std::function done_callback) { GELOGD("[%s] Start to execute.", task_context.GetNodeName()); const auto in_x = task_context.GetInput(0); // x diff --git a/ge/hybrid/node_executor/rts/rts_node_task.h b/ge/hybrid/node_executor/rts/rts_node_task.h index d7d63eb5..e18f9a8f 100644 --- a/ge/hybrid/node_executor/rts/rts_node_task.h +++ b/ge/hybrid/node_executor/rts/rts_node_task.h @@ -60,11 +60,6 @@ class StreamMergeNodeTask : public RtsNodeTask { Status ExecuteAsync(TaskContext &task_context, std::function done_callback) override; }; -class MemcpyAsyncNodeTask : public RtsNodeTask { - public: - Status ExecuteAsync(TaskContext &task_context, std::function done_callback) override; -}; - class PassThroughNodeTask : public RtsNodeTask { public: Status ExecuteAsync(TaskContext &task_context, std::function done_callback) override; From 27a9d527f9649d4e46c01052ab82b30533dca798 Mon Sep 17 00:00:00 2001 From: zhupuxu Date: Wed, 9 Jun 2021 14:13:41 +0800 Subject: [PATCH 086/226] step info Signed-off-by: zhupuxu --- ge/common/profiling/ge_profiling.cc | 39 ++++++++++++++++++- inc/framework/common/profiling/ge_profiling.h | 5 +++ tests/depends/profiler/src/profiler_stub.cc | 8 ++++ tests/ut/ge/CMakeLists.txt | 1 + .../ge_profiling_manager_unittest.cc | 19 ++++++++- 5 files changed, 69 insertions(+), 3 deletions(-) diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc index d0343326..48d12609 100644 --- a/ge/common/profiling/ge_profiling.cc +++ b/ge/common/profiling/ge_profiling.cc @@ -22,6 +22,7 @@ #include "graph/load/graph_loader.h" #include "init/gelib.h" #include "framework/common/ge_inner_error_codes.h" +#include "model/ge_model.h" namespace { const uint32_t kDeviceListIndex = 3; @@ -42,6 +43,10 @@ const std::map kProfCommandTypeMap = { {kProfCommandhandleFinalize, kProfilingFinalize}, {kProfCommandhandleModelSubscribe, kProfModelSubscribe}, {kProfCommandhandleModelUnsubscribe, kProfModelUnsubscribe}}; + +const uint64_t kModelId = ge::INVALID_MODEL_ID; +const uint16_t kStepStart = 0; +const uint16_t kStepEnd = 1; } // namespace bool TransProfConfigToParam(const ProfCommandHandleData &profCommand, vector &prof_config_params) { @@ -216,6 +221,36 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le return ge::SUCCESS; } -GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream) { - return ge::SUCCESS; +ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream) { + static bool is_first_run = true; + int32_t device_id = 0; + rtError_t rt_ret = rtGetDevice(&device_id); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(rt_ret, "[Get][LogicDeviceId]Failed, ret 0x%X", 
rt_ret); + REPORT_CALL_ERROR("E19999", "Get logic device id failed, ret 0x%X", rt_ret); + return ge::FAILED; + } + if (is_first_run && tag_id == kStepStart) { + GE_CHK_STATUS_RET_NOLOG(ge::ProfilingManager::Instance().ProfileStepInfo(index_id, + kModelId, + tag_id, + stream, + device_id)); + is_first_run = false; + return ge::SUCCESS; + } + if (!is_first_run && tag_id == kStepEnd) { + GE_CHK_STATUS_RET_NOLOG(ge::ProfilingManager::Instance().ProfileStepInfo(index_id, + kModelId, + tag_id, + stream, + device_id)); + is_first_run = true; + return ge::SUCCESS; + } + GELOGE(ge::FAILED, "Param tag_id:%u invalid when is_first_run is %d", tag_id, is_first_run); + REPORT_INPUT_ERROR("E10001", std::vector({"value", "parameter", "reason"}), + std::vector({std::to_string(tag_id), "tag_id", + "tag id must be 0 when first run, must be 1 when second run"})); + return ge::FAILED; } diff --git a/inc/framework/common/profiling/ge_profiling.h b/inc/framework/common/profiling/ge_profiling.h index a8de56a8..7a238b2f 100644 --- a/inc/framework/common/profiling/ge_profiling.h +++ b/inc/framework/common/profiling/ge_profiling.h @@ -43,6 +43,11 @@ GE_FUNC_VISIBILITY ge::Status RegProfCtrlCallback(MsprofCtrlCallback func); GE_FUNC_VISIBILITY ge::Status RegProfSetDeviceCallback(MsprofSetDeviceCallback func); GE_FUNC_VISIBILITY ge::Status RegProfReporterCallback(MsprofReporterCallback func); GE_FUNC_VISIBILITY ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t len); + +/// +/// @brief Output the profiling data of single operator in Pytorch, and does not support multithreading +/// @return Status result +/// GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream); #endif // INC_FRAMEWORK_COMMON_GE_PROFILING_H_ diff --git a/tests/depends/profiler/src/profiler_stub.cc b/tests/depends/profiler/src/profiler_stub.cc index 1ed49fd8..0b8eaa88 100644 --- a/tests/depends/profiler/src/profiler_stub.cc +++ b/tests/depends/profiler/src/profiler_stub.cc @@ -16,6 +16,7 @@ #include "toolchain/prof_engine.h" #include "toolchain/prof_mgr_core.h" +#include "runtime/base.h" void * ProfMgrStartUp(const ProfMgrCfg *cfg) { @@ -32,3 +33,10 @@ int Msprof::Engine::RegisterEngine(const std::string& module, const Msprof::Engi return 0; } +rtError_t rtSetMsprofReporterCallback(MsprofReporterCallback callback) { + return 0; +} + +rtError_t rtRegDeviceStateCallback(const char *regName, rtDeviceStateCallback callback) { + return 0; +} diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 3ea4d1a7..25fe4947 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -158,6 +158,7 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/opskernel_manager/ops_kernel_builder_manager.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/model_manager.cc" "${GE_CODE_DIR}/ge/common/profiling/profiling_manager.cc" + "${GE_CODE_DIR}/ge/common/profiling/ge_profiling.cc" "${GE_CODE_DIR}/ge/graph/manager/host_mem_manager.cc" "${GE_CODE_DIR}/ge/graph/manager/memory_api.cc" "${GE_CODE_DIR}/ge/session/inner_session.cc" diff --git a/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc b/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc index 9c615317..aae3f535 100644 --- a/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc +++ b/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc @@ -25,6 +25,7 @@ #define private public #include "common/profiling/profiling_manager.h" #include "graph/ge_local_context.h" +#include "inc/framework/common/profiling/ge_profiling.h" 
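// ProfSetStepInfo pairs its calls through a file-local is_first_run flag: a step
// must open with tag 0 (kStepStart) before it can close with tag 1 (kStepEnd).
// Expected call order, as the tests below exercise it:
//   ProfSetStepInfo(index_id, 0, stream);  // step start, flips is_first_run to false
//   ProfSetStepInfo(index_id, 1, stream);  // step end, flips it back to true
// Any other order returns FAILED with error E10001, and the single static flag is
// also why the header warns that the API does not support multithreading.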
#undef protected #undef private @@ -115,4 +116,20 @@ TEST_F(UtestGeProfilinganager, get_fp_bp_point_empty) { ProfilingManager::Instance().GetFpBpPoint(fp_point, bp_point); EXPECT_EQ(fp_point, ""); EXPECT_EQ(bp_point, ""); -} \ No newline at end of file +} + +TEST_F(UtestGeProfilinganager, set_step_info_success) { + uint64_t index_id = 0; + auto stream = (rtStream_t)0x1; + Status ret = ProfSetStepInfo(index_id, 0, stream); + EXPECT_EQ(ret, ge::SUCCESS); + ret = ProfSetStepInfo(index_id, 1, stream); + EXPECT_EQ(ret, ge::SUCCESS); +} + +TEST_F(UtestGeProfilinganager, set_step_info_failed) { + uint64_t index_id = 0; + auto stream = (rtStream_t)0x1; + Status ret = ProfSetStepInfo(index_id, 1, stream); + EXPECT_EQ(ret, ge::FAILED); +} From 75429f81a033a9de1b975936948aa6310b68523c Mon Sep 17 00:00:00 2001 From: wqtshg Date: Fri, 25 Jun 2021 10:22:30 +0800 Subject: [PATCH 087/226] update submodule --- metadef | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metadef b/metadef index 9e4a51a9..f3f137de 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 9e4a51a9602195b82e326b853f5adbfefc3972b6 +Subproject commit f3f137de034885f0c7394d7f04b41b08d450d2d2 diff --git a/parser b/parser index 79536a19..15a27afe 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit 79536a196f89cf7a1f5852ff7304b9a7d7b12eff +Subproject commit 15a27afefe45f2abdb78787d629163aab9437599 From 6ab6ee4c72aae0b3cc3752a9edb9c62ce8a5bb2f Mon Sep 17 00:00:00 2001 From: zhou_chao1993 Date: Fri, 25 Jun 2021 11:15:15 +0800 Subject: [PATCH 088/226] add ptr checker --- ge/hybrid/common/tensor_value.h | 3 ++- ge/hybrid/executor/hybrid_model_async_executor.cc | 12 ++++++++---- ge/hybrid/executor/hybrid_model_async_executor.h | 4 ++-- .../executor/hybrid_model_async_executor_unittest.cc | 2 +- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/ge/hybrid/common/tensor_value.h b/ge/hybrid/common/tensor_value.h index c20074fd..c041263b 100644 --- a/ge/hybrid/common/tensor_value.h +++ b/ge/hybrid/common/tensor_value.h @@ -95,7 +95,8 @@ class TensorValue { name_ = name; } - MemStorageType GetMemType() const { + Status GetMemType(MemStorageType &mem_type) const { + GE_CHECK_NOTNULL(buffer_); return buffer_->GetMemType(); } diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index a6f31522..e0dd768d 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -458,7 +458,8 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a auto tensor = TensorAdapter::AsTensor(ge_tensor); outputs.emplace_back(std::move(tensor)); } else { - BuildDeviceTensor(output_tensor, ge_tensor_desc, output_size, outputs); + GE_CHK_STATUS_RET(BuildDeviceTensor(output_tensor, ge_tensor_desc, output_size, outputs), + "Build device tensor failed"); output_data->blobs.emplace_back(output_tensor.Release(), static_cast(output_size), false, static_cast(kPlacementDevice)); } @@ -478,13 +479,15 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a return SUCCESS; } -void HybridModelAsyncExecutor::BuildDeviceTensor(TensorValue &output_tensor, GeTensorDesc &ge_tensor_desc, - int64_t output_size, std::vector &outputs) { +Status HybridModelAsyncExecutor::BuildDeviceTensor(TensorValue &output_tensor, GeTensorDesc &ge_tensor_desc, + int64_t output_size, std::vector &outputs) { GELOGD("Start to build device tensor"); - auto mem_type = 
output_tensor.GetMemType(); + MemStorageType mem_type = HBM; + GE_CHK_STATUS_RET(output_tensor.GetMemType(mem_type), "[Build][DeviceTensor] Get mem type failed"); GELOGD("Mem type is %d", static_cast(mem_type)); auto deleter = [=](uint8_t *device_data) { if (device_data != nullptr) { + GELOGD("Free device addr is %p", device_data); if (mem_type == RDMA_HBM) { MemManager::Instance().RdmaPoolInstance(RT_MEMORY_HBM).Free(device_data, device_id_); } else if (mem_type == HOST_DDR) { @@ -499,6 +502,7 @@ void HybridModelAsyncExecutor::BuildDeviceTensor(TensorValue &output_tensor, GeT auto tensor = TensorAdapter::AsTensor(ge_tensor); tensor.SetData(reinterpret_cast(output_tensor.Release()), static_cast(output_size), deleter); outputs.emplace_back(std::move(tensor)); + return SUCCESS; } Status HybridModelAsyncExecutor::Execute(const std::vector &inputs, diff --git a/ge/hybrid/executor/hybrid_model_async_executor.h b/ge/hybrid/executor/hybrid_model_async_executor.h index 5ae1a222..f94f6aa5 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.h +++ b/ge/hybrid/executor/hybrid_model_async_executor.h @@ -76,8 +76,8 @@ class HybridModelAsyncExecutor { OutputData *output_data); Status CopyOutputs(HybridModelExecutor::ExecuteArgs &args, OutputData *output_data, std::vector &outputs); - void BuildDeviceTensor(TensorValue &output_tensor, GeTensorDesc &ge_tensor_desc, int64_t output_size, - std::vector &outputs); + Status BuildDeviceTensor(TensorValue &output_tensor, GeTensorDesc &ge_tensor_desc, int64_t output_size, + std::vector &outputs); Status OnComputeDone(uint32_t data_index, uint32_t result_code, std::vector &outputs); diff --git a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc index 98bb78f2..f772af23 100644 --- a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc @@ -82,7 +82,7 @@ TEST_F(UtestHybridModelAsyncExecutor, BuildDeviceTensor) { GeTensorDesc ge_tensor_desc; int64_t output_size = 100; std::vector outputs; - executor.BuildDeviceTensor(tensor, ge_tensor_desc, output_size, outputs); + auto ret = executor.BuildDeviceTensor(tensor, ge_tensor_desc, output_size, outputs); auto size = tensor.GetSize(); ASSERT_EQ(size, 100); } From 01e49940a6393583c274271fdba7119f689a9d6c Mon Sep 17 00:00:00 2001 From: wuweikang Date: Thu, 13 May 2021 16:14:41 +0800 Subject: [PATCH 089/226] add copy graph --- ge/graph/manager/graph_manager.cc | 2 +- ge/hybrid/model/hybrid_model.h | 1 + ge/hybrid/model/hybrid_model_builder.cc | 47 +++++++++++++++---- ge/hybrid/model/hybrid_model_builder.h | 1 + ge/model/ge_root_model.h | 5 ++ .../executor/subgraph_executor_unittest.cc | 3 ++ .../model/hybrid_model_builder_unittest.cc | 26 ++++++++-- 7 files changed, 70 insertions(+), 15 deletions(-) diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 0a4633ad..0b27fdf3 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -3139,10 +3139,10 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { } // Avoid repeatively prerun for graphs owns same graph_id in online inference concurrency if (count > 1 && graph_node->GetBuildFlag()) { - graph_node->Lock(); GELOGD("Avoid repeatively prerun, graph_id:%u.", args.graph_id); // In online inference concurrency senario, graph_node is allowed to be locked for 'count' times graph_node->SetSemSize(count); + graph_node->Lock(); 
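      // Reordered so SetSemSize(count) takes effect before Lock() acquires the
      // semaphore, presumably to stop the first concurrent prerun request from
      // blocking on a semaphore that still has its default capacity.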
graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback })); GELOGI("[PreRunThread] Loop end. Start to run with cached build model."); diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h index 9821242a..77246e20 100644 --- a/ge/hybrid/model/hybrid_model.h +++ b/ge/hybrid/model/hybrid_model.h @@ -147,6 +147,7 @@ class HybridModel { GeRootModelPtr ge_root_model_; std::map input_nodes_; ComputeGraphPtr root_graph_; + ComputeGraphPtr orig_root_graph_; std::map device_variable_nodes_; //lint !e148 std::map host_variable_nodes_; //lint !e148 std::map> variable_tensors_; diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index bb405605..351f8a02 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -147,6 +147,7 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); hybrid_model_.model_name_ = ge_root_model_->GetModelName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); + GE_CHK_STATUS_RET(CopyGraph(), "[Invoke][CopyGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName()); @@ -171,11 +172,12 @@ Status HybridModelBuilder::Build() { Status HybridModelBuilder::BuildForSingleOp() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); + hybrid_model_.root_graph_ = ge_root_model_->GetRootGraph(); hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); auto ret = ge_root_model_->GetSubgraphInstanceNameToModel(); - const GeModelPtr ge_model = ret[ge_root_model_->GetRootGraph()->GetName()]; - GE_CHK_STATUS_RET(IndexTaskDefs(ge_root_model_->GetRootGraph(), ge_model), + const GeModelPtr ge_model = ret[hybrid_model_.root_graph_->GetName()]; + GE_CHK_STATUS_RET(IndexTaskDefs(hybrid_model_.root_graph_, ge_model), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName()); @@ -190,6 +192,29 @@ Status HybridModelBuilder::ValidateParams() { return SUCCESS; } +Status HybridModelBuilder::CopyGraph() { + GELOGD("Copy compute graph begin."); + auto root_graph = ge_root_model_->GetRootGraph(); + + ge_root_model_->IncreaseBuildTimes(); + std::string new_graph_name = ge_root_model_->GetRootGraph()->GetName() + "_" + + std::to_string(ge_root_model_->GetBuildTimes()); + ComputeGraphPtr new_root_graph = MakeShared(new_graph_name); + GE_CHECK_NOTNULL(new_root_graph); + int32_t depth = 0; + std::map node_old_2_new; + std::map op_desc_old_2_new; + graphStatus ret = GraphUtils::CopyComputeGraph(root_graph, new_root_graph, node_old_2_new, op_desc_old_2_new, depth); + if (ret != GRAPH_SUCCESS) { + GELOGE(GRAPH_FAILED, "Copy compute graph failed."); + return GRAPH_FAILED; + } + hybrid_model_.root_graph_ = new_root_graph; + + GELOGD("Copy compute graph[%s] success.", new_graph_name.c_str()); + return SUCCESS; +} + Status 
HybridModelBuilder::BuildNodeItem(const NodePtr &node, NodeItem &node_item) { auto op_desc = node->GetOpDesc(); GE_CHK_STATUS_RET(ParseForceInfershapeNodes(node, node_item), @@ -814,12 +839,13 @@ Status HybridModelBuilder::BuildOutputMapping(GraphItem &graph_item, } Status HybridModelBuilder::LoadGraph() { - auto root_graph = ge_root_model_->GetRootGraph(); + auto root_graph = hybrid_model_.root_graph_; if (!GetContext().GetHostExecFlag()) { std::shared_ptr merged_graph; GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", root_graph->GetDirectNodesSize(), root_graph->GetAllNodesSize()); + hybrid_model_.orig_root_graph_ = root_graph; GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(root_graph, merged_graph), "[Invoke][UnfoldSubgraphs]Failed to unfold subgraphs, model_name_:%s.", GetGraphName()); root_graph = std::move(merged_graph); @@ -877,6 +903,7 @@ Status HybridModelBuilder::LoadGraph() { } for (auto &it : hybrid_model_.known_shape_sub_models_) { auto node_item = MutableNodeItem(it.first); + GE_CHECK_NOTNULL(node_item); AscendString graph_name; GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); @@ -1125,7 +1152,9 @@ Status HybridModelBuilder::InitWeights() { sub_weight_buffer->GetSize()); auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); if (subgraph != ge_root_model_->GetRootGraph()) { - subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first); + subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_model.first); + } else { + subgraph = hybrid_model_.root_graph_; } GE_CHECK_NOTNULL(subgraph); hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer)); @@ -1304,7 +1333,7 @@ Status HybridModelBuilder::IndexTaskDefs(const ComputeGraphPtr &sub_graph, const } Status HybridModelBuilder::IndexTaskDefs() { - const auto root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; const auto &root_graph_name = root_graph->GetName(); if (SetOutputNameAttr(*root_graph) != SUCCESS) { GELOGW("Set output name attr failed."); @@ -1338,7 +1367,7 @@ Status HybridModelBuilder::IndexTaskDefs() { Status HybridModelBuilder::IndexSpecialNodes() { GELOGD("Start to index special nodes"); - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &node : root_graph->GetAllNodes()) { GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); @@ -1493,7 +1522,7 @@ Status HybridModelBuilder::InitRuntimeParams() { runtime_param_.session_id = ret ? static_cast(value) : 0; ret = ge::AttrUtils::GetInt(first_model, ATTR_MODEL_TASK_GEN_VAR_ADDR, value); runtime_param_.logic_var_base = ret ? 
static_cast(value) : 0; - runtime_param_.graph_id = ge_root_model_->GetRootGraph()->GetGraphID(); + runtime_param_.graph_id = hybrid_model_.root_graph_->GetGraphID(); value = 0; for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) { (void) ge::AttrUtils::GetInt(it.second, ATTR_MODEL_VAR_SIZE, value); @@ -1630,7 +1659,7 @@ Status HybridModelBuilder::TransAllVarData() { } Status HybridModelBuilder::CopyVarData() { - GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(ge_root_model_->GetRootGraph(), + GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(hybrid_model_.root_graph_, runtime_param_.session_id, hybrid_model_.device_id_), "[Invoke][CopyVarData] failed."); @@ -1713,7 +1742,7 @@ Status HybridModelBuilder::LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem } Status HybridModelBuilder::RecoverGraphUnknownFlag() { - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &sub_graph : root_graph->GetAllSubgraphs()) { GE_CHECK_NOTNULL(sub_graph); for (const auto &node : sub_graph->GetDirectNode()) { diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index 9c1eb187..05830e82 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -56,6 +56,7 @@ class HybridModelBuilder { Status BuildOutputMapping(GraphItem &partitioned_call, const NodeItem &node_item, bool is_root_graph); Status ValidateParams(); Status LoadGraph(); + Status CopyGraph(); Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model); static Status InitHcclExecutorOnDemand(const GeModelPtr &ge_model); Status LoadTask(NodeItem &node_item); diff --git a/ge/model/ge_root_model.h b/ge/model/ge_root_model.h index 9e8e116e..b6e3d081 100755 --- a/ge/model/ge_root_model.h +++ b/ge/model/ge_root_model.h @@ -60,6 +60,10 @@ class GeRootModel { bool GetTrainFlag() const { return train_flag_; } + int32_t GetBuildTimes() const { return hybrid_build_times_; } + + void IncreaseBuildTimes() { hybrid_build_times_++; } + private: ComputeGraphPtr root_graph_ = nullptr; std::map subgraph_instance_name_to_model_; @@ -69,6 +73,7 @@ class GeRootModel { bool train_flag_ = false; std::string model_name_; bool is_specific_stream_ = false; + int32_t hybrid_build_times_ = 0; }; } // namespace ge using GeRootModelPtr = std::shared_ptr; diff --git a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc index 2dc3b639..827705ae 100644 --- a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc @@ -249,6 +249,9 @@ TEST_F(UtestSubgraphExecutor, cond_graph_schedule_tasks) { graph_context.callback_manager = std::unique_ptr(new CallbackManager()); ASSERT_EQ(graph_context.callback_manager->Init(), SUCCESS); + auto root_graph = hybrid_model.root_graph_; + switch_t = root_graph->FindNode("switch_t"); + switch_f = root_graph->FindNode("switch_f"); const auto node_it_t = hybrid_model.node_items_.find(switch_t); const auto node_it_f = hybrid_model.node_items_.find(switch_f); ASSERT_NE(hybrid_model.node_items_.end(), node_it_t); diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 5567aca2..10f7c0fe 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -214,11 +214,17 @@ TEST_F(UtestHybridModelBuilder, 
normal_hybrid_model_build) { ASSERT_EQ(it->second->frame_index_, index); ASSERT_EQ(it->second->parent_frame_, -1); }; - TestFrameGroup(enter1, control_group_index); - TestFrameGroup(active1, control_group_index); - TestFrameGroup(active2, control_group_index); - TestFrameGroup(active3, control_group_index); - TestFrameGroup(output1, -1); + auto root_graph = hybrid_model.root_graph_; + auto enter1_node = root_graph->FindNode("enter"); + auto active1_node = root_graph->FindNode("active1"); + auto active2_node = root_graph->FindNode("active2"); + auto active3_node = root_graph->FindNode("active3"); + auto output1_node = root_graph->FindNode("net_output"); + TestFrameGroup(enter1_node, control_group_index); + TestFrameGroup(active1_node, control_group_index); + TestFrameGroup(active2_node, control_group_index); + TestFrameGroup(active3_node, control_group_index); + TestFrameGroup(output1_node, -1); engine_mapping.clear(); task_executor.clear(); @@ -373,4 +379,14 @@ TEST_F(UtestHybridModelBuilder, TestInitHcclExecutorOnDemand) { NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL); ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS); } + +TEST_F(UtestHybridModelBuilder, copy_graph_success) { +ComputeGraphPtr graph = std::make_shared("test"); +GeRootModelPtr ge_root_model = make_shared(graph); +HybridModel hybrid_model(ge_root_model); +HybridModelBuilder hybrid_model_builder(hybrid_model); + +Status st = hybrid_model_builder.CopyGraph(); +EXPECT_EQ(st, SUCCESS); +} } // namespace ge From ae488883304a78b6dc802a0f090a9d22ffa96af7 Mon Sep 17 00:00:00 2001 From: liudingyan Date: Thu, 24 Jun 2021 21:31:30 +0800 Subject: [PATCH 090/226] modify ge_log_error --- ge/executor/ge_executor.cc | 276 +++++++++++++++++++--------- ge/graph/build/label_allocator.cc | 3 +- inc/framework/common/debug/ge_log.h | 7 +- 3 files changed, 194 insertions(+), 92 deletions(-) diff --git a/ge/executor/ge_executor.cc b/ge/executor/ge_executor.cc index 486764bd..73cd7bb5 100755 --- a/ge/executor/ge_executor.cc +++ b/ge/executor/ge_executor.cc @@ -125,34 +125,41 @@ void SetDynamicInputDataFlag(const ge::RunModelData &input_data, const std::vect bool IsDynamicBatchSizeMatchModel(uint64_t batch_size, const vector> &batch_info) { if (batch_info.empty()) { - GELOGE(ge::FAILED, "Dynamic batch info is empty."); + REPORT_INNER_ERROR("E19999", "param Dynamic batch info is empty, check invalid."); + GELOGE(ge::FAILED, "[Check][Param] Dynamic batch info is empty."); return false; } for (auto batch : batch_info) { if (batch.size() != kDynamicBatchSizeVecSize) { - GELOGE(ge::FAILED, "Dynamic batch param num is %zu, current batch size is %zu.", kDynamicBatchSizeVecSize, - batch.size()); + REPORT_INNER_ERROR("E19999", "Dynamic batch param num is %zu, current batch size is %zu.", + kDynamicBatchSizeVecSize, batch.size()); + GELOGE(ge::FAILED, "[Check][Param] Dynamic batch param num is %zu, current batch size is %zu.", + kDynamicBatchSizeVecSize, batch.size()); return false; } if (batch[0] == static_cast(batch_size)) { return true; } } - GELOGE(ge::FAILED, "Dynamic batch %lu can not match the gear of model.", batch_size); + REPORT_INNER_ERROR("E19999", "Dynamic batch %lu can not match the gear of model.", batch_size); + GELOGE(ge::FAILED, "[Check][Param] Dynamic batch %lu can not match the gear of model.", batch_size); return false; } bool IsDynamicImageSizeMatchModel(uint64_t image_height, uint64_t image_width, const vector> &batch_info) { if (batch_info.empty()) { - 
GELOGE(ge::FAILED, "Dynamic batch info is empty."); + REPORT_INNER_ERROR("E19999", "Param Dynamic batch info is empty, check invalid"); + GELOGE(ge::FAILED, "[Check][Param] Dynamic batch info is empty."); return false; } for (auto resolution : batch_info) { if (resolution.size() != kDynamicImageSizeVecSize) { - GELOGE(ge::FAILED, "Dynamic resolution param num is %zu, current resolution size is %zu.", + REPORT_INNER_ERROR("E19999", "Dynamic resolution param num is %zu, current resolution size is %zu.", + kDynamicImageSizeVecSize, resolution.size()); + GELOGE(ge::FAILED, "[Check][Param] Dynamic resolution param num is %zu, current resolution size is %zu.", kDynamicImageSizeVecSize, resolution.size()); return false; } @@ -160,22 +167,28 @@ bool IsDynamicImageSizeMatchModel(uint64_t image_height, uint64_t image_width, return true; } } - - GELOGE(ge::FAILED, "Dynamic resolution (%lu,%lu) can not match the gear of model.", image_height, image_width); + REPORT_INNER_ERROR("E19999", "Dynamic resolution (%lu,%lu) can not match the gear of model.", + image_height, image_width); + GELOGE(ge::FAILED, "[Check][Param] Dynamic resolution (%lu,%lu) can not match the gear of model.", + image_height, image_width); return false; } bool IsDynmaicDimsSizeMatchModel(const vector<uint64_t> cur_dynamic_dims, const vector<vector<int64_t>> &batch_info) { if (batch_info.empty()) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Dynamic batch info is empty."); + REPORT_INNER_ERROR("E19999", "param batch_info is empty, check invalid"); + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Param] Dynamic batch info is empty."); return false; } bool find_match = false; for (auto resolution : batch_info) { if (cur_dynamic_dims.size() != resolution.size()) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Cur dynamic dims param num is %zu, current resolution size is %zu.", + REPORT_INNER_ERROR("E19999", "Cur dynamic dims param num is %zu, current resolution size is %zu.", + cur_dynamic_dims.size(), resolution.size()); + GELOGE(ACL_ERROR_GE_PARAM_INVALID, + "[Check][Param] Cur dynamic dims param num is %zu, current resolution size is %zu.", cur_dynamic_dims.size(), resolution.size()); return false; } @@ -192,7 +205,7 @@ bool IsDynmaicDimsSizeMatchModel(const vector<uint64_t> cur_dynamic_dims, } } if (!find_match) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "choose dynamic dims can not match the gear of model."); + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Param] choose dynamic dims can not match the gear of model."); } return find_match; } @@ -241,7 +254,7 @@ Status GeExecutor::Initialize() { Status init_hostcpu_engine_status = HostCpuEngine::GetInstance().Initialize(); if (init_hostcpu_engine_status != SUCCESS) { - GELOGE(init_hostcpu_engine_status, "Failed to initialize HostCpuEngine"); + GELOGE(init_hostcpu_engine_status, "[Initialize][HostCpuEngine] failed"); return init_hostcpu_engine_status; } @@ -251,12 +264,12 @@ Status GeExecutor::Initialize() { mem_type.push_back(RT_MEMORY_P2P_DDR); auto ret = MemManager::Instance().Initialize(mem_type); if (ret != SUCCESS) { - GELOGE(ret, "Memory Manager init failed."); + GELOGE(ret, "[Initialize][MemManager] failed."); return ret; } GE_CHK_STATUS_RET(OpsKernelBuilderManager::Instance().Initialize({}, false), - "Failed to initialize OpsKernelBuilders."); + "[Initialize][OpsKernelBuilderManager] failed."); // Start profiling Options profiling_options; @@ -292,13 +305,18 @@ Status GeExecutor::Finalize() { Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t batch_size) { if
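// Illustration, not part of this patch: how a caller is expected to drive the
// dynamic-batch path below. The model reads its selected gear from a small
// device buffer owned by the caller; the allocation flags and the gear value 8
// are assumptions for the example, and Initialize() must have succeeded first.
//
//   void *batch_addr = nullptr;
//   uint64_t batch_len = sizeof(uint64_t);
//   GE_CHK_RT_RET(rtMalloc(&batch_addr, batch_len, RT_MEMORY_HBM));
//   GeExecutor executor;
//   // Returns ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID if 8 is not one of the
//   // gears recorded in the model's batch_info.
//   Status status = executor.SetDynamicBatchSize(model_id, batch_addr, batch_len, 8U);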
(dynamic_input_addr == nullptr) { - GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!"); + REPORT_INNER_ERROR("E19999", "param dynamic_input_addr is nullptr, check invalid, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, + "[Check][Param] Dynamic input addr is nullptr, model id:%u", model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID; } uint64_t size = sizeof(uint32_t); if (length < size) { - GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, "Dynamic input size [%lu] is less than [%lu]!", length, size); + REPORT_INNER_ERROR("E19999", "Dynamic input size [%lu] is less than [%lu], check invalid, model id:%u", + length, size, model_id); + GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, + "[Check][Param] Dynamic input size [%lu] is less than [%lu], model id:%u", length, size, model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } if (length >= sizeof(uint64_t)) { @@ -311,24 +329,28 @@ Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_ad int32_t dynamic_type = static_cast(FIXED); Status ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Get dynamic input info failed."); + REPORT_CALL_ERROR("E19999", "get dynamic batch info failed, model id:%u", model_id); + GELOGE(ret, "[Get][DynamicBatchInfo] failed, model id:%u.", model_id); return ret; } if (!IsDynamicBatchSizeMatchModel(batch_size, batch_info)) { - GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model."); + GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, + "[Check][Param] The current dynamic input does not match the gear of the model(id:%u).", model_id); return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID; } ret = GraphExecutor::SetDynamicSize(model_id, batch_num, static_cast(DYNAMIC_BATCH)); if (ret != SUCCESS) { - GELOGE(ret, "Set dynamic size failed"); + REPORT_CALL_ERROR("E19999", "set dynamic size failed, model id:%u, dynamic_type:1", model_id); + GELOGE(ret, "[Set][DynamicSize] failed, model id:%u, dynamic_type:1", model_id); return ret; } // memcpy dynamic_batch_size from host to device rtError_t rt_ret = rtMemcpy(dynamic_input_addr, length, &batch_size, size, RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy dynamic batch input data failed! ret: 0x%X", rt_ret); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy, size:%lu ret:0x%X", length, rt_ret); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy dynamic batch input data failed! 
size:%lu ret:0x%X", length, rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } return SUCCESS; @@ -337,14 +359,19 @@ Status GeExecutor::SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_ad Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height, uint64_t image_width) { if (dynamic_input_addr == nullptr) { - GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!"); + REPORT_INNER_ERROR("E19999", "param dynamic_input_addr is nullptr, check invalid, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, + "[Check][Param] Dynamic input addr is nullptr, model id:%u", model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID; } uint64_t dynamic_input_size = kDynamicImageSizeInputSize * sizeof(uint32_t); if (length < dynamic_input_size) { + REPORT_INNER_ERROR("E19999", "Dynamic input size [%lu] is less than [%lu], check invalid, model id:%u", + length, dynamic_input_size, model_id); GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, - "Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size); + "[Check][Param] Dynamic input size [%lu] is less than [%lu], model id:%u", + length, dynamic_input_size, model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } uint64_t size = sizeof(uint32_t); @@ -357,18 +384,22 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad int32_t dynamic_type = static_cast(FIXED); Status ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Get dynamic input info failed."); + REPORT_CALL_ERROR("E19999", "Get dynamic input info failed, model id:%u.", model_id); + GELOGE(ret, "[Get][DynamicBatchInfo] failed, model id:%u.", model_id); return ret; } if (!IsDynamicImageSizeMatchModel(image_height, image_width, batch_info)) { - GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model."); + GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, + "[Check][Param] The current dynamic input does not match the gear of the model, " + "image_height:%lu, image_width:%lu.", image_height, image_width); return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID; } ret = GraphExecutor::SetDynamicSize(model_id, batch_num, static_cast(DYNAMIC_IMAGE)); if (ret != SUCCESS) { - GELOGE(ret, "Set dynamic size failed"); + REPORT_CALL_ERROR("E19999", "Set dynamic size failed, model id:%u,", model_id); + GELOGE(ret, "[Set][DynamicSize] failed, model id:%u", model_id); return ret; } @@ -376,7 +407,9 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad rtError_t rt_ret = rtMemcpy(dynamic_input_addr, size, &image_height, size, RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy dynamic resolution input data failed! ret: 0x%X", rt_ret); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed! size:%lu, ret:0x%X, model id:%u", size, rt_ret, model_id); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy dynamic resolution input data failed! 
size:%lu, ret:0x%X, model id:%u", + size, rt_ret, model_id); return RT_ERROR_TO_GE_STATUS(rt_ret); } @@ -385,7 +418,10 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad rt_ret = rtMemcpy(reinterpret_cast(reinterpret_cast(dynamic_input_addr) + size), remain_size, &image_width, size, RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy dynamic resolution input data failed!"); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed! size:%lu, ret:0x%X, model id:%u", + remain_size, rt_ret, model_id); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy dynamic resolution input data failed! size:%lu, ret:0x%X, model id:%u", + remain_size, rt_ret, model_id); return RT_ERROR_TO_GE_STATUS(rt_ret); } return SUCCESS; @@ -394,40 +430,48 @@ Status GeExecutor::SetDynamicImageSize(uint32_t model_id, void *dynamic_input_ad Status GeExecutor::SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length, const vector &dynamic_dims) { if (dynamic_input_addr == nullptr) { - GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic input addr is nullptr!"); + REPORT_INNER_ERROR("E19999", "Param dynamic_input_addr is nullptr, check invalid, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, + "[Check][Param] Dynamic input addr is nullptr, model id:%u", model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID; } vector cur_dynamic_dims; Status ret = GetCurDynamicDims(model_id, dynamic_dims, cur_dynamic_dims); if (ret != SUCCESS) { - GELOGE(ret, "Set cur gear dynamic dims failed"); + GELOGE(ret, "[Get][CurDynamicDims] failed, model id:%u", model_id); return ret; } std::vector> batch_info; int32_t dynamic_type = static_cast(FIXED); ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Get dynamic input info failed."); + REPORT_CALL_ERROR("E19999", "Get dynamic input info failed, model id:%u.", model_id); + GELOGE(ret, "[Get][DynamicBatchInfo] failed, model id:%u.", model_id); return ret; } if (!IsDynmaicDimsSizeMatchModel(cur_dynamic_dims, batch_info)) { - GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, "The current dynamic input does not match the gear of the model."); + GELOGE(ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID, + "[Check][Param] The current dynamic input does not match the gear of the model, id:%u.", model_id); return ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID; } ret = GraphExecutor::SetDynamicSize(model_id, cur_dynamic_dims, static_cast(DYNAMIC_DIMS)); if (ret != SUCCESS) { - GELOGE(ret, "Set dynamic size failed"); + REPORT_CALL_ERROR("E19999", "Set dynamic size failed, model id:%u", model_id); + GELOGE(ret, "[Set][DynamicSize] failed, model id:%u", model_id); return ret; } size_t dynamic_dim_num = cur_dynamic_dims.size(); uint64_t dynamic_input_size = static_cast(dynamic_dim_num * sizeof(uint32_t)); if (length < dynamic_input_size) { + REPORT_INNER_ERROR("E19999", "input dynamic size [%lu] is less than [%lu], model id:%u", + length, dynamic_input_size, model_id); GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, - "Dynamic input size [%lu] is less than [%lu]!", length, dynamic_input_size); + "[Check][Param] Dynamic input size [%lu] is less than [%lu], model id:%u", + length, dynamic_input_size, model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } uint64_t size = sizeof(uint32_t); @@ -440,7 +484,9 @@ Status GeExecutor::SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, u rt_ret = rtMemcpy(reinterpret_cast(reinterpret_cast(dynamic_input_addr) 
+ size * i), length - size * i, &cur_dynamic_dims[i], size, RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy dynamic resolution input data failed!"); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X", (length - size * i), rt_ret); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy dynamic resolution input data failed! size:%lu, ret:0x%X", + length - size * i, rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } } @@ -454,14 +500,14 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector & vector output_desc; auto ret = GetModelDescInfo(model_id, input_desc, output_desc); if (ret != ge::SUCCESS) { - GELOGE(ret, "GetModelDescInfo failed."); + GELOGE(ret, "[Get][ModelDescInfo] failed, model id:%u.", model_id); return ret; } vector user_designate_shape_order; vector all_data_dims; ret = GetUserDesignateShapeOrder(model_id, user_designate_shape_order); if (ret != ge::SUCCESS) { - GELOGE(ret, "GetUserDesignateShapeOrder failed."); + GELOGE(ret, "[Call][GetUserDesignateShapeOrder] failed, model id:%u.", model_id); return ret; } for (auto &data_name : user_designate_shape_order) { @@ -475,8 +521,10 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector & } } if (dynamic_dims.size() != all_data_dims.size()){ + REPORT_INNER_ERROR("E19999", "Dynamic input size [%lu] is not equal with all data dims size [%lu]!", + dynamic_dims.size(), all_data_dims.size()); GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, - "Dynamic input size [%lu] is not equal with all data dims size [%lu]!", + "[Check][Param] Dynamic input size [%lu] is not equal with all data dims size [%lu]!", dynamic_dims.size(), all_data_dims.size()); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } @@ -484,8 +532,10 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector & if (all_data_dims[i] < 0) { cur_dynamic_dims.push_back(dynamic_dims[i]); } else if (static_cast(all_data_dims[i]) != dynamic_dims[i]) { + REPORT_INNER_ERROR("E19999", "Static dims should be same, index:%zu value:%lu should be %ld", + i, dynamic_dims[i], all_data_dims[i]); GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, - "Static dims should be same, index: %zu value: %lu should be %ld", + "[Check][Param] Static dims should be same, index:%zu value:%lu should be %ld", i, dynamic_dims[i], all_data_dims[i]); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } @@ -496,12 +546,14 @@ Status GeExecutor::GetCurDynamicDims(uint32_t model_id, const vector & Status GeExecutor::GetCurShape(const uint32_t model_id, std::vector &batch_info, int32_t &dynamic_type) { GELOGI("Begin to get current shape"); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized, model id:%u", model_id); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetCurShape(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Get current shape failed"); + REPORT_CALL_ERROR("E19999", "Get Cur Shape failed, model id:%u", model_id); + GELOGE(ret, "[Get][CurShape] failed, model id:%u", model_id); return ret; } return SUCCESS; @@ -512,11 +564,14 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add const kAippDynamicPara &aippParms) { GELOGI("Enter to SetDynamicAippData."); if (dynamic_input_addr == nullptr) { - 
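// Illustration, not part of this patch: SetDynamicAippData below fills the
// caller's device buffer as one header plus one record per batch, which is
// what the length check and the two rtMemcpy stages implement:
//
//   | kAippDynamicPara (real_aippParms_size bytes) | kAippDynamicBatchPara[0] | ... | kAippDynamicBatchPara[batch_num - 1] |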
GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, "Dynamic aipp input addr is nullptr!"); + REPORT_INNER_ERROR("E19999", "Param dynamic_input_addr is nullptr, check invalid, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID, + "[Check][Param] Dynamic aipp input addr is nullptr, model id:%u", model_id); return ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID; } if (aippBatchPara.empty()) { - GELOGE(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "aippBatchPara is empty."); + REPORT_INNER_ERROR("E19999", "Param aippBatchPara is empty, check invalid, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_AIPP_BATCH_EMPTY, "[Check][Param] aippBatchPara is empty, model id:%u", model_id); return ACL_ERROR_GE_AIPP_BATCH_EMPTY; } uint64_t batch_num = aippBatchPara.size(); @@ -527,14 +582,18 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add "batch num is %lu, struct_len is %lu", model_id, length, batch_num, struct_len); if (struct_len > length) { + REPORT_INNER_ERROR("E19999", "input dynamic aipp param len:%lu is larger than aipp_data size:%lu", + struct_len, length); GELOGE(ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID, - "input dynamic aipp param len [%lu] is larger than aipp_data size [%lu]", struct_len, length); + "[Check][Param] input dynamic aipp param len [%lu] is larger than aipp_data size [%lu]", + struct_len, length); return ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID; } // Memcpy real kAippDynamicBatchPara from host to device rtError_t rt_ret = rtMemcpy(dynamic_input_addr, length, &aippParms, real_aippParms_size, RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy real_aippParms_size failed! ret: 0x%X", rt_ret); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, size:%lu, ret:0x%X", length, rt_ret); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy aippParms failed! size:%lu, ret:0x%X", length, rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } uint64_t remain_len = length - real_aippParms_size; @@ -545,7 +604,8 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add (remain_len - i * sizeof(kAippDynamicBatchPara)), &(aippBatchPara[i]), sizeof(kAippDynamicBatchPara), RT_MEMCPY_HOST_TO_DEVICE); if (rt_ret != RT_ERROR_NONE) { - GELOGE(rt_ret, "memcpy kAippDynamicBatchPara input data failed! ret: 0x%X", rt_ret); + REPORT_CALL_ERROR("E19999", "Call rtMemcpy failed, ret:0x%X", rt_ret); + GELOGE(rt_ret, "[Call][RtMemcpy] memcpy kAippDynamicBatchPara input data failed! ret:0x%X", rt_ret); return RT_ERROR_TO_GE_STATUS(rt_ret); } } @@ -555,12 +615,14 @@ Status GeExecutor::SetDynamicAippData(uint32_t model_id, void *dynamic_input_add Status GeExecutor::UnloadModel(uint32_t model_id) { GELOGD("unload model %u begin.", model_id); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphLoader::DestroyAicpuSessionForInfer(model_id); if (ret != SUCCESS) { - GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id); + REPORT_CALL_ERROR("E19999", "Destroy Aicpu Session For Infer failed, model id:%u", model_id); + GELOGE(ret, "[Destroy][AicpuSession] For Infer failed. 
model id:%u", model_id); return ret; } @@ -578,7 +640,8 @@ Status GeExecutor::UnloadModel(uint32_t model_id) { } ret = GraphLoader::UnloadModel(model_id); if (ret != SUCCESS) { - GELOGE(ret, "[GraphLoader] DestroyAicpuSessionForInfer failed. model id: %u", model_id); + REPORT_CALL_ERROR("E19999", "unload model failed, model id:%u", model_id); + GELOGE(ret, "[Unload][Model] failed. model id:%u", model_id); return ret; } return SUCCESS; @@ -588,7 +651,8 @@ Status GeExecutor::UnloadModel(uint32_t model_id) { Status GeExecutor::GetModelDescInfo(uint32_t model_id, std::vector &input_desc, std::vector &output_desc, bool new_model_desc) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized, model id:%u", model_id); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized, model id:%u", model_id); return ACL_ERROR_GE_EXEC_NOT_INIT; } @@ -600,20 +664,26 @@ Status GeExecutor::GetModelDescInfo(uint32_t model_id, std::vector> &batch_info, int32_t &dynamic_type) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "GetDynamicBatchInfo failed."); + REPORT_CALL_ERROR("E19999", "Get Dynamic BatchInfo failed, model id:%u.", model_id); + GELOGE(ret, "[Get][DynamicBatchInfo] failed, model id:%u.", model_id); return ret; } return SUCCESS; @@ -657,13 +729,15 @@ Status GeExecutor::GetDynamicBatchInfo(uint32_t model_id, std::vector> &batch_info) { GELOGI("Begin to get combined dynamic dims info."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetCombinedDynamicDims(model_id, batch_info); if (ret != SUCCESS) { - GELOGE(ret, "GetCombinedDynamicDims failed."); + REPORT_CALL_ERROR("E19999", "Get Combined DynamicDims failed, model id:%u.", model_id); + GELOGE(ret, "[Get][CombinedDynamicDims] failed, model id:%u.", model_id); return ret; } @@ -680,13 +754,15 @@ Status GeExecutor::GetCombinedDynamicDims(uint32_t model_id, vector &user_designate_shape_order) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetUserDesignateShapeOrder(model_id, user_designate_shape_order); if (ret != SUCCESS) { - GELOGE(ret, "GetUserDesignateShapeOrder failed."); + REPORT_CALL_ERROR("E19999", "GetUserDesignateShapeOrder failed, model id:%u.", model_id); + GELOGE(ret, "[Call][GetUserDesignateShapeOrder] failed, model id:%u.", model_id); return ret; } @@ -704,7 +780,8 @@ Status GeExecutor::GetUserDesignateShapeOrder(uint32_t model_id, vector Status GeExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) { GELOGI("Begin to GetAIPPInfo."); if (!isInit_) { - 
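// Illustration, not part of this patch: every public GeExecutor entry point
// repeats the isInit_ guard seen here, so the expected call order is
// Initialize() once, then the load/query/exec APIs, then Finalize(). A minimal
// sketch (error handling elided, model_id assumed already loaded):
//
//   GeExecutor executor;
//   (void)executor.Initialize();
//   AippConfigInfo aipp_info;
//   (void)executor.GetAIPPInfo(model_id, 0, aipp_info);
//   (void)executor.Finalize();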
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor not inited yet!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetAippInfo(model_id, index, aipp_info); @@ -719,7 +796,8 @@ Status GeExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo Status GeExecutor::GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index) { GELOGI("Begin to get aipp type."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not inited yet!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetAippType(model_id, index, type, aipp_index); @@ -741,8 +819,10 @@ Status GeExecutor::GetOpAttr(uint32_t model_id, const std::string &op_name, cons } Status ret = GraphExecutor::GetOpAttr(model_id, op_name, attr_name, attr_value); if (ret != SUCCESS) { - GELOGE(ret, "[Get][OpAttr]Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str()); - REPORT_CALL_ERROR("E19999", "Get op:%s attr:%s failed.", op_name.c_str(), attr_name.c_str()); + GELOGE(ret, "[Get][OpAttr]Get op:%s attr:%s failed, model id:%u.", + op_name.c_str(), attr_name.c_str(), model_id); + REPORT_CALL_ERROR("E19999", "Get op:%s attr:%s failed, model id:%u", + op_name.c_str(), attr_name.c_str(), model_id); return ret; } return SUCCESS; @@ -750,12 +830,14 @@ Status GeExecutor::GetOpAttr(uint32_t model_id, const std::string &op_name, cons Status GeExecutor::GetModelAttr(uint32_t model_id, std::vector &dynamic_output_shape_info) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not inited yet!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not inited yet!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetModelAttr(model_id, dynamic_output_shape_info); if (ret != SUCCESS) { - GELOGE(ret, "Get dynamic batch output shape info failed."); + REPORT_CALL_ERROR("E19999", "Get Model Attr failed, model id:%u.", model_id); + GELOGE(ret, "[Get][ModelAttr] failed, model id:%u.", model_id); return ret; } return SUCCESS; @@ -764,7 +846,8 @@ Status GeExecutor::GetModelAttr(uint32_t model_id, std::vector &dyn Status GeExecutor::CommandHandle(const Command &command) { Status ret = GraphLoader::CommandHandle(command); if (ret != SUCCESS) { - GELOGE(ACL_ERROR_GE_COMMAND_HANDLE, "CommandHandle: Command Handle failed."); + REPORT_CALL_ERROR("E19999", "call CommandHandle failed, ret:%u", ret); + GELOGE(ACL_ERROR_GE_COMMAND_HANDLE, "[Call][CommandHandle] failed, ret:%u", ret); return ACL_ERROR_GE_COMMAND_HANDLE; } return SUCCESS; @@ -773,7 +856,8 @@ Status GeExecutor::CommandHandle(const Command &command) { Status GeExecutor::GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size) { GELOGI("Get max used memory begin."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } @@ -793,14 +877,15 @@ Status GeExecutor::GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size) { Status GeExecutor::LoadDataFromFile(const std::string &path, ModelData &model_data) { GELOGI("Load 
data from file begin."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } string filePath = RealPath(path.c_str()); if (filePath.empty()) { GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, - "File path is invalid. please check your text file '%s'.", path.c_str()); + "[Call][RealPath] File path is invalid. please check your text file '%s'.", path.c_str()); return ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID; } GELOGI("load modelData from file: %s.", path.c_str()); @@ -829,7 +914,8 @@ Status GeExecutor::LoadDataFromFile(const std::string &path, ModelData &model_da Status GeExecutor::LoadModelFromData(uint32_t &model_id, const ModelData &model_data, void *dev_ptr, size_t mem_size, void *weight_ptr, size_t weight_size) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not inited yet!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } @@ -850,7 +936,8 @@ Status GeExecutor::LoadModelWithQ(uint32_t &model_id, const ModelData &model_dat const std::vector &output_queue_ids) { GELOGI("Load model with queue begin."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } return GraphLoader::LoadModelWithQ(model_id, model_data, input_queue_ids, output_queue_ids); @@ -889,7 +976,8 @@ Status GeExecutor::ExecModel(uint32_t model_id, void *stream, const ge::RunModel const std::vector &input_desc, ge::RunModelData &run_output_data, std::vector &output_desc, bool async_mode) { if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } @@ -904,7 +992,8 @@ Status GeExecutor::ExecModel(uint32_t model_id, void *stream, const ge::RunModel int32_t dynamic_type = static_cast(FIXED); Status ret = GraphExecutor::GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Get dynamic input info failed."); + REPORT_CALL_ERROR("E19999", "get dynamic batch info failed, model id:%u.", model_id); + GELOGE(ret, "[Get][DynamicBatchInfo] failed, model id:%u.", model_id); return ret; } if (!batch_info.empty()) { @@ -926,14 +1015,16 @@ Status GeExecutor::ExecModel(uint32_t model_id, void *stream, const ge::RunModel Status GeExecutor::GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size) { GELOGI("Get memory and weight size from file begin."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } ModelData model; Status ret = ge::GraphLoader::LoadDataFromFile(path, 0, model); if ((ret != SUCCESS) || (model.model_data == nullptr)) { - GELOGE(ret, "Load data from file failed. 
ret = %d", ret); + REPORT_CALL_ERROR("E19999", "load data from file failed, ret = %d", ret); + GELOGE(ret, "[Load][Data] from file failed. ret = %d", ret); return ret; } @@ -958,12 +1049,14 @@ Status GeExecutor::GetMemAndWeightSize(const void *model_data, size_t model_size size_t &weight_size) { GELOGI("Get memory and weight size from data begin."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } if (model_data == nullptr) { - GELOGE(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "invalid model data!"); + REPORT_INNER_ERROR("E19999", "param model_data is nullptr, check invalid!"); + GELOGE(ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID, "[Check][Param] invalid model data!"); return ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID; } @@ -997,7 +1090,8 @@ Status GeExecutor::LoadDynamicSingleOpV2(const std::string &model_name, const ge Status GeExecutor::ExecuteAsync(SingleOp *executor, const std::vector<DataBuffer> &inputs, std::vector<DataBuffer> &outputs) { if (executor == nullptr) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "param is NULL"); + REPORT_INNER_ERROR("E19999", "Param executor is nullptr, check invalid"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] param executor is nullptr"); return ACL_ERROR_GE_EXEC_NOT_INIT; } @@ -1021,7 +1115,8 @@ Status GeExecutor::GetDeviceIdByModelId(uint32_t model_id, uint32_t &device_id) GE_CHECK_NOTNULL(model_manager); auto davinci_model = model_manager->GetModel(model_id); if (davinci_model == nullptr) { - GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, "Model id: %d is invaild or model is not loaded.", model_id); + GELOGE(ACL_ERROR_GE_EXEC_MODEL_ID_INVALID, + "[Get][Model] failed, Model id:%u is invalid or model is not loaded.", model_id); return ACL_ERROR_GE_EXEC_MODEL_ID_INVALID; } @@ -1034,7 +1129,7 @@ Status GeExecutor::GetBatchInfoSize(uint32_t model_id, size_t &shape_count) { int32_t dynamic_type = static_cast<int32_t>(FIXED); Status ret = GetDynamicBatchInfo(model_id, batch_info, dynamic_type); if (ret != SUCCESS) { - GELOGE(ret, "Calc batch info size failed. ret = %d", ret); + GELOGE(ret, "[Get][DynamicBatchInfo] failed. 
ret = %d, model id:%u", ret, model_id); return ret; } if (batch_info.empty()) { @@ -1048,13 +1143,15 @@ Status GeExecutor::GetBatchInfoSize(uint32_t model_id, size_t &shape_count) { Status GeExecutor::GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info) { GELOGI("Begin to GetOrigInputInfo."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetOrigInputInfo(model_id, index, orig_input_info); if (ret != SUCCESS) { - GELOGE(ret, "GetOrigInputInfo failed."); + REPORT_CALL_ERROR("E19999", "Get Orig Input Info failed, model id:%u.", model_id); + GELOGE(ret, "[Get][OrigInputInfo] failed, model id:%u.", model_id); return ret; } @@ -1067,13 +1164,15 @@ Status GeExecutor::GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector &output_dims) { GELOGI("Begin to GetAllAippInputOutputDims."); if (!isInit_) { - GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!"); + REPORT_INNER_ERROR("E19999", "GeExecutor has not been initialized!"); + GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "[Check][Param] GeExecutor has not been initialized!"); return ACL_ERROR_GE_EXEC_NOT_INIT; } Status ret = GraphExecutor::GetAllAippInputOutputDims(model_id, index, input_dims, output_dims); if (ret != SUCCESS) { - GELOGE(ret, "GetAllAippInputOutputDims failed."); + REPORT_CALL_ERROR("E19999", "Get All Aipp Input Output Dims failed, model id:%u.", model_id); + GELOGE(ret, "[Get][AllAippInputOutputDims] failed, model id:%u.", model_id); return ret; } @@ -1085,7 +1184,10 @@ Status GeExecutor::GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_ GELOGI("Begin to GetOpDescInfo."); Status ret = GraphExecutor::GetOpDescInfo(device_id, stream_id, task_id, op_desc_info); if (ret != SUCCESS) { - GELOGE(ret, "GetOpDescInfo failed."); + REPORT_CALL_ERROR("E19999", "get opdesc info failed, device_id:%u, stream_id:%u, task_id:%u.", + device_id, stream_id, task_id); + GELOGE(ret, "[Get][OpDescInfo] failed, device_id:%u, stream_id:%u, task_id:%u.", + device_id, stream_id, task_id); return ret; } GELOGI("GetOpDescInfo succ."); @@ -1096,7 +1198,7 @@ Status GeExecutor::SetDump(const DumpConfig &dump_config) { GELOGI("Start to set dump config"); auto ret = DumpManager::GetInstance().SetDumpConf(dump_config); if (ret != SUCCESS) { - GELOGE(ret, "Set dump conf failed"); + GELOGE(ret, "[Set][DumpConf] failed, ret:%d", ret); return ret; } GELOGI("Set dump config successfully"); diff --git a/ge/graph/build/label_allocator.cc b/ge/graph/build/label_allocator.cc index 6d81c17d..f2329769 100644 --- a/ge/graph/build/label_allocator.cc +++ b/ge/graph/build/label_allocator.cc @@ -80,8 +80,7 @@ bool LabelAllocator::CollectFunctionalNode(ComputeGraphPtr &graph, std::setGetParentNode(); if (func_node == nullptr) { - REPORT_INNER_ERROR("E19999", "Parent node not set in node:%s(%s), graph:%s", - func_node->GetName().c_str(), func_node->GetType().c_str(), graph->GetName().c_str()); + REPORT_INNER_ERROR("E19999", "Parent node not set, graph:%s", graph->GetName().c_str()); GELOGE(INTERNAL_ERROR, "[Get][Node] Parent functional node not set: %s.", graph->GetName().c_str()); return false; } diff --git a/inc/framework/common/debug/ge_log.h b/inc/framework/common/debug/ge_log.h index 754712f3..3e646440 100644 --- a/inc/framework/common/debug/ge_log.h +++ 
b/inc/framework/common/debug/ge_log.h @@ -84,9 +84,10 @@ inline bool IsLogEnable(int module_name, int log_level) { ##__VA_ARGS__); \ } while (0) -#define GE_LOG_ERROR(MOD_NAME, ERROR_CODE, fmt, ...) \ - dlog_error(MOD_NAME, "%lu %s: ErrorNo: %d(%s) " fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \ - ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ##__VA_ARGS__) +#define GE_LOG_ERROR(MOD_NAME, ERROR_CODE, fmt, ...) \ + dlog_error(MOD_NAME, "%lu %s: ErrorNo: %d(%s) %s" fmt, GeLog::GetTid(), __FUNCTION__, ERROR_CODE, \ + ((GE_GET_ERRORNO_STR(ERROR_CODE)).c_str()), ErrorManager::GetInstance().GetLogHeader().c_str(), \ + ##__VA_ARGS__) // print memory when it is greater than 1KB. #define GE_PRINT_DYNAMIC_MEMORY(FUNC, PURPOSE, SIZE) \ From 66b63a27ef778deeb3f7df0681e1e41b8fe9d100 Mon Sep 17 00:00:00 2001 From: y00500818 Date: Fri, 25 Jun 2021 16:52:48 +0800 Subject: [PATCH 091/226] bugfix for InitNetOutput --- ge/graph/load/model_manager/davinci_model.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index a00d2b9d..7d82879f 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -1156,7 +1156,6 @@ Status DavinciModel::InitNetOutput(const ComputeGraphPtr &graph, const NodePtr & } size_t num = output_data_info_.size(); - bool fusion_flag = false; size_t input_count = input_size_list.size(); is_getnext_sink_dynamic_ = false; @@ -1166,6 +1165,7 @@ Status DavinciModel::InitNetOutput(const ComputeGraphPtr &graph, const NodePtr & } for (size_t idx = 0; idx < input_count; ++idx) { ZeroCopyOffset zero_copy_offset; + bool fusion_flag = false; Status ret = zero_copy_offset.InitOutputDataInfo(input_size_list, virtual_addr_list, op_desc, idx, fusion_flag); GE_IF_BOOL_EXEC(ret != SUCCESS, GELOGE(PARAM_INVALID, "[Init][DataInfo] of input_info %s failed.", op_desc->GetName().c_str()); From 3af68a42300f54e50f079ebf8ea41b01a02eeb59 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Fri, 25 Jun 2021 20:53:08 +0800 Subject: [PATCH 092/226] Fix Set Control flow group for -1 --- .../mark_force_unknown_for_cond_pass.cc | 97 +++++++++++-------- .../passes/mark_force_unknown_for_cond_pass.h | 11 +++ .../passes/switch_to_stream_switch_pass.cc | 5 +- ge/hybrid/executor/node_state.cc | 35 ++++--- ge/hybrid/executor/node_state.h | 1 + ge/hybrid/model/node_item.cc | 6 +- ge/hybrid/model/node_item.h | 4 +- ge/hybrid/node_executor/node_executor.cc | 1 + ge/hybrid/node_executor/task_context.cc | 6 ++ ge/hybrid/node_executor/task_context.h | 1 + 10 files changed, 108 insertions(+), 59 deletions(-) diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc index fbf69c04..233a1ff0 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc @@ -16,8 +16,6 @@ #include "graph/passes/mark_force_unknown_for_cond_pass.h" -#include - #include "graph/utils/node_utils.h" #include "graph/common/omg_util.h" @@ -26,17 +24,7 @@ namespace { inline bool IsMergeInLoop(const NodePtr &node) { const static std::set kLoopMergeInputs{ ENTER, REFENTER, NEXTITERATION, REFNEXTITERATION }; - std::string node_type; - (void)GetOriginalType(node, node_type); - return kLoopMergeInputs.count(node_type) > 0; -} - -inline bool IsSwitchInLoop(const NodePtr &node) { - const static std::set kLoopSwitchInputs{ MERGE, REFMERGE, LOOPCOND }; - - std::string node_type; - 
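// Illustration, not part of this patch: the series replaces the two-step,
// out-parameter lookup
//   std::string node_type;
//   (void)GetOriginalType(node, node_type);
// with the single expression NodeUtils::GetNodeType(node), as in the surviving
// IsMergeInLoop body added just below:
//   return kLoopMergeInputs.count(NodeUtils::GetNodeType(node)) > 0;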
(void)GetOriginalType(node, node_type); - return kLoopSwitchInputs.count(node_type) > 0; + return kLoopMergeInputs.count(NodeUtils::GetNodeType(node)) > 0; } } @@ -44,10 +32,7 @@ Status MarkForceUnknownForCondPass::Run(ComputeGraphPtr graph) { GELOGD("MarkForceUnknownForCondPass Enter"); std::map> switch_groups; for (const auto &node : graph->GetDirectNode()) { - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(node, node_type), - "[Get][OriginalType] of node in graph:%s failed.", graph->GetName().c_str()); - if (kMergeOpTypes.count(node_type) == 0) { + if (kMergeOpTypes.count(NodeUtils::GetNodeType(node)) == 0) { continue; } @@ -64,6 +49,51 @@ Status MarkForceUnknownForCondPass::Run(ComputeGraphPtr graph) { return SUCCESS; } +/// +/// @brief Deal with Switch node for LoopCond +/// @param [in] Switch node +/// @param [in] dest span +/// @param [out] Search queue +/// @return true: Switch In while loop / false: Not in while Loop. +/// +bool MarkForceUnknownForCondPass::DealWithLoopSwitch(const NodePtr &node, uint32_t dst_span, + std::queue> search_queue) { + /// LoopCond --->\. + /// \. + /// Enter-----------+ \. + /// +--> Merge --> Switch --> Exit + /// NextIteration---+ + const auto is_loop_op = [](const NodePtr &n) { + return NodeUtils::GetNodeType(n) == LOOPCOND; + }; + const auto is_exit_op = [](const NodePtr &n) { + return kExitOpTypes.count(NodeUtils::GetNodeType(n)) > 0; + }; + + const auto src_nodes = node->GetInAllNodes(); + const auto dst_nodes = node->GetOutAllNodes(); + if (std::none_of(src_nodes.begin(), src_nodes.end(), is_loop_op) && + std::none_of(dst_nodes.begin(), dst_nodes.end(), is_exit_op)) { + return false; + } + + for (const auto &m : src_nodes) { + if (kMergeOpTypes.count(NodeUtils::GetNodeType(m)) > 0) { + for (const auto &n : m->GetInAllNodes()) { + if (kNextIterationOpTypes.count(NodeUtils::GetNodeType(n)) > 0) { + continue; + } + + search_queue.push({n, dst_span}); + GELOGD("Travel in Loop: %s <-- %s <-- %s, span is: %u", node->GetName().c_str(), m->GetName().c_str(), + n->GetName().c_str(), dst_span); + } + } + } + + return true; +} + /// /// @brief Mark force unknown shape for Switch node /// @param [in] merge node @@ -72,6 +102,7 @@ Status MarkForceUnknownForCondPass::Run(ComputeGraphPtr graph) { /// void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std::vector &switch_group) { // Switch --> {Switch --> Merge} --> Merge + GELOGD("Search Switch node for Merge: %s", node->GetName().c_str()); std::unordered_set nodes_seen; std::queue> search_queue({{node, 0}}); while (!search_queue.empty()) { @@ -79,43 +110,25 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std: const auto dst_span = search_queue.front().second; search_queue.pop(); - // Switch --> Identity --> Constant - for (const auto &in_node : dst_node->GetInControlNodes()) { - if (nodes_seen.count(in_node) > 0) { - GELOGD("Travel node: %s, Skip already seen node: %s", dst_node->GetName().c_str(), in_node->GetName().c_str()); - continue; - } - nodes_seen.insert(in_node); - - if (in_node->GetType() == IDENTITY) { - GELOGD("Travel node: %s, In control: %s, span is: %u", dst_node->GetName().c_str(), - in_node->GetName().c_str(), dst_span); - search_queue.push({in_node, dst_span}); - } - } - - for (const auto &in_node : dst_node->GetInDataNodes()) { + for (const auto &in_node : dst_node->GetInAllNodes()) { if (nodes_seen.count(in_node) > 0) { GELOGD("Travel node: %s, Skip already seen node: %s", dst_node->GetName().c_str(), 
in_node->GetName().c_str()); continue; } nodes_seen.insert(in_node); - std::string node_type; - (void)GetOriginalType(in_node, node_type); + const std::string node_type = NodeUtils::GetNodeType(in_node); GELOGD("Travel node: %s, %s node: %s, span is: %u", dst_node->GetName().c_str(), node_type.c_str(), in_node->GetName().c_str(), dst_span); if (kSwitchOpTypes.count(node_type) > 0) { // Switch input node. + if (DealWithLoopSwitch(in_node, dst_span, search_queue)) { + continue; + } + if (dst_span > 0) { search_queue.push({in_node, dst_span - 1}); } else { - const auto &all_in_nodes = in_node->GetInDataNodes(); - if (std::any_of(all_in_nodes.begin(), all_in_nodes.end(), IsSwitchInLoop)) { - GELOGW("Travel node: %s, %s node: %s, Skip LoopCond switch", dst_node->GetName().c_str(), node_type.c_str(), - in_node->GetName().c_str()); - } else { - switch_group.emplace_back(in_node); - } + switch_group.emplace_back(in_node); } } else if (kMergeOpTypes.count(node_type) > 0) { // Merge input node. search_queue.push({in_node, dst_span + 1}); diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.h b/ge/graph/passes/mark_force_unknown_for_cond_pass.h index 528a8fdc..d2be9a9e 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.h +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.h @@ -19,12 +19,23 @@ #include "inc/graph_pass.h" +#include + namespace ge { class MarkForceUnknownForCondPass : public GraphPass { public: Status Run(ComputeGraphPtr graph); private: + /// + /// @brief Deal with Switch node for LoopCond + /// @param [in] Switch node + /// @param [in] dest span + /// @param [out] Search queue + /// @return true: Switch In while loop / false: Not in while Loop. + /// + bool DealWithLoopSwitch(const NodePtr &node, uint32_t dst_span, std::queue> search_queue); + /// /// @brief Mark force unknown shape for Switch node /// @param [in] merge node diff --git a/ge/graph/passes/switch_to_stream_switch_pass.cc b/ge/graph/passes/switch_to_stream_switch_pass.cc index 77a7c9db..7fecae31 100644 --- a/ge/graph/passes/switch_to_stream_switch_pass.cc +++ b/ge/graph/passes/switch_to_stream_switch_pass.cc @@ -395,8 +395,9 @@ NodePtr SwitchToStreamSwitchPass::CreateStreamSwitchNode(const ComputeGraphPtr & peer_cond_anchor->GetOwnerNode()->GetName().c_str(), stream_switch->GetName().c_str()); int64_t group_index = -1; - (void)AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index); - SetControlFlowGroup(stream_switch, group_index); + if (AttrUtils::GetInt(switch_node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index)) { + SetControlFlowGroup(stream_switch, group_index); + } return stream_switch; } diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc index 468c84e6..4b0d0c44 100644 --- a/ge/hybrid/executor/node_state.cc +++ b/ge/hybrid/executor/node_state.cc @@ -326,17 +326,37 @@ std::shared_ptr NodeState::GetTaskContext() { } void NodeState::SavePersistTensor(int input_idx, const TensorValue &tensor) { - if (node_item_->root_data_.count(input_idx) > 0) { + const auto is_persist_tensor = [](const std::map> &items, int idx) { + const auto is_exist = [&idx](const std::pair> &items) { + return items.second.count(idx) > 0; + }; + return std::any_of(items.begin(), items.end(), is_exist); + }; + + if (is_persist_tensor(node_item_->root_data_, input_idx)) { GELOGD("[%s] Save Root input tensor: %d", GetName().c_str(), input_idx); root_tensor_values_[input_idx] = tensor; - } - - if (node_item_->enter_data_.count(input_idx) > 0) { + } else if 
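// Illustration, not part of this patch series' code: the intended lifecycle of
// the tensors saved here, pieced together from this patch and PATCH 093 below
// (the call sites are real, the sequencing notes are assumptions):
//
//   node_state.SavePersistTensor(idx, tensor);  // TaskContext::ReleaseInput, before the input is destroyed
//   node_state.UpdatePersistTensor();           // ExecutionEngine, after PrepareTask and before execution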
(is_persist_tensor(node_item_->enter_data_, input_idx)) { GELOGD("[%s] Save Enter input tensor: %d", GetName().c_str(), input_idx); root_tensor_values_[input_idx] = tensor; } } +void NodeState::UpdatePersistTensor() { + const auto update_tensor = [&](const std::map> &items) { + for (const auto &item : items) { + for (const auto idx : item.second) { + UpdatePersistTensor(idx); + } + } + }; + + update_tensor(node_item_->root_data_); + if (iteration_count_ > 0) { + update_tensor(node_item_->enter_data_); + } +} + void NodeState::UpdatePersistTensor(int input_idx) { const auto it = root_tensor_values_.find(input_idx); if (it == root_tensor_values_.end()) { @@ -363,16 +383,9 @@ void NodeState::ResetContext(uint64_t iteration) { data_scheduled_ = static_cast(node_item_->root_data_.size()); ctrl_scheduled_ = static_cast(node_item_->root_ctrl_.size()); - for (auto item : node_item_->root_data_) { - UpdatePersistTensor(item.first); - } - if (iteration > 0) { data_scheduled_ += static_cast(node_item_->enter_data_.size()); ctrl_scheduled_ += static_cast(node_item_->enter_ctrl_.size()); - for (auto item : node_item_->enter_data_) { - UpdatePersistTensor(item.first); - } } iteration_count_ = iteration; diff --git a/ge/hybrid/executor/node_state.h b/ge/hybrid/executor/node_state.h index 727402f1..f1cec215 100644 --- a/ge/hybrid/executor/node_state.h +++ b/ge/hybrid/executor/node_state.h @@ -132,6 +132,7 @@ struct NodeState { void RunNextIteration(); void SavePersistTensor(int input_idx, const TensorValue &tensor); + void UpdatePersistTensor(); Status NodeScheduled(const std::function &ready) const; diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc index 250562ce..8e87c6e2 100644 --- a/ge/hybrid/model/node_item.cc +++ b/ge/hybrid/model/node_item.cc @@ -395,11 +395,13 @@ void NodeItem::SetDataSend(NodeItem *node_item, int anchor_index) { data_send_.emplace(node_item); node_item->data_recv_[this] = anchor_index; if (is_root_node_) { - node_item->root_data_[anchor_index] = this; + auto &data_anchors = node_item->root_data_[this]; + data_anchors.emplace(anchor_index); } // If Enter feed Not Merge, take as root Node. 
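// Illustration, not part of this patch: root_data_/enter_data_ change here
// from a map keyed by anchor index to a map keyed by the upstream NodeItem, so
// a node feeding several input anchors of one consumer keeps every index
// instead of overwriting a single entry. Reconstructed shape (the angle
// brackets are stripped in the node_item.h hunk below):
//
//   std::map<NodeItem *, std::set<int>> root_data_;   // Recv data from root node
//   std::map<NodeItem *, std::set<int>> enter_data_;  // Recv data from Enter node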
if (IsEnterOp() && (node_item->node_type != STREAMMERGE)) { - node_item->enter_data_[anchor_index] = this; + auto &data_anchors = node_item->enter_data_[this]; + data_anchors.emplace(anchor_index); } GELOGI("Node[%s] will control node[%s]", NodeName().c_str(), node_item->NodeName().c_str()); } diff --git a/ge/hybrid/model/node_item.h b/ge/hybrid/model/node_item.h index 12775b00..f6dcdcf6 100644 --- a/ge/hybrid/model/node_item.h +++ b/ge/hybrid/model/node_item.h @@ -148,9 +148,9 @@ struct NodeItem { int64_t frame_index_ = -1; int64_t parent_frame_ = -1; std::set root_ctrl_; // Recv ctrl from root node - std::map root_data_; // Recv data from root node + std::map> root_data_; // Recv data from root node std::set enter_ctrl_; // Recv ctrl from Enter node - std::map enter_data_; // Recv data from Enter node + std::map> enter_data_; // Recv data from Enter node std::set data_send_; // Send data notify to std::map data_recv_; // Recv data notify from std::set ctrl_send_; // Send ctrl notify to diff --git a/ge/hybrid/node_executor/node_executor.cc b/ge/hybrid/node_executor/node_executor.cc index 9e9354d9..eeb5ba20 100755 --- a/ge/hybrid/node_executor/node_executor.cc +++ b/ge/hybrid/node_executor/node_executor.cc @@ -39,6 +39,7 @@ const char *const kEngineNameHostCpu = "DNN_VM_HOST_CPU_OP_STORE"; Status NodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const { GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs()); GE_CHK_STATUS_RET_NOLOG(context.AllocateWorkspaces()); + GE_CHK_STATUS_RET_NOLOG(context.UpdatePersistTensor()); GE_CHK_STATUS_RET_NOLOG(task.UpdateArgs(context)); return SUCCESS; } diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index c0464c87..3c288981 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ b/ge/hybrid/node_executor/task_context.cc @@ -470,6 +470,12 @@ Status TaskContext::PropagateOutputs() { return SUCCESS; } +Status TaskContext::UpdatePersistTensor() { + GE_CHECK_NOTNULL(node_state_); + node_state_->UpdatePersistTensor(); + return SUCCESS; +} + const void *TaskContext::GetVarBaseAddr() { return execution_context_->model->GetVarMemBase(); } diff --git a/ge/hybrid/node_executor/task_context.h b/ge/hybrid/node_executor/task_context.h index c96e194e..cff5d680 100644 --- a/ge/hybrid/node_executor/task_context.h +++ b/ge/hybrid/node_executor/task_context.h @@ -78,6 +78,7 @@ class TaskContext { Status AllocateOutputs(AllocationAttr *attr = nullptr); Status AllocateWorkspaces(); Status AllocateWorkspace(size_t size, void **buffer, void *ori_addr = nullptr); + Status UpdatePersistTensor(); bool IsTraceEnabled() const; From d4828ea130d310773161d5f1b8ccc313f283cd1a Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 26 Jun 2021 15:00:53 +0800 Subject: [PATCH 093/226] UpdatePersistTensor from ExecutionEngine --- ge/hybrid/executor/node_state.cc | 4 ++++ ge/hybrid/executor/worker/execution_engine.cc | 1 + ge/hybrid/node_executor/node_executor.cc | 1 - ge/hybrid/node_executor/task_context.cc | 11 +---------- ge/hybrid/node_executor/task_context.h | 1 - 5 files changed, 6 insertions(+), 12 deletions(-) diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc index 4b0d0c44..7ab7b536 100644 --- a/ge/hybrid/executor/node_state.cc +++ b/ge/hybrid/executor/node_state.cc @@ -333,6 +333,10 @@ void NodeState::SavePersistTensor(int input_idx, const TensorValue &tensor) { return std::any_of(items.begin(), items.end(), is_exist); }; + if (root_tensor_values_.count(input_idx) > 0) { + return; + } + if 
(is_persist_tensor(node_item_->root_data_, input_idx)) { GELOGD("[%s] Save Root input tensor: %d", GetName().c_str(), input_idx); root_tensor_values_[input_idx] = tensor; diff --git a/ge/hybrid/executor/worker/execution_engine.cc b/ge/hybrid/executor/worker/execution_engine.cc index 8eecbc80..d4c73f58 100755 --- a/ge/hybrid/executor/worker/execution_engine.cc +++ b/ge/hybrid/executor/worker/execution_engine.cc @@ -375,6 +375,7 @@ Status ExecutionEngine::DoExecuteAsync(NodeState &node_state, RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start"); GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context), "[Prepare][Task] for [%s] failed.", node_state.GetName().c_str()); + node_state.UpdatePersistTensor(); RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End"); GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str()); diff --git a/ge/hybrid/node_executor/node_executor.cc b/ge/hybrid/node_executor/node_executor.cc index eeb5ba20..9e9354d9 100755 --- a/ge/hybrid/node_executor/node_executor.cc +++ b/ge/hybrid/node_executor/node_executor.cc @@ -39,7 +39,6 @@ const char *const kEngineNameHostCpu = "DNN_VM_HOST_CPU_OP_STORE"; Status NodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const { GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs()); GE_CHK_STATUS_RET_NOLOG(context.AllocateWorkspaces()); - GE_CHK_STATUS_RET_NOLOG(context.UpdatePersistTensor()); GE_CHK_STATUS_RET_NOLOG(task.UpdateArgs(context)); return SUCCESS; } diff --git a/ge/hybrid/node_executor/task_context.cc b/ge/hybrid/node_executor/task_context.cc index 3c288981..4ecc1558 100644 --- a/ge/hybrid/node_executor/task_context.cc +++ b/ge/hybrid/node_executor/task_context.cc @@ -460,22 +460,12 @@ Status TaskContext::PropagateOutputs() { subgraph_context_->all_inputs_[input_offset].SetName( node_item_->NodeName() + "_in_" + std::to_string(dst_input_idx)); } - - auto dst_node_state = subgraph_context_->GetOrCreateNodeState(dst_node_item); - GE_CHECK_NOTNULL(dst_node_state); - dst_node_state->SavePersistTensor(dst_input_idx, *tensor); } } (void)guard; return SUCCESS; } -Status TaskContext::UpdatePersistTensor() { - GE_CHECK_NOTNULL(node_state_); - node_state_->UpdatePersistTensor(); - return SUCCESS; -} - const void *TaskContext::GetVarBaseAddr() { return execution_context_->model->GetVarMemBase(); } @@ -501,6 +491,7 @@ void TaskContext::ReleaseInputsAndOutputs() { void TaskContext::ReleaseInput(int index) { auto input_tensor = MutableInput(index); if (input_tensor != nullptr) { + node_state_->SavePersistTensor(index, *input_tensor); input_tensor->Destroy(); GELOGD("[%s] Tensor of input[%d] released", GetNodeName(), index); } diff --git a/ge/hybrid/node_executor/task_context.h b/ge/hybrid/node_executor/task_context.h index cff5d680..c96e194e 100644 --- a/ge/hybrid/node_executor/task_context.h +++ b/ge/hybrid/node_executor/task_context.h @@ -78,7 +78,6 @@ class TaskContext { Status AllocateOutputs(AllocationAttr *attr = nullptr); Status AllocateWorkspaces(); Status AllocateWorkspace(size_t size, void **buffer, void *ori_addr = nullptr); - Status UpdatePersistTensor(); bool IsTraceEnabled() const; From e2cb3778f0e4724cec833363a878e6b28093ec04 Mon Sep 17 00:00:00 2001 From: wq160 Date: Thu, 24 Jun 2021 11:42:57 +0800 Subject: [PATCH 094/226] add infer_base and infer value range --- ge/CMakeLists.txt | 4 + .../formats/utils/formats_trans_utils.cc | 19 + ge/common/formats/utils/formats_trans_utils.h | 2 + ge/graph/passes/constant_folding_pass.cc | 26 
+- ge/graph/passes/constant_folding_pass.h | 5 + ge/graph/passes/folding_pass.cc | 8 - ge/graph/passes/folding_pass.h | 2 - ge/graph/passes/infer_base_pass.cc | 386 ++++++++++++ ge/graph/passes/infer_base_pass.h | 65 ++ ge/graph/passes/infer_value_range_pass.cc | 500 +++++++++++++++ ge/graph/passes/infer_value_range_pass.h | 49 ++ ge/graph/preprocess/graph_preprocess.cc | 3 + metadef | 2 +- tests/ut/ge/CMakeLists.txt | 7 +- .../graph/passes/infer_base_pass_unittest.cc | 359 +++++++++++ .../passes/infer_value_range_pass_unittest.cc | 583 ++++++++++++++++++ 16 files changed, 1997 insertions(+), 23 deletions(-) create mode 100644 ge/graph/passes/infer_base_pass.cc create mode 100644 ge/graph/passes/infer_base_pass.h create mode 100644 ge/graph/passes/infer_value_range_pass.cc create mode 100644 ge/graph/passes/infer_value_range_pass.h create mode 100644 tests/ut/ge/graph/passes/infer_base_pass_unittest.cc create mode 100644 tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 2b9122da..dc80597c 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -298,7 +298,9 @@ set(TRAIN_SRC_LIST "graph/passes/hccl_continuous_memcpy_pass.cc" "graph/passes/identity_pass.cc" "graph/passes/ref_identity_delete_op_pass.cc" + "graph/passes/infer_base_pass.cc" "graph/passes/infershape_pass.cc" + "graph/passes/infer_value_range_pass.cc" "graph/passes/iterator_op_pass.cc" "graph/passes/link_gen_mask_nodes_pass.cc" "graph/passes/merge_pass.cc" @@ -547,7 +549,9 @@ set(INFER_SRC_LIST "graph/passes/shape_operate_op_remove_pass.cc" "graph/passes/assert_pass.cc" "graph/passes/dropout_pass.cc" + "graph/passes/infer_base_pass.cc" "graph/passes/infershape_pass.cc" + "graph/passes/infer_value_range_pass.cc" "graph/passes/unused_const_pass.cc" "graph/passes/permute_pass.cc" "graph/passes/ctrl_edge_transfer_pass.cc" diff --git a/ge/common/formats/utils/formats_trans_utils.cc b/ge/common/formats/utils/formats_trans_utils.cc index 052951ce..db1812d0 100755 --- a/ge/common/formats/utils/formats_trans_utils.cc +++ b/ge/common/formats/utils/formats_trans_utils.cc @@ -49,6 +49,25 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string ShapeToString(const s return JoinToString(shape); } +GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY +std::string RangeToString(const std::vector> &ranges) { + bool first = true; + std::stringstream ss; + ss << "["; + for (const auto &range : ranges) { + if (first) { + first = false; + } else { + ss << ","; + } + ss << "{"; + ss << range.first << "," << range.second; + ss << "}"; + } + ss << "]"; + return ss.str(); +} + int64_t GetItemNumByShape(const std::vector &shape) { int64_t num = 1; for (auto dim : shape) { diff --git a/ge/common/formats/utils/formats_trans_utils.h b/ge/common/formats/utils/formats_trans_utils.h index 848e8b3a..64f9f820 100755 --- a/ge/common/formats/utils/formats_trans_utils.h +++ b/ge/common/formats/utils/formats_trans_utils.h @@ -54,6 +54,8 @@ std::string ShapeToString(const GeShape &shape); std::string ShapeToString(const std::vector &shape); +std::string RangeToString(const std::vector> &ranges); + int64_t GetItemNumByShape(const std::vector &shape); bool CheckShapeValid(const std::vector &shape, const int64_t expect_dims); diff --git a/ge/graph/passes/constant_folding_pass.cc b/ge/graph/passes/constant_folding_pass.cc index 6607388f..53b14fd5 100644 --- a/ge/graph/passes/constant_folding_pass.cc +++ b/ge/graph/passes/constant_folding_pass.cc @@ -20,17 +20,23 @@ #include 
"external/graph/operator_factory.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" +#include "ge_local_engine/engine/host_cpu_engine.h" #include "init/gelib.h" namespace ge { const int64_t kStartCallNum = 1; const std::string kKernelLibName = "aicpu_tf_kernel"; -// tf_kernel.json opsFlag config const std::string kOpsFlagClose = "0"; -Status RunOpKernelWithCheck(NodePtr &node, - const vector &inputs, - std::vector &outputs) { +const map> &ConstantFoldingPass::GetGeConstantFoldingPerfStatistic() const { + return statistic_of_ge_constant_folding_; +} +const map> &ConstantFoldingPass::GetOpConstantFoldingPerfStatistic() const { + return statistic_of_op_constant_folding_; +} + +Status ConstantFoldingPass::RunOpKernelWithCheck(NodePtr &node, const vector &inputs, + std::vector &outputs) { std::shared_ptr instance_ptr = ge::GELib::GetInstance(); if ((instance_ptr == nullptr) || (!instance_ptr->InitFlag())) { GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Check][Param] GE is not initialized or is finalized."); @@ -47,15 +53,13 @@ Status RunOpKernelWithCheck(NodePtr &node, if (ops_flag == kOpsFlagClose) { return UNSUPPORTED; } - return FoldingPass::RunOpKernel(node, inputs, outputs); + return RunOpKernel(node, inputs, outputs); } -const map> &ConstantFoldingPass::GetGeConstantFoldingPerfStatistic() const { - return statistic_of_ge_constant_folding_; -} - -const map> &ConstantFoldingPass::GetOpConstantFoldingPerfStatistic() const { - return statistic_of_op_constant_folding_; +Status ConstantFoldingPass::RunOpKernel(NodePtr &node, + const vector &inputs, + std::vector &outputs) { + return HostCpuEngine::GetInstance().Run(node, inputs, outputs); } Status ConstantFoldingPass::Run(ge::NodePtr &node) { diff --git a/ge/graph/passes/constant_folding_pass.h b/ge/graph/passes/constant_folding_pass.h index 703e6edd..7de48a17 100644 --- a/ge/graph/passes/constant_folding_pass.h +++ b/ge/graph/passes/constant_folding_pass.h @@ -28,6 +28,11 @@ class ConstantFoldingPass : public FoldingPass { Status Run(ge::NodePtr &node) override; const std::map> &GetGeConstantFoldingPerfStatistic() const; const std::map> &GetOpConstantFoldingPerfStatistic() const; + + static Status RunOpKernel(NodePtr &node, const vector &inputs, vector &outputs); + static Status RunOpKernelWithCheck(NodePtr &node, const vector &inputs, + std::vector &outputs); + private: std::map> statistic_of_op_constant_folding_; std::map> statistic_of_ge_constant_folding_; diff --git a/ge/graph/passes/folding_pass.cc b/ge/graph/passes/folding_pass.cc index c0a0f2a2..819c3b40 100755 --- a/ge/graph/passes/folding_pass.cc +++ b/ge/graph/passes/folding_pass.cc @@ -28,8 +28,6 @@ #include "inc/kernel.h" #include "inc/kernel_factory.h" #include "graph/debug/ge_attr_define.h" -#include "ge_local_engine/engine/host_cpu_engine.h" - namespace ge { namespace folding_pass { @@ -123,12 +121,6 @@ NodePtr AddIdentityNodeToGraph(const std::string &name, const GeTensorDesc &tens } } // namespace -Status FoldingPass::RunOpKernel(NodePtr &node, - const vector &inputs, - std::vector &outputs) { - return HostCpuEngine::GetInstance().Run(node, inputs, outputs); -} - Status FoldingPass::Folding(NodePtr &node, vector &outputs) { GE_CHECK_NOTNULL(node); GELOGD("begin folding node:%s", node->GetName().c_str()); diff --git a/ge/graph/passes/folding_pass.h b/ge/graph/passes/folding_pass.h index 745cffd7..c461ff5c 100755 --- a/ge/graph/passes/folding_pass.h +++ b/ge/graph/passes/folding_pass.h @@ -34,8 +34,6 @@ bool IsNoNeedConstantFolding(const NodePtr &node); using 
IndexsToAnchors = std::map<int, std::vector<InDataAnchorPtr>>;

 class FoldingPass : public BaseNodePass {
- public:
-  static Status RunOpKernel(NodePtr &node, const vector<ConstGeTensorPtr> &inputs, vector<GeTensorPtr> &outputs);
 protected:
  Status Folding(NodePtr &node, vector<GeTensorPtr> &outputs);
 private:

diff --git a/ge/graph/passes/infer_base_pass.cc b/ge/graph/passes/infer_base_pass.cc
new file mode 100644
index 00000000..27eb0c54
--- /dev/null
+++ b/ge/graph/passes/infer_base_pass.cc
@@ -0,0 +1,386 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "infer_base_pass.h"
+#include "common/ge/ge_util.h"
+#include "common/util/error_manager/error_manager.h"
+#include "framework/common/debug/ge_log.h"
+#include "framework/common/util.h"
+#include "graph/debug/ge_attr_define.h"
+#include "graph/utils/graph_utils.h"
+#include "graph/utils/node_utils.h"
+#include "graph/utils/tensor_utils.h"
+#include "graph/utils/type_utils.h"
+
+namespace ge {
+namespace {
+graphStatus FindValidSubgraphNetoutput(const ConstNodePtr &node, const ComputeGraphPtr &sub_graph, NodePtr &netoutput) {
+  auto sub_nodes = sub_graph->GetDirectNode();
+  for (size_t i = sub_nodes.size(); i > 0; --i) {
+    auto sub_node = sub_nodes.at(i - 1);
+    // Check for null before the node is dereferenced by GetType().
+    if (sub_node == nullptr) {
+      REPORT_INNER_ERROR("E19999", "Node is null in subgraph %s, parent node %s.",
+                         sub_graph->GetName().c_str(), node->GetName().c_str());
+      GELOGE(GRAPH_FAILED, "[Check][Param] Node is null on sub graph %s, parent node %s",
+             sub_graph->GetName().c_str(), node->GetName().c_str());
+      return GRAPH_FAILED;
+    }
+    if (sub_node->GetType() == NETOUTPUT) {
+      auto sub_node_opdesc = sub_node->GetOpDesc();
+      if (sub_node_opdesc == nullptr) {
+        REPORT_INNER_ERROR("E19999", "Invalid NetOutput node in subgraph %s, parent node %s, no OpDesc on it",
+                           sub_graph->GetName().c_str(), node->GetName().c_str());
+        GELOGE(GRAPH_FAILED, "[Check][Param] Invalid NetOutput node on sub graph %s, parent node %s, no OpDesc on it",
+               sub_graph->GetName().c_str(), node->GetName().c_str());
+        return GRAPH_FAILED;
+      }
+
+      netoutput = sub_node;
+      return GRAPH_SUCCESS;
+    }
+  }
+
+  REPORT_INNER_ERROR("E19999", "Can not find the NetOutput node in subgraph %s, parent node %s",
+                     sub_graph->GetName().c_str(), node->GetName().c_str());
+  GELOGE(GRAPH_FAILED, "[Check][Param] Can not find the NetOutput node in subgraph %s, parent node %s",
+         sub_graph->GetName().c_str(), node->GetName().c_str());
+  return GRAPH_FAILED;
+}
+}  // namespace
+
+Status InferBasePass::Run(NodePtr &node) {
+  GE_CHECK_NOTNULL(node);
+  GE_CHECK_NOTNULL(node->GetOpDesc());
+
+  bool need_infer = NeedInfer(node);
+  if (!need_infer) {
+    GELOGD("Node %s does not need to infer.", node->GetName().c_str());
+    return SUCCESS;
+  }
+
+  std::set<NodePtr> changed_nodes;
+  auto ret = InferAndUpdate(node, !OptionExists(kOptimizeAfterSubGraph), changed_nodes);
+  if (ret != GRAPH_SUCCESS) {
+    GELOGE(ret, "Infer and update for node %s failed! ret: %u", node->GetName().c_str(), ret);
+    return GRAPH_FAILED;
+  }
+
+  AddChangedNodesImmediateRepass(changed_nodes);
+  return SUCCESS;
+}
+
+bool InferBasePass::NeedInfer(const NodePtr &node) const { return true; }
+void InferBasePass::AddChangedNodesImmediateRepass(const std::set<NodePtr> &changed_nodes) {
+  for (const auto &node_ele : changed_nodes) {
+    AddImmediateRePassNode(node_ele);
+  }
+}
+
+graphStatus InferBasePass::InferAndUpdate(NodePtr &node, bool before_subgraph, std::set<NodePtr> &changed_nodes) {
+  graphStatus ret;
+  if (ContainsSubgraph(node)) {
+    if (before_subgraph) {
+      ret = UpdateTensorDescToSubgraphData(node);
+    } else {
+      ret = UpdateTensorDescToParentNodeOutput(node);
+    }
+    if (ret != GRAPH_SUCCESS) {
+      GELOGE(ret, "Update tensor desc failed between parent node %s and subgraphs. ret: %u", node->GetName().c_str(),
+             ret);
+      return ret;
+    }
+  }
+
+  PrintInOutTensors(node, "before_infer");
+  ret = Infer(node);
+  PrintInOutTensors(node, "after_infer");
+  if (ret == GRAPH_NODE_NEED_REPASS) {
+    // If a node needs re-pass, it is not necessary to update peer node inputs.
+    changed_nodes.insert(node);
+    return GRAPH_SUCCESS;
+  } else if (ret != GRAPH_SUCCESS && ret != GRAPH_NOT_CHANGED) {
+    GELOGE(ret, "Infer failed for node %s, ret: %u", node->GetName().c_str(), ret);
+    return ret;
+  }
+
+  ret = UpdateTensorDescToPeerInputs(node, changed_nodes);
+  if (ret != GRAPH_SUCCESS) {
+    GELOGE(ret, "Node %s updates tensor desc to peer input nodes failed! ret: %u", node->GetName().c_str(), ret);
+  }
+  GELOGD("Node %s infer and update succeeded.", node->GetName().c_str());
+  return ret;
+}
+
+bool InferBasePass::ContainsSubgraph(const NodePtr &node) {
+  auto sub_graph_names = node->GetOpDesc()->GetSubgraphInstanceNames();
+  return !sub_graph_names.empty();
+}
+
+graphStatus InferBasePass::UpdateTensorDescToPeerInputs(NodePtr &node, std::set<NodePtr> &changed_nodes) {
+  auto op_desc = node->GetOpDesc();
+  for (const auto &out_anchor : node->GetAllOutDataAnchors()) {
+    auto output_tensor = op_desc->MutableOutputDesc(out_anchor->GetIdx());
+    for (const auto &peer_anchor : out_anchor->GetPeerInDataAnchors()) {
+      auto peer_anchor_opdesc = peer_anchor->GetOwnerNode()->GetOpDesc();
+      if (peer_anchor_opdesc == nullptr) {
+        continue;
+      }
+      auto peer_input_desc = peer_anchor_opdesc->MutableInputDesc(peer_anchor->GetIdx());
+      if (peer_input_desc == nullptr) {
+        continue;
+      }
+
+      bool changed = false;
+      auto ret = UpdateTensorDesc(output_tensor, peer_input_desc, changed);
+      if (ret != GRAPH_SUCCESS) {
+        REPORT_CALL_ERROR("E19999", "Update peer input desc failed, node %s.", node->GetName().c_str());
+        GELOGE(ret, "Update peer input desc failed, node %s.", node->GetName().c_str());
+        return ret;
+      }
+      if (changed) {
+        changed_nodes.insert(peer_anchor->GetOwnerNode());
+        GELOGD("Node %s update peer node succeeded, peer node %s is changed.", node->GetName().c_str(),
+               peer_anchor->GetOwnerNode()->GetName().c_str());
+      }
+    }
+  }
+  return GRAPH_SUCCESS;
+}
+
+std::vector<ComputeGraphPtr> InferBasePass::GetCurNodeSubgraphs(const NodePtr &node) {
+  std::vector<ComputeGraphPtr> cur_node_subgraph;
+  auto op_desc = node->GetOpDesc();
+  auto sub_graph_names = op_desc->GetSubgraphInstanceNames();
+  if (sub_graph_names.empty()) {
+    return cur_node_subgraph;
+  }
+
+  auto root_graph = GraphUtils::FindRootGraph(node->GetOwnerComputeGraph());
+  for (const auto &name : sub_graph_names) {
+    if (name.empty()) {
+      GELOGW("The node %s contains empty subgraph instance name", node->GetName().c_str());
+      continue;
+    }
+    auto sub_graph = root_graph->GetSubgraph(name);
+    if (sub_graph == nullptr) {
+      GELOGW("The subgraph %s for node %s is null.", name.c_str(), node->GetName().c_str());
+      continue;
+    }
+    cur_node_subgraph.emplace_back(sub_graph);
+  }
+  return cur_node_subgraph;
+}
+
+graphStatus InferBasePass::UpdateTensorDescToSubgraphData(NodePtr &node) {
+  auto op_desc = node->GetOpDesc();
+  for (const auto &sub_graph : GetCurNodeSubgraphs(node)) {
+    for (const auto &node_sub : sub_graph->GetDirectNode()) {
+      if (node_sub->GetType() != DATA) {
+        continue;
+      }
+
+      auto data_opdesc = node_sub->GetOpDesc();
+      if (data_opdesc == nullptr) {
+        REPORT_INNER_ERROR("E19999", "Invalid data node on the sub graph %s parent node %s, no OpDesc",
+                           sub_graph->GetName().c_str(), node->GetName().c_str());
+        GELOGE(GRAPH_FAILED, "[Get][OpDesc] Invalid data node on the sub graph %s parent node %s, no OpDesc",
+               sub_graph->GetName().c_str(), node->GetName().c_str());
+        return GRAPH_FAILED;
+      }
+      int ref_i;
+      if (!AttrUtils::GetInt(data_opdesc, ATTR_NAME_PARENT_NODE_INDEX, ref_i)) {
+        REPORT_INNER_ERROR("E19999", "Invalid data node on the sub graph %s parent node %s, no ref-index attribute",
+                           sub_graph->GetName().c_str(), node->GetName().c_str());
+        GELOGE(GRAPH_FAILED, "[Get][Int] Invalid data node on the sub graph %s parent node %s, no ref-index attribute",
+               sub_graph->GetName().c_str(), node->GetName().c_str());
+        return GRAPH_FAILED;
+      }
+      GELOGD("Subgraph Data node ref_index is %d, parent node is %s.", ref_i, node->GetName().c_str());
+
+      // In multi-batch, data shape of subgraph is different, no need to refresh.
+      if (data_opdesc->HasAttr(ATTR_MBATCH_ORIGIN_INPUT_DIMS)) {
+        GELOGD("While updating subgraph data node, ignore node %s which is created by multi-dims",
+               data_opdesc->GetName().c_str());
+        continue;
+      }
+      auto input_desc = op_desc->MutableInputDesc(ref_i);
+      if (input_desc == nullptr) {
+        REPORT_INNER_ERROR("E19999",
+                           "The ref index(%d) on the data %s on the sub graph %s "
+                           "parent node %s is incompatible, inputs num %u",
+                           ref_i, node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str(),
+                           node->GetAllInDataAnchorsSize());
+        GELOGE(GRAPH_FAILED,
+               "[Call][MutableInputDesc] The ref index(%d) on the data %s on the sub graph %s "
+               "parent node %s is incompatible, inputs num %u",
+               ref_i, node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str(),
+               node->GetAllInDataAnchorsSize());
+        return GRAPH_FAILED;
+      }
+      GELOGI("Ref index is %d, input_desc dtype is %d, node name is %s", ref_i, input_desc->GetDataType(),
+             node->GetName().c_str());
+
+      bool has_tensor_desc_changed = false;
+      auto data_input_td = data_opdesc->MutableInputDesc(0);
+      auto ret = UpdateTensorDesc(input_desc, data_input_td, has_tensor_desc_changed);
+      if (ret != GRAPH_SUCCESS) {
+        REPORT_CALL_ERROR("E19999", "Failed to update input desc of data %s on the sub graph %s parent node %s",
+                          node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str());
+        GELOGE(GRAPH_FAILED, "[Update][InputDesc] of data %s on the sub graph %s parent node %s failed",
+               node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str());
+        return ret;
+      }
+
+      auto data_output_td = data_opdesc->MutableOutputDesc(0);
+      ret = UpdateTensorDesc(input_desc, data_output_td, has_tensor_desc_changed);
+      if (ret != GRAPH_SUCCESS) {
+        REPORT_CALL_ERROR("E19999", "Failed to update output desc of data %s on the sub graph %s parent node %s",
+                          node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str());
+        GELOGE(GRAPH_FAILED, "[Update][OutputDesc] of data %s on the sub graph %s parent node %s failed",
+               node_sub->GetName().c_str(), sub_graph->GetName().c_str(), node->GetName().c_str());
+        return ret;
+      }
+      GELOGD("Parent node %s update subgraph data %s input and output succeed.", node->GetName().c_str(),
+             data_opdesc->GetName().c_str());
+    }
+  }
+  return GRAPH_SUCCESS;
+}
+
+graphStatus InferBasePass::UpdateTensorDescToParentNodeOutput(NodePtr &node) {
+  std::vector<std::vector<GeTensorDescPtr>> ref_out_tensors(node->GetAllOutDataAnchorsSize());
+
+  for (const auto &sub_graph : GetCurNodeSubgraphs(node)) {
+    NodePtr netoutput;
+    auto ret = FindValidSubgraphNetoutput(node, sub_graph, netoutput);
+    if (ret != GRAPH_SUCCESS) {
+      return ret;
+    }
+
+    auto netoutput_opdesc = netoutput->GetOpDesc();
+    for (auto &netoutput_in_anchor : netoutput->GetAllInDataAnchors()) {
+      auto netoutput_in_desc = netoutput_opdesc->MutableInputDesc(netoutput_in_anchor->GetIdx());
+      if (netoutput_in_desc == nullptr) {
+        REPORT_INNER_ERROR("E19999",
+                           "Invalid NetOutput node on sub graph %s, parent node %s, can not find input tensor %d",
+                           sub_graph->GetName().c_str(), node->GetName().c_str(), netoutput_in_anchor->GetIdx());
+        GELOGE(GRAPH_FAILED,
+               "[Get][Tensor] Invalid NetOutput node on sub graph %s, parent node %s, can not find input tensor %d",
+               sub_graph->GetName().c_str(), node->GetName().c_str(), netoutput_in_anchor->GetIdx());
+        return GRAPH_FAILED;
+      }
+      GELOGI("Netoutput in anchor index is %d, input tensor dim is %zu", netoutput_in_anchor->GetIdx(),
+             netoutput_in_desc->GetShape().GetDimNum());
+      int ref_i;
+      if (!AttrUtils::GetInt(netoutput_in_desc, ATTR_NAME_PARENT_NODE_INDEX, ref_i)) {
+        // If there is no ref index on the TensorDesc, it means the output data will be ignored outer.
+        continue;
+      }
+      GELOGI("Parent node index of edge desc is %d", ref_i);
+      if (ref_i < 0 || static_cast<uint32_t>(ref_i) >= node->GetAllOutDataAnchorsSize()) {
+        REPORT_INNER_ERROR("E19999",
+                           "Invalid ref_index %d of parent node %s, ref_index should be less than %u.", ref_i,
+                           node->GetName().c_str(), node->GetAllOutDataAnchorsSize());
+        GELOGE(GRAPH_FAILED,
+               "[Get][Ref_index] Invalid ref_index %d of parent node %s, ref_index should be less than %u.", ref_i,
+               node->GetName().c_str(), node->GetAllOutDataAnchorsSize());
+        return GRAPH_FAILED;
+      }
+      ref_out_tensors[ref_i].emplace_back(netoutput_in_desc);
+    }
+  }
+
+  return UpdateParentNodeContainsSubgraphs(node, ref_out_tensors);
+}
+
+graphStatus InferBasePass::UpdateParentNodeContainsSubgraphs(
+    NodePtr &node, const std::vector<std::vector<GeTensorDescPtr>> &ref_out_tensors) {
+  for (size_t i = 0; i < ref_out_tensors.size(); i++) {
+    if (ref_out_tensors[i].empty()) {
+      REPORT_CALL_ERROR("E19999", "Parent node %s ref_index %zu subgraph output tensor list is empty.",
+                        node->GetName().c_str(), i);
+      GELOGE(GRAPH_FAILED, "[Param][check] Parent node %s ref_index %zu subgraph output tensor list is empty.",
+             node->GetName().c_str(), i);
+      return GRAPH_FAILED;
+    }
+    auto node_op_desc = node->GetOpDesc();
+    auto node_output_td = node_op_desc->MutableOutputDesc(i);
+    if (node_output_td == nullptr) {
+      REPORT_CALL_ERROR("E19999", "Node %s output %zu tensor desc is null.", node->GetName().c_str(), i);
+      GELOGE(GRAPH_FAILED, "[Param][check] Node %s output %zu tensor desc is null.", node->GetName().c_str(), i);
+      return GRAPH_FAILED;
+    }
+
+    graphStatus ret;
+    if (node_op_desc->HasAttr(ATTR_NAME_BATCH_NUM)) {
+      ret = UpdateOutputFromSubgraphsForMultiDims(ref_out_tensors[i], node_output_td);
+    } else {
+      ret = UpdateOutputFromSubgraphs(ref_out_tensors[i], node_output_td);
+    }
if (ret != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Node %s update output %zu tensor desc failed. ret: %u", node->GetName().c_str(), i, + ret); + GELOGE(GRAPH_FAILED, "[Param][check] Node %s update output %zu tensor desc failed. ret: %u", + node->GetName().c_str(), i, ret); + return ret; + } + GELOGD("Parent node %s successfully updated the output tensors from subgraphs.", node->GetName().c_str()); + } + return GRAPH_SUCCESS; +} + +void InferBasePass::PrintInOutTensors(const NodePtr &node, const std::string &phase) { + if (!IsLogEnable(GE, DLOG_DEBUG)) { + return; + } + if (node == nullptr) { + REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid"); + GELOGE(GRAPH_FAILED, "[Check][Param] node is null"); + return; + } + ge::OpDescPtr op_desc = node->GetOpDesc(); + GE_IF_BOOL_EXEC(op_desc == nullptr, REPORT_INNER_ERROR("E19999", "Node has no opdesc, check invalid"); + GELOGE(GRAPH_FAILED, "[Get][OpDesc] op_desc is null."); return ); + std::stringstream ss; + ss << "{"; + int32_t in_idx = 0; + for (const auto &input_desc : op_desc->GetAllInputsDescPtr()) { + if (input_desc == nullptr) { + in_idx++; + continue; + } + if (in_idx > 0) { + ss << " "; + } + ss << "input_" << in_idx << " tensor: "; + ss << SerialTensorInfo(input_desc); + in_idx++; + } + int32_t out_idx = 0; + for (const auto &output_desc : op_desc->GetAllOutputsDescPtr()) { + if (output_desc == nullptr) { + out_idx++; + continue; + } + ss << " "; + ss << "output_" << out_idx << " tensor: "; + ss << SerialTensorInfo(output_desc); + out_idx++; + } + ss << "}"; + GELOGD("Infer tensor dump [%s], Node name: [%s]. %s", phase.c_str(), node->GetName().c_str(), ss.str().c_str()); +} +} // namespace ge diff --git a/ge/graph/passes/infer_base_pass.h b/ge/graph/passes/infer_base_pass.h new file mode 100644 index 00000000..3900b5db --- /dev/null +++ b/ge/graph/passes/infer_base_pass.h @@ -0,0 +1,65 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef GE_GRAPH_PASSES_INFER_BASE_PASS_H_ +#define GE_GRAPH_PASSES_INFER_BASE_PASS_H_ + +#include "graph/passes/base_pass.h" + +namespace ge { +class InferBasePass : public BaseNodePass { + public: + Status Run(NodePtr &node) override; + graphStatus InferAndUpdate(NodePtr &node, bool before_subgraph, std::set &changed_nodes); + void PrintInOutTensors(const NodePtr &node, const std::string &phase); + + protected: + virtual std::string SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const = 0; + virtual bool NeedInfer(const NodePtr &node) const; + virtual graphStatus Infer(NodePtr &node) = 0; + + /** + * Update the output TensorDesc by src TensorDesc. This will be called when updating peer node input desc. + * @param src, input TensorDesc + * @param dst, output TensorDesc to be updated + * @return + */ + virtual graphStatus UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) = 0; + + /** + * Update the output TensorDesc for nodes which contain subgraphs. 
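+   * One TensorDesc is collected per subgraph instance (the NetOutput input that refs this parent
+   * output), and implementations are expected to merge them into dst.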
+ * In dynamic multi-dims/batch/images size scene, the update process maybe different, + * in which case, the `InferBasePass` will call method `UpdateOutputFromSubgraphsForMultiDims` instead. + * @param src, input TensorDesc from NetOutput nodes in all subgraphs + * @param dst, output TensorDesc to be updated + * @return + */ + virtual graphStatus UpdateOutputFromSubgraphs(const std::vector &src, + GeTensorDescPtr &dst) = 0; + virtual graphStatus UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) = 0; + + private: + void AddChangedNodesImmediateRepass(const std::set &changed_nodes); + bool ContainsSubgraph(const NodePtr &node); + std::vector GetCurNodeSubgraphs(const NodePtr &node); + graphStatus UpdateTensorDescToSubgraphData(NodePtr &node); + graphStatus UpdateTensorDescToParentNodeOutput(NodePtr &node); + graphStatus UpdateParentNodeContainsSubgraphs(NodePtr &node, + const std::vector> &ref_out_tensors); + graphStatus UpdateTensorDescToPeerInputs(NodePtr &node, std::set &changed_nodes); +}; +} // namespace ge +#endif // GE_GRAPH_PASSES_INFER_BASE_PASS_H_ diff --git a/ge/graph/passes/infer_value_range_pass.cc b/ge/graph/passes/infer_value_range_pass.cc new file mode 100644 index 00000000..b9cb88bc --- /dev/null +++ b/ge/graph/passes/infer_value_range_pass.cc @@ -0,0 +1,500 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "graph/passes/infer_value_range_pass.h"
+#include "common/formats/utils/formats_trans_utils.h"
+#include "common/util/error_manager/error_manager.h"
+#include "framework/common/debug/ge_log.h"
+#include "graph/debug/ge_attr_define.h"
+#include "graph/operator_factory_impl.h"
+#include "graph/passes/constant_folding_pass.h"
+#include "graph/utils/type_utils.h"
+#include "common/ge/ge_util.h"
+
+using std::unique_ptr;
+namespace ge {
+namespace {
+#define GET_DATA_BY_DTYPE(DTYPE, TYPE)                                                                    \
+  case (DTYPE):                                                                                           \
+    ConstructValueRange<TYPE>(lower_boundary_tensor, upper_boundary_tensor, output_tensor_value_range);   \
+    break;
+
+void SerialShapeRange(const GeTensorDescPtr &desc, std::string &desc_str) {
+  std::vector<std::pair<int64_t, int64_t>> shape_range;
+  (void)desc->GetShapeRange(shape_range);
+  desc_str += formats::RangeToString(shape_range);
+  shape_range.clear();
+  (void)desc->GetOriginShapeRange(shape_range);
+  desc_str += ",";
+  desc_str += formats::RangeToString(shape_range);
+  shape_range.clear();
+}
+
+Status RunCpuKernelForValueRange(NodePtr &node, const vector<ConstGeTensorPtr> &inputs,
+                                 std::vector<GeTensorPtr> &outputs) {
+  // RunOpKernelWithCheck, RunOpKernel for test
+  auto ret = ConstantFoldingPass::RunOpKernel(node, inputs, outputs);
+  if (ret != SUCCESS) {
+    auto op_kernel = folding_pass::GetKernelByType(node);
+    if (op_kernel == nullptr) {
+      GELOGW("Calculate value range failed, no op kernel for node %s type %s", node->GetName().c_str(),
+             node->GetType().c_str());
+      return NOT_CHANGED;
+    }
+
+    ret = op_kernel->Compute(node->GetOpDesc(), inputs, outputs);
+    if (ret != SUCCESS) {
+      GELOGW("Calculate value range failed, node %s run cpu kernel failed.", node->GetName().c_str());
+      return NOT_CHANGED;
+    }
+  }
+  GELOGI("Node %s type %s, run cpu kernel success.", node->GetName().c_str(), node->GetType().c_str());
+  return SUCCESS;
+}
+}  // namespace
+
+graphStatus InferValueRangePass::Infer(NodePtr &node) {
+  auto infer_value_range_param = OperatorFactoryImpl::GetInferValueRangePara(node->GetType());
+
+  // Use registered func to calculate value range
+  if (!infer_value_range_param.use_cpu_kernel) {
+    if (infer_value_range_param.infer_value_func == nullptr) {
+      GELOGW("The registered func of node %s to infer value range is nullptr.", node->GetName().c_str());
+      return GRAPH_NOT_CHANGED;
+    }
+    Operator op = OpDescUtils::CreateOperatorFromNode(node);
+    auto ret = node->GetOpDesc()->CallInferValueRangeFunc(op);
+    if (ret != GRAPH_SUCCESS) {
+      GELOGW("Node %s call infer value range func failed, ret: %u.", node->GetName().c_str(), ret);
+      return GRAPH_NOT_CHANGED;
+    }
+    GELOGD("Node %s infer value range func succeed by registered func.", node->GetName().c_str());
+    return GRAPH_SUCCESS;
+  }
+
+  // If the input value range has -1, the cpu kernel cannot calculate correctly, so set {1:-1}
+  if (InputHasUnknownValueRange(node)) {
+    GELOGI("Node %s has unknown value range in input tensors, set value range {1:-1}, and skip cpu kernel.",
+           node->GetName().c_str());
+    return GenerateWorstValueRange(node);
+  }
+
+  // Use CPU kernel func to calculate value range
+  auto ret = ConstructInputAndInferValueRange(node);
+  if (ret != GRAPH_SUCCESS) {
+    GELOGW("Use CPU kernel to calculate value range failed.
node: %s, ret: %u", node->GetName().c_str(), ret); + return GRAPH_NOT_CHANGED; + } + GELOGD("Node %s infer value range func succeed by running cpu kernel.", node->GetName().c_str()); + return GRAPH_SUCCESS; +} + +std::string InferValueRangePass::SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const { + std::stringstream ss; + ss << "["; + ss << "(shape:[" << tensor_desc->MutableShape().ToString() << "]),"; + string range_str; + SerialShapeRange(tensor_desc, range_str); + ss << "(shape_range:" << range_str << "),"; + std::vector> value_range; + (void)tensor_desc->GetValueRange(value_range); + string value_range_str = formats::RangeToString(value_range); + ss << "(value_range:" << value_range_str << ")]"; + return ss.str(); +} + +bool InferValueRangePass::NeedInfer(const NodePtr &node) const { + auto infer_value_range_param = OperatorFactoryImpl::GetInferValueRangePara(node->GetType()); + if (!infer_value_range_param.is_initialized) { + GELOGD("Node %s does not register func to infer value range, skip infer_value_range_pass.", + node->GetName().c_str()); + return false; + } + + if (infer_value_range_param.when_call == INPUT_IS_DYNAMIC) { + // Only do infer for node that all inputs are dynamic, such as shape + if (InputIsDynamic(node)) { + return true; + } + GELOGD("Node %s register func to infer value range and when_call is INPUT_IS_DYNAMIC, but check input failed.", + node->GetName().c_str()); + } else if (infer_value_range_param.when_call == INPUT_HAS_VALUE_RANGE) { + // Only do infer for node that all inputs have value_range or node type of inputs is constant/const + if (InputIsConstOrHasValueRange(node)) { + return true; + } + GELOGD("Node %s register func to infer value range and when_call is INPUT_HAS_VALUE_RANGE, but check input failed.", + node->GetName().c_str()); + } + GELOGD("Node %s does not need to infer value range, skip infer_value_range_pass.", node->GetName().c_str()); + return false; +} + +bool InferValueRangePass::InputIsDynamic(const NodePtr &node) const{ + bool input_is_dynamic = false; + auto cur_op_desc = node->GetOpDesc(); + for (const auto &input_desc : cur_op_desc->GetAllInputsDescPtr()) { + auto dims = input_desc->GetShape().GetDims(); + for (auto dim : dims) { + if (dim == UNKNOWN_DIM || dim == UNKNOWN_DIM_NUM) { + input_is_dynamic = true; + break; + } + } + } + return input_is_dynamic; +} + +bool InferValueRangePass::InputIsConstOrHasValueRange(const NodePtr &node) const { + bool input_is_const_or_has_value_range = true; + auto cur_op_desc = node->GetOpDesc(); + auto in_data_anchors = node->GetAllInDataAnchors(); + for (size_t i = 0; i < in_data_anchors.size(); ++i) { + auto peer_out_anchor = in_data_anchors.at(i)->GetPeerOutAnchor(); + if (peer_out_anchor == nullptr) { + continue; + } + auto peer_node = peer_out_anchor->GetOwnerNode(); + if (peer_node == nullptr || peer_node->GetOpDesc() == nullptr) { + continue; + } + if ((peer_node->GetType() == CONSTANT) || (peer_node->GetType() == CONSTANTOP)) { + continue; + } + + const auto &input_desc = cur_op_desc->GetInputDesc(i); + std::vector> value_range; + (void)input_desc.GetValueRange(value_range); + if (value_range.empty()) { + GELOGD("Node %s input %zu does not have value range, skip infer_value_range_pass for current node.", + node->GetName().c_str(), i); + input_is_const_or_has_value_range = false; + break; + } + } + return input_is_const_or_has_value_range; +} + +bool InferValueRangePass::InputHasUnknownValueRange(const NodePtr &node) const { + bool has_unknown_value_range = false; + auto cur_op_desc = 
node->GetOpDesc(); + for (const auto &input_desc : cur_op_desc->GetAllInputsDescPtr()) { + std::vector> input_desc_value_range; + input_desc->GetValueRange(input_desc_value_range); + if (!input_desc_value_range.empty()) { + for (const auto &range : input_desc_value_range) { + if (range.first == -1 || range.second == -1) { + GELOGD("Node %s input tensors have unknown value range, value range is %s.", node->GetName().c_str(), + formats::RangeToString(input_desc_value_range).c_str()); + has_unknown_value_range = true; + } + } + } + } + return has_unknown_value_range; +} + +graphStatus InferValueRangePass::UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) { + if (src == nullptr || dst == nullptr) { + REPORT_CALL_ERROR("E19999", "While updating tensor desc, input desc is null."); + GELOGE(GRAPH_FAILED, "[Param][check] While updating tensor desc, input desc is null."); + return GRAPH_FAILED; + } + + changed = false; + std::vector> src_value_range; + std::vector> dst_value_range; + (void)src->GetValueRange(src_value_range); + (void)dst->GetValueRange(dst_value_range); + if (src_value_range != dst_value_range) { + GELOGD("While updating tensor desc, value range has been changed, src value range: %s, dst value range: %s.", + formats::RangeToString(src_value_range).c_str(), formats::RangeToString(dst_value_range).c_str()); + changed = true; + } + + dst->SetValueRange(src_value_range); + return GRAPH_SUCCESS; +} + +graphStatus InferValueRangePass::UpdateOutputFromSubgraphs(const std::vector &src, + GeTensorDescPtr &dst) { + std::vector> ref_out_tensor_value_range; + auto ref_out_tensor = src.at(0); + (void)ref_out_tensor->GetValueRange(ref_out_tensor_value_range); + for (auto &ref_tensor : src) { + std::vector> ref_tensor_value_range; + (void)ref_tensor->GetValueRange(ref_tensor_value_range); + + if (ref_tensor_value_range.size() != ref_out_tensor_value_range.size()) { + GELOGD("Update TensorDesc %s failed, rank of value ranges %s and %s are not the same, skip value range refresh.", + dst->GetName().c_str(), formats::RangeToString(ref_out_tensor_value_range).c_str(), + formats::RangeToString(ref_tensor_value_range).c_str()); + return GRAPH_SUCCESS; + } + + for (size_t j = 0; j < ref_out_tensor_value_range.size(); j++) { + if ((ref_out_tensor_value_range.at(j).first != ref_tensor_value_range.at(j).first) || + (ref_out_tensor_value_range.at(j).second != ref_tensor_value_range.at(j).second)) { + ref_out_tensor_value_range[j] = std::make_pair(1, -1); + } + } + } + GELOGD("While updating output desc from subgraphs, set parent node desc value range %s.", + formats::RangeToString(ref_out_tensor_value_range).c_str()); + dst->SetValueRange(ref_out_tensor_value_range); + return GRAPH_SUCCESS; +} + +graphStatus InferValueRangePass::UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) { + REPORT_INNER_ERROR("E19999", + "Update TensorDesc %s failed. In dynamic multi-dims size scene, there should be no value range.", + dst->GetName().c_str()); + GELOGE(GRAPH_FAILED, + "[Update][TensorDesc] %s failed. 
In dynamic multi-dims size scene, there should be no value range.", + dst->GetName().c_str()); + return GRAPH_FAILED; +} + +graphStatus InferValueRangePass::GenerateWorstValueRange(NodePtr &node) { + GELOGI("Node %s does not run cpu kernel, because input value range has -1.", node->GetName().c_str()); + OpDescPtr op_desc = node->GetOpDesc(); + for (size_t i = 0; i < op_desc->GetOutputsSize(); ++i) { + auto output_desc = op_desc->MutableOutputDesc(i); + if (output_desc == nullptr) { + continue; + } + auto output_i_shape = output_desc->GetShape(); + auto output_i_shape_size = output_i_shape.GetShapeSize(); + if (output_i_shape_size < 0) { + GELOGD("Node %s output shape is unknown, cannot infer value range, shape is %s.", node->GetName().c_str(), + formats::ShapeToString(output_i_shape).c_str()); + return GRAPH_NOT_CHANGED; + } + + std::vector> output_i_value_range(output_i_shape_size, {1, -1}); + output_desc->SetValueRange(output_i_value_range); + GELOGD("Node %s output %zu shape is %s, the generated worst value range is %s.", node->GetName().c_str(), i, + formats::ShapeToString(output_i_shape).c_str(), formats::RangeToString(output_i_value_range).c_str()); + } + return GRAPH_SUCCESS; +} + +template +graphStatus InferValueRangePass::ConstructData(const GeTensorDesc &tensor_desc, bool use_floor_value, + GeTensorPtr &output_ptr) { + std::vector> value_range; + (void)tensor_desc.GetValueRange(value_range); + if (static_cast(value_range.size()) != tensor_desc.GetShape().GetShapeSize()) { + GELOGW("Value range of input %s is invalid.", tensor_desc.GetName().c_str()); + return GRAPH_PARAM_INVALID; + } + + size_t value_range_data_num = value_range.size(); + unique_ptr buf(new (std::nothrow) T[value_range_data_num]()); + if (buf == nullptr) { + REPORT_INNER_ERROR("E19999", "New buf failed"); + GELOGE(MEMALLOC_FAILED, "New buf failed"); + return GRAPH_FAILED; + } + for (size_t j = 0; j < value_range_data_num; ++j) { + auto value_range_j = use_floor_value ? 
value_range[j].first : value_range[j].second; + buf[j] = static_cast(value_range_j); + } + + if (output_ptr->SetData(reinterpret_cast(buf.get()), value_range_data_num * sizeof(T)) != GRAPH_SUCCESS) { + GELOGW("Set data failed while constructing value range input tensor."); + return GRAPH_NOT_CHANGED; + } + return GRAPH_SUCCESS; +} + +graphStatus InferValueRangePass::ConstructDataByType(const GeTensorDesc &tensor_desc, bool use_floor_value, + GeTensorPtr &output_ptr) { + graphStatus ret = GRAPH_SUCCESS; + auto data_type = tensor_desc.GetDataType(); + output_ptr->MutableTensorDesc().SetDataType(data_type); + switch (data_type) { + case DT_FLOAT: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_DOUBLE: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_UINT8: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_INT8: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_UINT16: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_INT16: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_INT32: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + case DT_INT64: + ret = ConstructData(tensor_desc, use_floor_value, output_ptr); + break; + default: + GELOGW("Data type:%s is not supported.", TypeUtils::DataTypeToSerialString(data_type).c_str()); + ret = GRAPH_PARAM_INVALID; + } + return ret; +} + +vector InferValueRangePass::ConstructInputTensors(const NodePtr &node, bool use_floor_value) { + vector input_tensors; + auto cur_op_desc = node->GetOpDesc(); + auto in_data_anchors = node->GetAllInDataAnchors(); + for (size_t i = 0; i < in_data_anchors.size(); ++i) { + auto peer_out_anchor = in_data_anchors.at(i)->GetPeerOutAnchor(); + if (peer_out_anchor == nullptr) { + continue; + } + auto peer_node = peer_out_anchor->GetOwnerNode(); + if (peer_node == nullptr) { + continue; + } + + // construct input tensor by constant node + if ((peer_node->GetType() == CONSTANT) || (peer_node->GetType() == CONSTANTOP)) { + vector const_weight = OpDescUtils::MutableWeights(peer_node); + if (const_weight.empty()) { + GELOGW("MutableWeights failed, weight is empty, node: %s(%s)", peer_node->GetName().c_str(), + peer_node->GetType().c_str()); + return vector(); + } + // const/constant op has only one weight + if (const_weight.at(0) == nullptr) { + GELOGW("MutableWeights failed, weight of constant is null, node name: %s(%s)", + peer_node->GetName().c_str(), peer_node->GetType().c_str()); + return vector(); + } + input_tensors.push_back(const_weight.at(0)); + GELOGD("Node %s construct input tensor %zu by constant node.", node->GetName().c_str(), input_tensors.size()); + continue; + } + + // construct input tensor by boundary of value range + const auto &input_tensor_desc = cur_op_desc->GetInputDesc(i); + GeTensorPtr tmp_tensor_ptr = MakeShared(input_tensor_desc); + if (tmp_tensor_ptr == nullptr) { + REPORT_INNER_ERROR("E19999", "Make shared failed"); + GELOGE(MEMALLOC_FAILED, "Make shared failed"); + return vector(); + } + + auto ret = ConstructDataByType(input_tensor_desc, use_floor_value, tmp_tensor_ptr); + if (ret != GRAPH_SUCCESS) { + GELOGW("Construct input tensor by boundary of value range failed for input %s.", + input_tensor_desc.GetName().c_str()); + return vector(); + } + input_tensors.push_back(tmp_tensor_ptr); + GELOGD("Node %s construct input tensor %zu by input desc value range.", 
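+    // Example with hypothetical values: an input whose value range is {(1,3),(2,4)} yields tensor
+    // data [1, 2] when use_floor_value is true, and [3, 4] when it is false (see ConstructData).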
  }
+
+  return input_tensors;
+}
+
+graphStatus InferValueRangePass::ConstructInputAndInferValueRange(NodePtr &node) {
+  auto inputs = ConstructInputTensors(node, true);
+  if (inputs.empty()) {
+    return GRAPH_PARAM_INVALID;
+  }
+  vector<GeTensorPtr> lower_boundary_outputs;
+  auto ret = RunCpuKernelForValueRange(node, inputs, lower_boundary_outputs);
+  if (ret != SUCCESS) {
+    GELOGW("Node %s run cpu kernel failed while calculating value range.", node->GetName().c_str());
+    return GRAPH_PARAM_INVALID;
+  }
+
+  inputs = ConstructInputTensors(node, false);
+  if (inputs.empty()) {
+    return GRAPH_PARAM_INVALID;
+  }
+  vector<GeTensorPtr> upper_boundary_outputs;
+  ret = RunCpuKernelForValueRange(node, inputs, upper_boundary_outputs);
+  if (ret != SUCCESS) {
+    GELOGW("Node %s run cpu kernel failed while calculating value range.", node->GetName().c_str());
+    return GRAPH_PARAM_INVALID;
+  }
+
+  // construct value range from output tensor
+  OpDescPtr node_desc = node->GetOpDesc();
+  std::vector<std::pair<int64_t, int64_t>> output_tensor_value_range;
+  size_t node_output_desc_size = node_desc->GetOutputsSize();
+  for (size_t i = 0; i < node_output_desc_size; ++i) {
+    output_tensor_value_range.clear();
+    auto output_tensor_desc = node_desc->MutableOutputDesc(i);
+    auto output_shape_size = output_tensor_desc->GetShape().GetShapeSize();
+    auto lower_boundary_tensor = lower_boundary_outputs[i];
+    auto lower_boundary_shape = lower_boundary_tensor->GetTensorDesc().GetShape();
+    auto upper_boundary_tensor = upper_boundary_outputs[i];
+    auto upper_boundary_shape = upper_boundary_tensor->GetTensorDesc().GetShape();
+    if (lower_boundary_shape.GetShapeSize() != output_shape_size ||
+        upper_boundary_shape.GetShapeSize() != output_shape_size) {
+      GELOGD(
+          "Cpu kernel result shapes %s, %s and output shape %s do not match, can not infer value range for output %s.",
+          formats::ShapeToString(lower_boundary_shape).c_str(), formats::ShapeToString(upper_boundary_shape).c_str(),
+          formats::ShapeToString(output_tensor_desc->GetShape()).c_str(), output_tensor_desc->GetName().c_str());
+      return GRAPH_PARAM_INVALID;
+    }
+
+    auto data_type = output_tensor_desc->GetDataType();
+    switch (data_type) {
+      GET_DATA_BY_DTYPE(DT_INT8, int8_t)
+      GET_DATA_BY_DTYPE(DT_INT16, int16_t)
+      GET_DATA_BY_DTYPE(DT_INT32, int32_t)
+      GET_DATA_BY_DTYPE(DT_INT64, int64_t)
+      GET_DATA_BY_DTYPE(DT_UINT8, uint8_t)
+      GET_DATA_BY_DTYPE(DT_UINT16, uint16_t)
+      GET_DATA_BY_DTYPE(DT_UINT32, uint32_t)
+      GET_DATA_BY_DTYPE(DT_UINT64, uint64_t)
+      GET_DATA_BY_DTYPE(DT_FLOAT, float)
+      GET_DATA_BY_DTYPE(DT_DOUBLE, double)
+      default:
+        GELOGW("Data type:%s is not supported.", TypeUtils::DataTypeToSerialString(data_type).c_str());
+        return GRAPH_PARAM_INVALID;
+    }
+    output_tensor_desc->SetValueRange(output_tensor_value_range);
+    GELOGD("Node %s calculates output %zu value range %s by running cpu kernel.", node->GetName().c_str(), i,
+           formats::RangeToString(output_tensor_value_range).c_str());
+  }
+  return GRAPH_SUCCESS;
+}
+
+template <typename T>
+void InferValueRangePass::ConstructValueRange(const GeTensorPtr &left_tensor, const GeTensorPtr &right_tensor,
+                                              std::vector<std::pair<int64_t, int64_t>> &value_range) {
+  auto x = reinterpret_cast<const T *>(left_tensor->GetData().GetData());
+  auto y = reinterpret_cast<const T *>(right_tensor->GetData().GetData());
+  if (x == nullptr || y == nullptr) {
+    GELOGI("Output tensor of cpu kernel does not have data, no way to set value range.");
+    return;
+  }
+  for (auto j = 0; j < left_tensor->GetTensorDesc().GetShape().GetShapeSize(); ++j) {
+    auto left = static_cast<int64_t>(*(x + j));
+    auto right =
static_cast(*(y + j)); + value_range.emplace_back(std::make_pair(left, right)); + } +} +} // namespace ge diff --git a/ge/graph/passes/infer_value_range_pass.h b/ge/graph/passes/infer_value_range_pass.h new file mode 100644 index 00000000..eb485c87 --- /dev/null +++ b/ge/graph/passes/infer_value_range_pass.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GE_GRAPH_PASSES_INFER_VALUE_RANGE_PASS_H_ +#define GE_GRAPH_PASSES_INFER_VALUE_RANGE_PASS_H_ + +#include "graph/passes/infer_base_pass.h" + +namespace ge { +class InferValueRangePass : public InferBasePass { + public: + graphStatus Infer(NodePtr &node) override; + + private: + std::string SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const override; + graphStatus UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) override; + graphStatus UpdateOutputFromSubgraphs(const std::vector &src, GeTensorDescPtr &dst) override; + graphStatus UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) override; + bool NeedInfer(const NodePtr &node) const override; + + bool InputIsDynamic(const NodePtr &node) const; + bool InputIsConstOrHasValueRange(const NodePtr &node) const; + bool InputHasUnknownValueRange(const NodePtr &node) const; + graphStatus GenerateWorstValueRange(NodePtr &node); + template + graphStatus ConstructData(const GeTensorDesc &tensor_desc, bool use_floor_value, GeTensorPtr &output_ptr); + graphStatus ConstructDataByType(const GeTensorDesc &tensor_desc, bool use_floor_value, GeTensorPtr &output_ptr); + vector ConstructInputTensors(const NodePtr &node, bool use_floor_value); + template + void ConstructValueRange(const GeTensorPtr &left_tensor, const GeTensorPtr &right_tensor, + std::vector> &value_range); + graphStatus ConstructInputAndInferValueRange(NodePtr &node); +}; +} // namespace ge +#endif // GE_GRAPH_PASSES_INFER_VALUE_RANGE_PASS_H_ diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index 6fd83623..bc8646e7 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -54,6 +54,7 @@ #include "graph/passes/hccl_group_pass.h" #include "graph/passes/identity_pass.h" #include "graph/passes/infershape_pass.h" +#include "graph/passes/infer_value_range_pass.h" #include "graph/passes/merge_pass.h" #include "graph/passes/net_output_pass.h" #include "graph/passes/no_use_reshape_remove_pass.h" @@ -2016,6 +2017,8 @@ Status GraphPrepare::InferShapeForPreprocess() { names_to_passes.emplace_back("DimensionComputePass", &dimension_compute_pass); ConstantFoldingPass constant_folding_pass; names_to_passes.emplace_back("ConstantFoldingPass", &constant_folding_pass); + InferValueRangePass infer_value_pass; + names_to_passes.emplace_back("InferValuePass", &infer_value_pass); int32_t dev_count = 0; AicpuConstantFoldingPass aicpu_constant_folding_pass; diff --git a/metadef b/metadef index 2ad00e17..9e4a51a9 
160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 2ad00e17886fd06c0d00f8a8cf370783a3d31818 +Subproject commit 9e4a51a9602195b82e326b853f5adbfefc3972b6 diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index b67dc23d..80b12e32 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -220,7 +220,9 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/shape_operate_op_remove_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/assert_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/dropout_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_base_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/infershape_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_value_range_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/unused_const_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/permute_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/ctrl_edge_transfer_pass.cc" @@ -534,7 +536,9 @@ set(GRAPH_PASS_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/hccl_memcpy_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/no_use_reshape_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_base_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/infershape_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_value_range_pass.cc" "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" "${GE_CODE_DIR}/ge/analyzer/analyzer.cc" "${GE_CODE_DIR}/ge/graph/passes/net_output_pass.cc" @@ -661,6 +665,8 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES ) set(PASS_TEST_FILES + "graph/passes/infer_value_range_pass_unittest.cc" + "graph/passes/infer_base_pass_unittest.cc" "graph/passes/prune_pass_unittest.cc" "graph/passes/enter_pass_unittest.cc" "graph/passes/switch_op_pass_unittest.cc" @@ -719,7 +725,6 @@ set(PASS_TEST_FILES "graph/passes/memcpy_addr_async_unittest.cc" "graph/passes/hccl_continuous_pass_unittest.cc" "graph/passes/hccl_memcpy_pass_unittest.cc" - ) set(KERNEL_TEST_FILES diff --git a/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc new file mode 100644 index 00000000..e9247f75 --- /dev/null +++ b/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc @@ -0,0 +1,359 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "graph/passes/infer_base_pass.h" +#include "graph/debug/ge_attr_define.h" +#include "graph/utils/tensor_utils.h" +#include "graph/utils/graph_utils.h" +#include "graph_builder_utils.h" + +using namespace std; +using namespace testing; +namespace ge { +class ChildPassBuilder; +static const char *kInferTimes = "infer_times"; +class InferBasePassStub : public InferBasePass { + public: + friend class ChildPassBuilder; + graphStatus Infer(NodePtr &node) override{ + call_infer_times++; + for (size_t i = 0; i < node->GetOutDataNodesSize(); ++i) { + auto output_td = node->GetOpDesc()->MutableOutputDesc(i); + int times = 0; + AttrUtils::GetInt(output_td, kInferTimes, times); + AttrUtils::SetInt(output_td, kInferTimes, times + 1); + } + return infer_result_; + }; + + int32_t call_infer_times = 0; + int32_t call_update_tensor_desc_times = 0; + int32_t call_update_from_subgraph_times = 0; + int32_t call_update_from_subgraph_multi_dims_times = 0; + std::vector> update_td_pairs; + + private: + bool NeedInfer(const NodePtr &node) const override { + return need_infer_; + }; + std::string SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const override { return "test SerialTensorInfo"; }; + graphStatus UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) override { + call_update_tensor_desc_times++; + changed = td_changed_; + int times = 0; + if (AttrUtils::GetInt(src, kInferTimes, times)) { + AttrUtils::SetInt(dst, kInferTimes, times); + } + update_td_pairs.emplace_back(src, dst); + return GRAPH_SUCCESS; + }; + graphStatus UpdateOutputFromSubgraphs(const std::vector &src, GeTensorDescPtr &dst) override { + call_update_from_subgraph_times++; + return GRAPH_SUCCESS; + }; + graphStatus UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) override { + call_update_from_subgraph_multi_dims_times++; + return GRAPH_SUCCESS; + }; + bool td_changed_; + bool need_infer_; + graphStatus infer_result_; +}; + +class ChildPassBuilder { + public: + ChildPassBuilder &SetNeedInferFlag(bool flag) { + need_infer_ = flag; + return *this; + } + + ChildPassBuilder &SetInferResult(graphStatus ret) { + infer_result_ = ret; + return *this; + } + + ChildPassBuilder &SetTdChangedFlag(bool changed_flag) { + td_changed_ = changed_flag; + return *this; + } + + InferBasePassStub Build() { + InferBasePassStub ib; + ib.td_changed_ = td_changed_; + ib.need_infer_ = need_infer_; + ib.infer_result_ = infer_result_; + return ib; + } + + private: + bool td_changed_ = false; + bool need_infer_ = true; + graphStatus infer_result_ = GRAPH_SUCCESS; +}; + +class UtestGraphInferBasePassStub : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +/* + * data1 data2 + * \ / + * sub1 + * | + * netoutput + */ +ut::GraphBuilder TestSubgraphBuilder() { + ut::GraphBuilder builder = ut::GraphBuilder("branch_graph"); + std::vector shape1 = {1,1}; + auto data1 = builder.AddNode("data1_1", "Data", 1, 1, FORMAT_NCHW, DT_INT32, shape1); + auto data1_desc = data1->GetOpDesc(); + EXPECT_NE(data1_desc, nullptr); + AttrUtils::SetInt(data1_desc, "_parent_node_index", 0); + std::vector shape2 = {2,2}; + auto data2 = builder.AddNode("data2_1", "Data", 1, 1, FORMAT_NCHW, DT_INT32, shape2); + auto data2_desc = data2->GetOpDesc(); + EXPECT_NE(data2_desc, nullptr); + AttrUtils::SetInt(data2_desc, "_parent_node_index", 1); + + auto sub1 = builder.AddNode("Sub", "Sub", 2, 1); + std::vector shape7 = {8,8}; + auto netoutput = builder.AddNode("output", 
NETOUTPUT, 1, 0, FORMAT_NCHW, DT_INT32, shape7); + auto input0_desc = netoutput->GetOpDesc()->MutableInputDesc(0); + EXPECT_NE(input0_desc, nullptr); + AttrUtils::SetInt(input0_desc, "_parent_node_index", 0); + + builder.AddDataEdge(data1, 0, sub1, 0); + builder.AddDataEdge(data2, 0, sub1, 1); + builder.AddDataEdge(sub1, 0, netoutput, 0); + return builder; +} + +/* + * data1 data2 + * \ / + * case1 + * | + * netoutput + */ +ut::GraphBuilder RootGraphBuilder() { + ut::GraphBuilder builder = ut::GraphBuilder("root_graph"); + auto data1 = builder.AddNode("data1", "Data", 0, 1); + auto data2 = builder.AddNode("data2", "Data", 0, 1); + auto case1 = builder.AddNode("case1", CASE, 2, 1); + auto netoutput = builder.AddNode("netoutput", NETOUTPUT, 1, 0); + builder.AddDataEdge(data1, 0, case1, 0); + builder.AddDataEdge(data2, 0, case1, 1); + builder.AddDataEdge(case1, 0, netoutput, 0); + + auto parent_graph = builder.GetGraph(); + auto subgraph_builder = TestSubgraphBuilder(); + auto subgraph = subgraph_builder.GetGraph(); + case1->GetOpDesc()->AddSubgraphName(subgraph->GetName()); + case1->GetOpDesc()->SetSubgraphInstanceName(0, subgraph->GetName()); + subgraph->SetParentNode(case1); + subgraph->SetParentGraph(parent_graph); + EXPECT_EQ(parent_graph->AddSubgraph(subgraph->GetName(), subgraph), GRAPH_SUCCESS); + return builder; +} + +/* + * data1 data2 + * \ / + * add1 + * | + * netoutput + */ +ut::GraphBuilder NoSubgraphBuilder() { + ut::GraphBuilder builder = ut::GraphBuilder("no_subgraph"); + auto data1 = builder.AddNode("data1", "Data", 0, 1); + auto data2 = builder.AddNode("data2", "Data", 0, 1); + auto add1 = builder.AddNode("add1", ADD, 2, 1); + auto netoutput = builder.AddNode("netoutput", NETOUTPUT, 1, 0); + builder.AddDataEdge(data1, 0, add1, 0); + builder.AddDataEdge(data2, 0, add1, 1); + builder.AddDataEdge(add1, 0, netoutput, 0); + return builder; +} + +TEST_F(UtestGraphInferBasePassStub, CallInfer_WhenNeedInferReturnTrue) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + EXPECT_NE(add_node, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.Build(); + + // NeedInfer return true + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_infer_times, 1); + int times = -1; + EXPECT_TRUE(AttrUtils::GetInt(add_node->GetOpDesc()->GetOutputDescPtr(0), kInferTimes, times)); + EXPECT_EQ(times, 1); +} + +TEST_F(UtestGraphInferBasePassStub, NotCallInfer_WhenNeedInferReturnFalse) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + EXPECT_NE(add_node, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.SetNeedInferFlag(false).Build(); + + // NeedInfer return false + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_infer_times, 0); + int times = -1; + EXPECT_FALSE(AttrUtils::GetInt(add_node->GetOpDesc()->GetOutputDescPtr(0), kInferTimes, times)); +} + +TEST_F(UtestGraphInferBasePassStub, NotAddCurNodeRepass_CallUpdatePeerNode_WhenInferReturnSuccess) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + auto netoutput = test_graph->FindNode("netoutput"); + EXPECT_NE(add_node, nullptr); + EXPECT_NE(netoutput, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.Build(); + + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + 
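+  // Infer() ran once and stamped infer_times on add1's output desc; on success the pass is also
+  // expected to push that desc to the peer input of netoutput, which the checks below verify.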
EXPECT_EQ(stub_base_pass.call_infer_times, 1); + EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 1); + std::vector> expected_updated_tensor_desc_pairs = { + {add_node->GetOpDesc()->MutableOutputDesc(0), netoutput->GetOpDesc()->MutableInputDesc(0)}}; + EXPECT_EQ(stub_base_pass.update_td_pairs, expected_updated_tensor_desc_pairs); + EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set({})); +} + +TEST_F(UtestGraphInferBasePassStub, AddCurNodeRepass_NotCallUpdatePeerNode_WhenInferReturnNeedRepass) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + EXPECT_NE(add_node, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.SetInferResult(GRAPH_NODE_NEED_REPASS).Build(); + + // do re_pass + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_infer_times, 1); + EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 0); + EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set({add_node})); +} + +TEST_F(UtestGraphInferBasePassStub, NotAddPeerNodeRepass_AfterUpdatePeerNode_WhenUnchanged) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + auto netoutput = test_graph->FindNode("netoutput"); + EXPECT_NE(add_node, nullptr); + EXPECT_NE(netoutput, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.Build(); + + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 1); + EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set({})); + int times = -1; + EXPECT_TRUE(AttrUtils::GetInt(add_node->GetOpDesc()->GetOutputDescPtr(0), kInferTimes, times)); + EXPECT_EQ(times, 1); + times = -1; + EXPECT_TRUE(AttrUtils::GetInt(netoutput->GetOpDesc()->GetInputDescPtr(0), kInferTimes, times)); + EXPECT_EQ(times, 1); +} + +TEST_F(UtestGraphInferBasePassStub, AddPeerNodeRepass_AfterUpdatePeerNode_WhenChanged) { + auto builder = NoSubgraphBuilder(); + auto test_graph = builder.GetGraph(); + auto add_node = test_graph->FindNode("add1"); + auto netoutput = test_graph->FindNode("netoutput"); + EXPECT_NE(add_node, nullptr); + EXPECT_NE(netoutput, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.SetTdChangedFlag(true).Build(); + + EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 1); + EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set({netoutput})); +} + +TEST_F(UtestGraphInferBasePassStub, TestUpdateSubgraphData_WhenBeforeSubgraph) { + auto builder = RootGraphBuilder(); + auto parent_graph = builder.GetGraph(); + auto subgraphs = parent_graph->GetAllSubgraphs(); + EXPECT_EQ(subgraphs.size(), 1); + + auto case_node = parent_graph->FindNode("case1"); + auto data1 = subgraphs[0]->FindNode("data1_1"); + auto data2 = subgraphs[0]->FindNode("data2_1"); + EXPECT_NE(case_node, nullptr); + EXPECT_NE(data1, nullptr); + EXPECT_NE(data2, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.SetInferResult(GRAPH_NODE_NEED_REPASS).Build(); + + EXPECT_EQ(stub_base_pass.Run(case_node), SUCCESS); + // when GRAPH_NODE_NEED_REPASS, not update peer node, only update two data, update input and output, 2*2 + EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 4); + std::vector> expected_updated_tensor_desc_pairs = { + 
{case_node->GetOpDesc()->MutableInputDesc(0), data1->GetOpDesc()->MutableInputDesc(0)}, + {case_node->GetOpDesc()->MutableInputDesc(0), data1->GetOpDesc()->MutableOutputDesc(0)}, + {case_node->GetOpDesc()->MutableInputDesc(1), data2->GetOpDesc()->MutableInputDesc(0)}, + {case_node->GetOpDesc()->MutableInputDesc(1), data2->GetOpDesc()->MutableOutputDesc(0)}, + }; + EXPECT_EQ(stub_base_pass.update_td_pairs, expected_updated_tensor_desc_pairs); +} + +TEST_F(UtestGraphInferBasePassStub, TestUpdateParentNodeOutput_WhenAfterSubgraph) { + auto builder = RootGraphBuilder(); + auto parent_graph = builder.GetGraph(); + auto subgraphs = parent_graph->GetAllSubgraphs(); + EXPECT_EQ(subgraphs.size(), 1); + + auto case_node = parent_graph->FindNode("case1"); + EXPECT_NE(case_node, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.Build(); + stub_base_pass.SetOption(kOptimizeAfterSubGraph, ""); + + EXPECT_EQ(stub_base_pass.Run(case_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_update_from_subgraph_times, 1); + EXPECT_EQ(stub_base_pass.call_update_from_subgraph_multi_dims_times, 0); +} + +TEST_F(UtestGraphInferBasePassStub, TestUpdateParentNodeOutputForMultiDims_WhenAfterSubgraph) { + auto builder = RootGraphBuilder(); + auto parent_graph = builder.GetGraph(); + auto subgraphs = parent_graph->GetAllSubgraphs(); + EXPECT_EQ(subgraphs.size(), 1); + + auto case_node = parent_graph->FindNode("case1"); + auto set_ret = AttrUtils::SetInt(case_node->GetOpDesc(), ATTR_NAME_BATCH_NUM, 2); + EXPECT_EQ(set_ret, true); + EXPECT_NE(case_node, nullptr); + ChildPassBuilder pass_builder; + auto stub_base_pass = pass_builder.Build(); + stub_base_pass.SetOption(kOptimizeAfterSubGraph, ""); + + EXPECT_EQ(stub_base_pass.Run(case_node), SUCCESS); + EXPECT_EQ(stub_base_pass.call_update_from_subgraph_times, 0); + EXPECT_EQ(stub_base_pass.call_update_from_subgraph_multi_dims_times, 1); +} +} // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc new file mode 100644 index 00000000..fea1b27d --- /dev/null +++ b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc @@ -0,0 +1,583 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <gtest/gtest.h>
+
+#define protected public
+#define private public
+#include "graph/passes/infer_value_range_pass.h"
+#include "graph/utils/tensor_utils.h"
+#include "graph/utils/graph_utils.h"
+#include "graph_builder_utils.h"
+
+#include "inc/external/graph/operator_reg.h"
+#include "inc/external/graph/operator.h"
+#include "inc/external/graph/operator_factory.h"
+#include "inc/graph/operator_factory_impl.h"
+#include "inc/kernel.h"
+#include "inc/kernel_factory.h"
+
+using namespace std;
+using namespace testing;
+namespace ge {
+class UtestGraphInferValueRangePass : public testing::Test {
+ protected:
+  void SetUp() {}
+  void TearDown() {}
+};
+
+/*
+ *   data1  const1
+ *      \    /
+ *      case1
+ *        |
+ *      relu10
+ *        |
+ *     netoutput
+ */
+ut::GraphBuilder ParentGraphBuilder() {
+  ut::GraphBuilder builder = ut::GraphBuilder("g1");
+  auto data1 = builder.AddNode("data1", "Data", 0, 1);
+  std::vector<int64_t> const_shape = {1};
+  auto const1 = builder.AddNode("const1", "Const", 0, 1, FORMAT_NCHW, DT_INT32, const_shape);
+  auto case1 = builder.AddNode("case1", CASE, 2, 1);
+  auto relu1 = builder.AddNode("relu10", "Relu", 1, 1);
+  auto netoutput = builder.AddNode("netoutput", NETOUTPUT, 1, 0);
+
+  int32_t weight[1] = {1};
+  GeTensorDesc weight_desc(GeShape({1}), FORMAT_NHWC, DT_INT32);
+  GeTensorPtr tensor = std::make_shared<GeTensor>(weight_desc, (uint8_t *)weight, sizeof(weight));
+  OpDescUtils::SetWeights(const1, {tensor});
+  auto case_in0_shape = GeShape({1, 1, -1, 224});
+  auto case_in1_shape = GeShape({1, 1});
+  std::vector<std::pair<int64_t, int64_t>> in0_range = {make_pair(1, 1), make_pair(1, 1),
+                                                        make_pair(1, -1), make_pair(1, 224)};
+  std::vector<std::pair<int64_t, int64_t>> in1_range = {make_pair(1, 100), make_pair(1, 10)};
+  case1->GetOpDesc()->MutableInputDesc(0)->SetShape(case_in0_shape);
+  case1->GetOpDesc()->MutableInputDesc(0)->SetValueRange(in0_range);
+  case1->GetOpDesc()->MutableInputDesc(1)->SetShape(case_in1_shape);
+  case1->GetOpDesc()->MutableInputDesc(1)->SetValueRange(in1_range);
+
+  builder.AddDataEdge(data1, 0, case1, 0);
+  builder.AddDataEdge(const1, 0, case1, 1);
+  builder.AddDataEdge(case1, 0, relu1, 0);
+  builder.AddDataEdge(relu1, 0, netoutput, 0);
+  return builder;
+}
+
+/*
+ *   data1   data2
+ *      \     /
+ *      switch
+ *      /    \
+ *   relu1   relu2
+ *      \     /
+ *       merge
+ *         |
+ *     netoutput
+ */
+ut::GraphBuilder SwitchSubgraphBuilder(string graph_name, uint32_t num) {
+  ut::GraphBuilder builder = ut::GraphBuilder(graph_name);
+
+  std::vector<int64_t> shape1 = {2, 2};
+  string data1_name = "data1_" + std::to_string(num);
+  auto data1 = builder.AddNode(data1_name, "Data", 1, 1, FORMAT_NCHW, DT_INT32, shape1);
+  auto data1_desc = data1->GetOpDesc();
+  EXPECT_NE(data1_desc, nullptr);
+  AttrUtils::SetInt(data1_desc, "_parent_node_index", 0);
+
+  std::vector<int64_t> shape2 = {3, 3};
+  string data2_name = "data2_" + std::to_string(num);
+  auto data2 = builder.AddNode(data2_name, "Data", 1, 1, FORMAT_NCHW, DT_INT32, shape2);
+  auto data2_desc = data2->GetOpDesc();
+  EXPECT_NE(data2_desc, nullptr);
+  AttrUtils::SetInt(data2_desc, "_parent_node_index", 1);
+
+  string switch_name = "switch_" + std::to_string(num);
+  auto switch1 = builder.AddNode(switch_name, "Switch", 2, 2);
+
+  string relu1_name = "relu1_" + std::to_string(num);
+  auto relu1 = builder.AddNode(relu1_name, "Relu", 1, 1);
+
+  string relu2_name = "relu2_" + std::to_string(num);
+  auto relu2 = builder.AddNode(relu2_name, "Relu", 1, 1);
+
+  string merge_name = "merge_" + std::to_string(num);
+  auto merge = builder.AddNode(merge_name, "Merge", 2, 1);
+
+  std::vector<int64_t> shape7 = {8, 8};
+  string output_name = "output_" +
std::to_string(num);
+  auto netoutput = builder.AddNode(output_name, NETOUTPUT, 1, 0, FORMAT_NCHW, DT_INT32, shape7);
+  auto input0_desc = netoutput->GetOpDesc()->MutableInputDesc(0);
+  EXPECT_NE(input0_desc, nullptr);
+  AttrUtils::SetInt(input0_desc, "_parent_node_index", 0);
+  std::vector<std::pair<int64_t, int64_t>> range = {make_pair(1, -1), make_pair(1, -1)};
+  input0_desc->SetValueRange(range);
+
+  builder.AddDataEdge(data1, 0, switch1, 0);
+  builder.AddDataEdge(data2, 0, switch1, 1);
+  builder.AddDataEdge(switch1, 0, relu1, 0);
+  builder.AddDataEdge(switch1, 1, relu2, 0);
+  builder.AddDataEdge(relu1, 0, merge, 0);
+  builder.AddDataEdge(relu2, 0, merge, 1);
+  builder.AddDataEdge(merge, 0, netoutput, 0);
+
+  return builder;
+}
+
+void AddCaseSubgraph(ComputeGraphPtr &parent_graph, uint32_t branch_num) {
+  auto case_node = parent_graph->FindNode("case1");
+  EXPECT_NE(case_node, nullptr);
+
+  for (uint32_t i = 0; i < branch_num; ++i) {
+    string name = "Branch_Graph_" + std::to_string(i);
+
+    auto builder_subgraph = SwitchSubgraphBuilder(name, i);
+    auto switch_subgraph = builder_subgraph.GetGraph();
+
+    case_node->GetOpDesc()->AddSubgraphName(switch_subgraph->GetName());
+    case_node->GetOpDesc()->SetSubgraphInstanceName(i, switch_subgraph->GetName());
+
+    switch_subgraph->SetParentNode(case_node);
+    switch_subgraph->SetParentGraph(parent_graph);
+    EXPECT_EQ(parent_graph->AddSubgraph(switch_subgraph->GetName(), switch_subgraph), GRAPH_SUCCESS);
+  }
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UnregisteredNodeType) {
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+  GeTensorDesc ge_tensor_desc(GeShape({1, 1, 4, 192}), ge::FORMAT_NCHW, DT_FLOAT16);
+  auto addn_op_desc = std::make_shared<OpDesc>("AddN", "AddN");
+  addn_op_desc->AddInputDesc(ge_tensor_desc);
+  addn_op_desc->AddOutputDesc(ge_tensor_desc);
+  auto addn_op_node = graph->AddNode(addn_op_desc);
+
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(addn_op_node), SUCCESS);
+}
+
+auto ShapeValueInfer = [&](Operator &op) {
+  auto op_desc = OpDescUtils::GetOpDescFromOperator(op);
+  auto output_tensor_desc = op_desc->MutableOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> in_shape_range;
+  op_desc->MutableInputDesc(0)->GetShapeRange(in_shape_range);
+  if (!in_shape_range.empty()) {
+    output_tensor_desc->SetValueRange(in_shape_range);
+  }
+  return SUCCESS;
+};
+REG_OP(Shape)
+    .OP_END_FACTORY_REG(Shape)
+IMPL_INFER_VALUE_RANGE_FUNC(Shape, ShapeValueRangeFunc){
+  auto op_desc = OpDescUtils::GetOpDescFromOperator(op);
+  auto output_tensor_desc = op_desc->MutableOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> in_shape_range;
+  op_desc->MutableInputDesc(0)->GetShapeRange(in_shape_range);
+  if (!in_shape_range.empty()) {
+    output_tensor_desc->SetValueRange(in_shape_range);
+  }
+  return GRAPH_SUCCESS;
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseRegisteredFunc_NotInfer) {
+  INFER_VALUE_RANGE_CUSTOM_FUNC_REG(Shape, INPUT_IS_DYNAMIC, ShapeValueRangeFunc);
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+  GeTensorDesc ge_tensor_desc(GeShape({1, 1, 4, 192}), ge::FORMAT_NCHW, DT_INT32);
+  std::vector<std::pair<int64_t, int64_t>> shape_range = {make_pair(1, 1), make_pair(1, 1),
+                                                          make_pair(4, 4), make_pair(192, 192)};
+  ge_tensor_desc.SetShapeRange(shape_range);
+  GeTensorDesc output_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, DT_INT32);
+  auto op_desc = std::make_shared<OpDesc>("Shape", "Shape");
+  op_desc->AddInputDesc(ge_tensor_desc);
+  op_desc->AddOutputDesc(output_tensor_desc);
+  auto op_node = graph->AddNode(op_desc);
+
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(op_node), SUCCESS);
+
+
auto output_0_desc = op_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> value_range;
+  output_0_desc.GetValueRange(value_range);
+  EXPECT_EQ(value_range.empty(), true);
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseRegisteredFunc_DoInfer) {
+  // sqrt -> shape -> Output
+  INFER_VALUE_RANGE_CUSTOM_FUNC_REG(Shape, INPUT_IS_DYNAMIC, ShapeValueRangeFunc);
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+  GeTensorDesc sqrt_tensor_desc(GeShape({-1, -1, 4, 192}), ge::FORMAT_NCHW, DT_INT32);
+  std::vector<std::pair<int64_t, int64_t>> shape_range = {make_pair(1, 100), make_pair(1, 240),
+                                                          make_pair(4, 4), make_pair(192, 192)};
+  sqrt_tensor_desc.SetShapeRange(shape_range);
+  auto sqrt_op_desc = std::make_shared<OpDesc>("Sqrt", "Sqrt");
+  sqrt_op_desc->AddInputDesc(sqrt_tensor_desc);
+  sqrt_op_desc->AddOutputDesc(sqrt_tensor_desc);
+  auto sqrt_node = graph->AddNode(sqrt_op_desc);
+
+  GeTensorDesc shape_output_desc(GeShape({4}), ge::FORMAT_NCHW, DT_INT32);
+  auto shape_op_desc = std::make_shared<OpDesc>("Shape", "Shape");
+  shape_op_desc->AddInputDesc(sqrt_tensor_desc);
+  shape_op_desc->AddOutputDesc(shape_output_desc);
+  auto shape_node = graph->AddNode(shape_op_desc);
+
+  GeTensorDesc Output_in_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT32);
+  auto Output_op_desc = std::make_shared<OpDesc>("Output", "Output");
+  Output_op_desc->AddInputDesc(Output_in_tensor_desc);
+  auto Output_node = graph->AddNode(Output_op_desc);
+
+  ge::GraphUtils::AddEdge(sqrt_node->GetOutDataAnchor(0), shape_node->GetInDataAnchor(0));
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), Output_node->GetInDataAnchor(0));
+  EXPECT_EQ(graph->TopologicalSorting(), GRAPH_SUCCESS);
+
+
+  InferValueRangePass infer_pass;
+  auto ret = infer_pass.Run(shape_node);
+  EXPECT_EQ(ret, SUCCESS);
+
+  auto output_0_desc = shape_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> value_range;
+  output_0_desc.GetValueRange(value_range);
+  EXPECT_EQ(value_range.size(), 4);
+  std::vector<int64_t> target_value_range = {1, 100, 1, 240, 4, 4, 192, 192};
+  std::vector<int64_t> output_value_range;
+  for (auto pair : value_range) {
+    output_value_range.push_back(pair.first);
+    output_value_range.push_back(pair.second);
+  }
+  EXPECT_EQ(target_value_range, output_value_range);
+
+  auto in_0_desc = Output_node->GetOpDesc()->GetInputDesc(0);
+  value_range.clear();
+  in_0_desc.GetValueRange(value_range);
+  EXPECT_EQ(value_range.size(), 4);
+  output_value_range.clear();
+  for (auto pair : value_range) {
+    output_value_range.push_back(pair.first);
+    output_value_range.push_back(pair.second);
+  }
+  EXPECT_EQ(target_value_range, output_value_range);
+
+}
+
+class AddKernel : public Kernel {
+ public:
+  Status Compute(const ge::OpDescPtr op_desc_ptr, const std::vector<ge::ConstGeTensorPtr> &input,
+                 std::vector<ge::GeTensorPtr> &v_output) override {
+    if (input[0]->GetTensorDesc().GetDataType() == DT_INT64 || input[0]->GetTensorDesc().GetDataType() == DT_UINT64) {
+      vector<int64_t> data_vec;
+      auto data_num = input[0]->GetTensorDesc().GetShape().GetShapeSize();
+      auto x1_data = reinterpret_cast<const int64_t *>(input[0]->GetData().data());
+      auto x2_data = reinterpret_cast<const int64_t *>(input[1]->GetData().data());
+      for (size_t i = 0; i < data_num; i++) {
+        auto x_index = *(x1_data + i);
+        auto y_index = *(x2_data + i);
+        data_vec.push_back(x_index + y_index);
+      }
+      GeTensorPtr const_tensor = std::make_shared<ge::GeTensor>(input[0]->GetTensorDesc(), (uint8_t *)data_vec.data(),
+                                                                data_num * sizeof(int64_t));
+      v_output.emplace_back(const_tensor);
+      return SUCCESS;
+    } else if (input[0]->GetTensorDesc().GetDataType() == DT_INT32 || input[0]->GetTensorDesc().GetDataType() == DT_UINT32) {
+
vector<int32_t> data_vec;
+      auto data_num = input[0]->GetTensorDesc().GetShape().GetShapeSize();
+      auto x1_data = reinterpret_cast<const int32_t *>(input[0]->GetData().data());
+      auto x2_data = reinterpret_cast<const int32_t *>(input[1]->GetData().data());
+      for (size_t i = 0; i < data_num; i++) {
+        auto x_index = *(x1_data + i);
+        auto y_index = *(x2_data + i);
+        data_vec.push_back(x_index + y_index);
+      }
+      GeTensorPtr const_tensor = std::make_shared<ge::GeTensor>(input[0]->GetTensorDesc(), (uint8_t *)data_vec.data(),
+                                                                data_num * sizeof(int32_t));
+      v_output.emplace_back(const_tensor);
+      return SUCCESS;
+    }
+  }
+};
+REGISTER_KERNEL(ADD, AddKernel);
+INFER_VALUE_RANGE_DEFAULT_REG(Add);
+INFER_VALUE_RANGE_DEFAULT_REG(Sqrt);
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHaveUnKnownValueRange) {
+  // shape --- add --- sqrt
+  // constant /
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+
+  vector<int64_t> dims_vec = {4};
+  vector<int64_t> data_vec = {1, 1, 1, 1};
+  GeTensorDesc const_tensor_desc(ge::GeShape(dims_vec), ge::FORMAT_NCHW, ge::DT_INT64);
+  GeTensorPtr const_tensor =
+      std::make_shared<ge::GeTensor>(const_tensor_desc, (uint8_t *)data_vec.data(), data_vec.size() * sizeof(int64_t));
+
+  auto const_op_desc = std::make_shared<OpDesc>("Constant", "Constant");
+  const_op_desc->AddOutputDesc(const_tensor_desc);
+  EXPECT_EQ(OpDescUtils::SetWeights(const_op_desc, const_tensor), GRAPH_SUCCESS);
+  auto const_node = graph->AddNode(const_op_desc);
+
+  GeTensorDesc shape_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT64);
+  std::vector<std::pair<int64_t, int64_t>> unknown_value_range = {make_pair(1, -1), make_pair(1, 240),
+                                                                  make_pair(4, 4), make_pair(192, 192)};
+  shape_tensor_desc.SetValueRange(unknown_value_range);
+  auto shape_op_desc = std::make_shared<OpDesc>("Shape", "Shape");
+  shape_op_desc->AddOutputDesc(shape_tensor_desc);
+  auto shape_node = graph->AddNode(shape_op_desc);
+
+  GeTensorDesc add_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT64);
+  auto add_op_desc = std::make_shared<OpDesc>("Add", "Add");
+  add_op_desc->AddInputDesc(shape_tensor_desc);
+  add_op_desc->AddInputDesc(const_tensor_desc);
+  add_op_desc->AddOutputDesc(add_tensor_desc);
+  auto add_node = graph->AddNode(add_op_desc);
+
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0));
+  ge::GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1));
+
+  // test unknown value range
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(add_node), SUCCESS);
+  auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> out_value_range;
+  output_0_desc.GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 4);
+
+  std::vector<int64_t> unknown_target_value_range = {1, -1, 1, -1, 1, -1, 1, -1};
+  std::vector<int64_t> output_value_range;
+  for (auto pair : out_value_range) {
+    output_value_range.push_back(pair.first);
+    output_value_range.push_back(pair.second);
+  }
+  EXPECT_EQ(unknown_target_value_range, output_value_range);
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsAreKnownValueRange_Int64) {
+  // shape --- add --- sqrt
+  // constant /
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+
+  vector<int64_t> dims_vec = {4};
+  vector<int64_t> data_vec = {1, 1, 1, 1};
+  GeTensorDesc const_tensor_desc(ge::GeShape(dims_vec), ge::FORMAT_NCHW, ge::DT_INT64);
+  GeTensorPtr const_tensor =
+      std::make_shared<ge::GeTensor>(const_tensor_desc, (uint8_t *)data_vec.data(), data_vec.size() * sizeof(int64_t));
+
+  auto const_op_desc = std::make_shared<OpDesc>("Constant", "Constant");
+  const_op_desc->AddOutputDesc(const_tensor_desc);
+
EXPECT_EQ(OpDescUtils::SetWeights(const_op_desc, const_tensor), GRAPH_SUCCESS);
+  auto const_node = graph->AddNode(const_op_desc);
+
+  GeTensorDesc shape_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT64);
+  std::vector<std::pair<int64_t, int64_t>> unknown_value_range = {make_pair(1, 100), make_pair(1, 240),
+                                                                  make_pair(4, 4), make_pair(192, 192)};
+  shape_tensor_desc.SetValueRange(unknown_value_range);
+  auto shape_op_desc = std::make_shared<OpDesc>("Shape", "Shape");
+  shape_op_desc->AddOutputDesc(shape_tensor_desc);
+  auto shape_node = graph->AddNode(shape_op_desc);
+
+  GeTensorDesc add_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT64);
+  auto add_op_desc = std::make_shared<OpDesc>("Add", "Add");
+  add_op_desc->AddInputDesc(shape_tensor_desc);
+  add_op_desc->AddInputDesc(const_tensor_desc);
+  add_op_desc->AddOutputDesc(add_tensor_desc);
+  auto add_node = graph->AddNode(add_op_desc);
+
+  auto sqrt_op_desc = std::make_shared<OpDesc>("Sqrt", "Sqrt");
+  sqrt_op_desc->AddInputDesc(GeTensorDesc());
+  auto sqrt_node = graph->AddNode(sqrt_op_desc);
+
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0));
+  ge::GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1));
+  ge::GraphUtils::AddEdge(add_node->GetOutDataAnchor(0), sqrt_node->GetInDataAnchor(1));
+
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(sqrt_node), SUCCESS);
+
+  // test known value range
+  EXPECT_EQ(infer_pass.Run(add_node), SUCCESS);
+  auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> out_value_range;
+  output_0_desc.GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 4);
+
+  std::vector<int64_t> target_value_range = {2, 101, 2, 241, 5, 5, 193, 193};
+  std::vector<int64_t> output_value_range;
+  for (auto pair : out_value_range) {
+    output_value_range.push_back(pair.first);
+    output_value_range.push_back(pair.second);
+  }
+  EXPECT_EQ(target_value_range, output_value_range);
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsAreKnownValueRange_Int32) {
+  // shape --- add --- sqrt
+  // constant /
+  auto graph = std::make_shared<ComputeGraph>("test_graph");
+  vector<int32_t> data_vec = {1, 100, 2, 200};
+  GeTensorDesc const_tensor_desc(ge::GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT32);
+  GeTensorPtr const_tensor =
+      std::make_shared<ge::GeTensor>(const_tensor_desc, (uint8_t *)data_vec.data(), data_vec.size() * sizeof(int32_t));
+  auto const_op_desc = std::make_shared<OpDesc>("Constant", "Constant");
+  const_op_desc->AddOutputDesc(const_tensor_desc);
+  EXPECT_EQ(OpDescUtils::SetWeights(const_op_desc, const_tensor), GRAPH_SUCCESS);
+  auto const_node = graph->AddNode(const_op_desc);
+
+  GeTensorDesc shape_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT32);
+  std::vector<std::pair<int64_t, int64_t>> known_value_range = {make_pair(1, 100), make_pair(1, 240),
+                                                                make_pair(4, 4), make_pair(192, 192)};
+  shape_tensor_desc.SetValueRange(known_value_range);
+  auto shape_op_desc = std::make_shared<OpDesc>("Shape", "Shape");
+  shape_op_desc->AddOutputDesc(shape_tensor_desc);
+  auto shape_node = graph->AddNode(shape_op_desc);
+
+  GeTensorDesc add_tensor_desc(GeShape({4}), ge::FORMAT_NCHW, ge::DT_INT32);
+  auto add_op_desc = std::make_shared<OpDesc>("Add", "Add");
+  add_op_desc->AddInputDesc(shape_tensor_desc);
+  add_op_desc->AddInputDesc(const_tensor_desc);
+  add_op_desc->AddOutputDesc(add_tensor_desc);
+  auto add_node = graph->AddNode(add_op_desc);
+
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0));
+  ge::GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1));
+
+
InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(add_node), SUCCESS);
+  auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> out_value_range;
+  output_0_desc.GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 4);
+
+  std::vector<int64_t> target_value_range = {2, 101, 101, 340, 6, 6, 392, 392};
+  std::vector<int64_t> output_value_range;
+  for (auto pair : out_value_range) {
+    output_value_range.push_back(pair.first);
+    output_value_range.push_back(pair.second);
+  }
+  EXPECT_EQ(target_value_range, output_value_range);
+}
+
+REG_OP(Case)
+    .OP_END_FACTORY_REG(Case)
+IMPL_INFER_VALUE_RANGE_FUNC(Case, ValueRangeFunc){
+  auto op_desc = OpDescUtils::GetOpDescFromOperator(op);
+  auto output_tensor_desc = op_desc->MutableOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> in_value_range;
+  output_tensor_desc->GetValueRange(in_value_range);
+  if (in_value_range.empty()) {
+    std::vector<std::pair<int64_t, int64_t>> out_value_range = {make_pair(1, 2), make_pair(1, 3),
+                                                                make_pair(1, 4), make_pair(1, 5)};
+    output_tensor_desc->SetValueRange(out_value_range);
+  }
+  return GRAPH_SUCCESS;
+}
+INFER_VALUE_RANGE_CUSTOM_FUNC_REG(Case, INPUT_HAS_VALUE_RANGE, ValueRangeFunc);
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_HasCaseSubgraph_WhenBeforeSubgraph) {
+  auto builder = ParentGraphBuilder();
+  auto parent_graph = builder.GetGraph();
+  AddCaseSubgraph(parent_graph, 2);
+  auto subgraphs = parent_graph->GetAllSubgraphs();
+  EXPECT_EQ(subgraphs.size(), 2);
+
+  // check before subgraph
+  auto case_node = parent_graph->FindNode("case1");
+  EXPECT_NE(case_node, nullptr);
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(case_node), SUCCESS);
+
+  auto case_out_0_desc = case_node->GetOpDesc()->MutableOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> out_value_range;
+  case_out_0_desc->GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 4);
+  std::vector<int64_t> target_value_range = {1, 2, 1, 3, 1, 4, 1, 5};
+  std::vector<int64_t> output_value_range_list;
+  for (auto pair : out_value_range) {
+    output_value_range_list.push_back(pair.first);
+    output_value_range_list.push_back(pair.second);
+  }
+  EXPECT_EQ(target_value_range, output_value_range_list);
+
+  auto data_node = subgraphs[0]->FindNode("data1_0");
+  auto data_output_0_desc = data_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<int64_t> target_value_range_list = {1, 1, 1, 1, 1, -1, 1, 224};
+  std::vector<std::pair<int64_t, int64_t>> output_value_range;
+  data_output_0_desc.GetValueRange(output_value_range);
+  EXPECT_EQ(output_value_range.size(), 4);
+  std::vector<int64_t> data_value_range_list;
+  for (auto pair : output_value_range) {
+    data_value_range_list.push_back(pair.first);
+    data_value_range_list.push_back(pair.second);
+  }
+  EXPECT_EQ(data_value_range_list, target_value_range_list);
+
+  data_node = subgraphs[0]->FindNode("data2_0");
+  auto data2_input_0_desc = data_node->GetOpDesc()->GetInputDesc(0);
+  std::vector<int64_t> target_value_range_list2 = {1, 100, 1, 10};
+  out_value_range.clear();
+  data2_input_0_desc.GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 2);
+  data_value_range_list.clear();
+  for (auto pair : out_value_range) {
+    data_value_range_list.push_back(pair.first);
+    data_value_range_list.push_back(pair.second);
+  }
+  EXPECT_EQ(data_value_range_list, target_value_range_list2);
+}
+
+TEST_F(UtestGraphInferValueRangePass, CallRun_HasCaseSubgraph_WhenAfterSubgraph) {
+  auto builder = ParentGraphBuilder();
+  auto parent_graph = builder.GetGraph();
+  AddCaseSubgraph(parent_graph, 2);
+  auto subgraphs = parent_graph->GetAllSubgraphs();
+  EXPECT_EQ(subgraphs.size(), 2);
+
+  auto case_node =
parent_graph->FindNode("case1"); + EXPECT_NE(case_node, nullptr); + InferValueRangePass infer_pass; + // check after subgraph + infer_pass.options_[kOptimizeAfterSubGraph] = "yes"; + EXPECT_EQ(infer_pass.Run(case_node), SUCCESS); + + std::vector out_target_dims = {1, -1, 1, -1}; + auto case_out = case_node->GetOpDesc()->GetOutputDescPtr(0); + std::vector> out_value_range; + case_out->GetValueRange(out_value_range); + EXPECT_EQ(out_value_range.size(), 2); + + std::vector output_value_range_list; + for (auto pair : out_value_range) { + output_value_range_list.push_back(pair.first); + output_value_range_list.push_back(pair.second); + } + EXPECT_EQ(out_target_dims, output_value_range_list); +} + +TEST_F(UtestGraphInferValueRangePass, CallRun_HasSubgraph_WhenAfterSubgraph_ForMultiDims) { + auto builder = ParentGraphBuilder(); + auto parent_graph = builder.GetGraph(); + AddCaseSubgraph(parent_graph, 2); + auto subgraphs = parent_graph->GetAllSubgraphs(); + EXPECT_EQ(subgraphs.size(), 2); + + auto case_node = parent_graph->FindNode("case1"); + EXPECT_NE(case_node, nullptr); + InferValueRangePass infer_pass; + infer_pass.options_[kOptimizeAfterSubGraph] = "yes"; + + // check after subgraph for multi-batch + auto set_ret = AttrUtils::SetInt(case_node->GetOpDesc(), ATTR_NAME_BATCH_NUM, 2); + EXPECT_EQ(set_ret, true); + EXPECT_EQ(infer_pass.Run(case_node), GRAPH_FAILED); +} +} // namespace ge From eccff67f421da4ae147c629f61821676acf550d2 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 26 Jun 2021 15:42:55 +0800 Subject: [PATCH 095/226] Clear UpdatePersistTensor Warning for first run --- ge/graph/passes/mark_force_unknown_for_cond_pass.cc | 6 +++--- ge/graph/passes/mark_force_unknown_for_cond_pass.h | 2 +- ge/hybrid/executor/node_state.cc | 4 ++++ 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc index 233a1ff0..aa36a43b 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc @@ -56,8 +56,8 @@ Status MarkForceUnknownForCondPass::Run(ComputeGraphPtr graph) { /// @param [out] Search queue /// @return true: Switch In while loop / false: Not in while Loop. /// -bool MarkForceUnknownForCondPass::DealWithLoopSwitch(const NodePtr &node, uint32_t dst_span, - std::queue> search_queue) { +bool MarkForceUnknownForCondPass::DealAsLoopSwitch(const NodePtr &node, uint32_t dst_span, + std::queue> &search_queue) { /// LoopCond --->\. /// \. /// Enter-----------+ \. @@ -121,7 +121,7 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std: GELOGD("Travel node: %s, %s node: %s, span is: %u", dst_node->GetName().c_str(), node_type.c_str(), in_node->GetName().c_str(), dst_span); if (kSwitchOpTypes.count(node_type) > 0) { // Switch input node. - if (DealWithLoopSwitch(in_node, dst_span, search_queue)) { + if (DealAsLoopSwitch(in_node, dst_span, search_queue)) { continue; } diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.h b/ge/graph/passes/mark_force_unknown_for_cond_pass.h index d2be9a9e..030b55ee 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.h +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.h @@ -34,7 +34,7 @@ class MarkForceUnknownForCondPass : public GraphPass { /// @param [out] Search queue /// @return true: Switch In while loop / false: Not in while Loop. 
  ///
-  bool DealWithLoopSwitch(const NodePtr &node, uint32_t dst_span, std::queue<std::pair<NodePtr, uint32_t>> search_queue);
+  bool DealAsLoopSwitch(const NodePtr &node, uint32_t dst_span, std::queue<std::pair<NodePtr, uint32_t>> &search_queue);
 
   ///
   /// @brief Mark force unknown shape for Switch node
diff --git a/ge/hybrid/executor/node_state.cc b/ge/hybrid/executor/node_state.cc
index 7ab7b536..ad38c792 100644
--- a/ge/hybrid/executor/node_state.cc
+++ b/ge/hybrid/executor/node_state.cc
@@ -355,6 +355,10 @@ void NodeState::UpdatePersistTensor() {
     }
   };
 
+  if (root_tensor_values_.empty()) {
+    return;
+  }
+
   update_tensor(node_item_->root_data_);
   if (iteration_count_ > 0) {
     update_tensor(node_item_->enter_data_);

From 01dfd8974956525c7977f926a94aee25335c7b8d Mon Sep 17 00:00:00 2001
From: 王涛
Date: Sat, 26 Jun 2021 16:16:34 +0800
Subject: Revert 'Pull Request !1849 : add copy graph'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ge/graph/manager/graph_manager.cc           |  2 +-
 ge/hybrid/model/hybrid_model.h              |  1 -
 ge/hybrid/model/hybrid_model_builder.cc     | 47 ++++---------------
 ge/hybrid/model/hybrid_model_builder.h      |  1 -
 ge/model/ge_root_model.h                    |  5 --
 .../executor/subgraph_executor_unittest.cc  |  3 --
 .../model/hybrid_model_builder_unittest.cc  | 26 ++--------
 7 files changed, 15 insertions(+), 70 deletions(-)

diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc
index 01a2e502..66026f8d 100755
--- a/ge/graph/manager/graph_manager.cc
+++ b/ge/graph/manager/graph_manager.cc
@@ -3131,10 +3131,10 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) {
   }
   // Avoid repeated prerun for graphs owning the same graph_id in online inference concurrency
   if (count > 1 && graph_node->GetBuildFlag()) {
+    graph_node->Lock();
     GELOGD("Avoid repeated prerun, graph_id:%u.", args.graph_id);
     // In the online inference concurrency scenario, graph_node is allowed to be locked for 'count' times
     graph_node->SetSemSize(count);
-    graph_node->Lock();
     graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
         args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback }));
     GELOGI("[PreRunThread] Loop end. 
Start to run with cached build model."); diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h index 77246e20..9821242a 100644 --- a/ge/hybrid/model/hybrid_model.h +++ b/ge/hybrid/model/hybrid_model.h @@ -147,7 +147,6 @@ class HybridModel { GeRootModelPtr ge_root_model_; std::map input_nodes_; ComputeGraphPtr root_graph_; - ComputeGraphPtr orig_root_graph_; std::map device_variable_nodes_; //lint !e148 std::map host_variable_nodes_; //lint !e148 std::map> variable_tensors_; diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 8f96ea9d..1f68f374 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -147,7 +147,6 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); hybrid_model_.model_name_ = ge_root_model_->GetModelName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); - GE_CHK_STATUS_RET(CopyGraph(), "[Invoke][CopyGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName()); @@ -172,12 +171,11 @@ Status HybridModelBuilder::Build() { Status HybridModelBuilder::BuildForSingleOp() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); - hybrid_model_.root_graph_ = ge_root_model_->GetRootGraph(); hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); auto ret = ge_root_model_->GetSubgraphInstanceNameToModel(); - const GeModelPtr ge_model = ret[hybrid_model_.root_graph_->GetName()]; - GE_CHK_STATUS_RET(IndexTaskDefs(hybrid_model_.root_graph_, ge_model), + const GeModelPtr ge_model = ret[ge_root_model_->GetRootGraph()->GetName()]; + GE_CHK_STATUS_RET(IndexTaskDefs(ge_root_model_->GetRootGraph(), ge_model), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName()); @@ -192,29 +190,6 @@ Status HybridModelBuilder::ValidateParams() { return SUCCESS; } -Status HybridModelBuilder::CopyGraph() { - GELOGD("Copy compute graph begin."); - auto root_graph = ge_root_model_->GetRootGraph(); - - ge_root_model_->IncreaseBuildTimes(); - std::string new_graph_name = ge_root_model_->GetRootGraph()->GetName() + "_" + - std::to_string(ge_root_model_->GetBuildTimes()); - ComputeGraphPtr new_root_graph = MakeShared(new_graph_name); - GE_CHECK_NOTNULL(new_root_graph); - int32_t depth = 0; - std::map node_old_2_new; - std::map op_desc_old_2_new; - graphStatus ret = GraphUtils::CopyComputeGraph(root_graph, new_root_graph, node_old_2_new, op_desc_old_2_new, depth); - if (ret != GRAPH_SUCCESS) { - GELOGE(GRAPH_FAILED, "Copy compute graph failed."); - return GRAPH_FAILED; - } - hybrid_model_.root_graph_ = new_root_graph; - - GELOGD("Copy compute graph[%s] success.", new_graph_name.c_str()); - return SUCCESS; -} - Status HybridModelBuilder::BuildNodeItem(const NodePtr &node, NodeItem &node_item) { auto op_desc = node->GetOpDesc(); GE_CHK_STATUS_RET(ParseForceInfershapeNodes(node, node_item), @@ -835,13 +810,12 @@ Status 
HybridModelBuilder::BuildOutputMapping(GraphItem &graph_item,
 }
 
 Status HybridModelBuilder::LoadGraph() {
-  auto root_graph = hybrid_model_.root_graph_;
+  auto root_graph = ge_root_model_->GetRootGraph();
   if (!GetContext().GetHostExecFlag()) {
     std::shared_ptr<ComputeGraph> merged_graph;
     GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu",
            root_graph->GetDirectNodesSize(),
           root_graph->GetAllNodesSize());
-    hybrid_model_.orig_root_graph_ = root_graph;
     GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(root_graph, merged_graph),
                             "[Invoke][UnfoldSubgraphs]Failed to unfold subgraphs, model_name_:%s.", GetGraphName());
     root_graph = std::move(merged_graph);
@@ -899,7 +873,6 @@ Status HybridModelBuilder::LoadGraph() {
   }
   for (auto &it : hybrid_model_.known_shape_sub_models_) {
     auto node_item = MutableNodeItem(it.first);
-    GE_CHECK_NOTNULL(node_item);
     AscendString graph_name;
     GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name");
     auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString());
@@ -1148,9 +1121,7 @@ Status HybridModelBuilder::InitWeights() {
                 sub_weight_buffer->GetSize());
     auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph());
     if (subgraph != ge_root_model_->GetRootGraph()) {
-      subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_model.first);
-    } else {
-      subgraph = hybrid_model_.root_graph_;
+      subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first);
     }
     GE_CHECK_NOTNULL(subgraph);
     hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer));
@@ -1329,7 +1300,7 @@ Status HybridModelBuilder::IndexTaskDefs(const ComputeGraphPtr &sub_graph, const
 }
 
 Status HybridModelBuilder::IndexTaskDefs() {
-  const auto &root_graph = hybrid_model_.root_graph_;
+  const auto root_graph = ge_root_model_->GetRootGraph();
   const auto &root_graph_name = root_graph->GetName();
   if (SetOutputNameAttr(*root_graph) != SUCCESS) {
     GELOGW("Set output name attr failed.");
@@ -1363,7 +1334,7 @@ Status HybridModelBuilder::IndexTaskDefs() {
 
 Status HybridModelBuilder::IndexSpecialNodes() {
   GELOGD("Start to index special nodes");
-  const auto &root_graph = hybrid_model_.root_graph_;
+  const auto &root_graph = ge_root_model_->GetRootGraph();
   for (auto &node : root_graph->GetAllNodes()) {
     GE_CHECK_NOTNULL(node);
     GE_CHECK_NOTNULL(node->GetOpDesc());
@@ -1518,7 +1489,7 @@ Status HybridModelBuilder::InitRuntimeParams() {
   runtime_param_.session_id = ret ? static_cast<uint64_t>(value) : 0;
   ret = ge::AttrUtils::GetInt(first_model, ATTR_MODEL_TASK_GEN_VAR_ADDR, value);
   runtime_param_.logic_var_base = ret ? 
static_cast<uint64_t>(value) : 0;
-  runtime_param_.graph_id = hybrid_model_.root_graph_->GetGraphID();
+  runtime_param_.graph_id = ge_root_model_->GetRootGraph()->GetGraphID();
   value = 0;
   for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) {
     (void) ge::AttrUtils::GetInt(it.second, ATTR_MODEL_VAR_SIZE, value);
@@ -1655,7 +1626,7 @@ Status HybridModelBuilder::TransAllVarData() {
 }
 
 Status HybridModelBuilder::CopyVarData() {
-  GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(hybrid_model_.root_graph_,
+  GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(ge_root_model_->GetRootGraph(),
                                                    runtime_param_.session_id,
                                                    hybrid_model_.device_id_),
                     "[Invoke][CopyVarData] failed.");
@@ -1738,7 +1709,7 @@ Status HybridModelBuilder::LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem
 }
 
 Status HybridModelBuilder::RecoverGraphUnknownFlag() {
-  const auto &root_graph = hybrid_model_.root_graph_;
+  const auto &root_graph = ge_root_model_->GetRootGraph();
   for (auto &sub_graph : root_graph->GetAllSubgraphs()) {
     GE_CHECK_NOTNULL(sub_graph);
     for (const auto &node : sub_graph->GetDirectNode()) {
diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h
index 05830e82..9c1eb187 100644
--- a/ge/hybrid/model/hybrid_model_builder.h
+++ b/ge/hybrid/model/hybrid_model_builder.h
@@ -56,7 +56,6 @@ class HybridModelBuilder {
   Status BuildOutputMapping(GraphItem &partitioned_call, const NodeItem &node_item, bool is_root_graph);
   Status ValidateParams();
   Status LoadGraph();
-  Status CopyGraph();
   Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model);
   static Status InitHcclExecutorOnDemand(const GeModelPtr &ge_model);
   Status LoadTask(NodeItem &node_item);
diff --git a/ge/model/ge_root_model.h b/ge/model/ge_root_model.h
index b6e3d081..9e8e116e 100755
--- a/ge/model/ge_root_model.h
+++ b/ge/model/ge_root_model.h
@@ -60,10 +60,6 @@ class GeRootModel {
 
   bool GetTrainFlag() const { return train_flag_; }
 
-  int32_t GetBuildTimes() const { return hybrid_build_times_; }
-
-  void IncreaseBuildTimes() { hybrid_build_times_++; }
-
  private:
   ComputeGraphPtr root_graph_ = nullptr;
   std::map<std::string, GeModelPtr> subgraph_instance_name_to_model_;
@@ -73,7 +69,6 @@ class GeRootModel {
   bool train_flag_ = false;
   std::string model_name_;
   bool is_specific_stream_ = false;
-  int32_t hybrid_build_times_ = 0;
 };
 } // namespace ge
 using GeRootModelPtr = std::shared_ptr<ge::GeRootModel>;
diff --git a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc
index 827705ae..2dc3b639 100644
--- a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc
@@ -249,9 +249,6 @@ TEST_F(UtestSubgraphExecutor, cond_graph_schedule_tasks) {
   graph_context.callback_manager = std::unique_ptr<CallbackManager>(new CallbackManager());
   ASSERT_EQ(graph_context.callback_manager->Init(), SUCCESS);
 
-  auto root_graph = hybrid_model.root_graph_;
-  switch_t = root_graph->FindNode("switch_t");
-  switch_f = root_graph->FindNode("switch_f");
   const auto node_it_t = hybrid_model.node_items_.find(switch_t);
   const auto node_it_f = hybrid_model.node_items_.find(switch_f);
   ASSERT_NE(hybrid_model.node_items_.end(), node_it_t);
diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc
index 10f7c0fe..5567aca2 100644
--- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc
+++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc
@@ -214,17 +214,11 @@ TEST_F(UtestHybridModelBuilder, 
normal_hybrid_model_build) {
     ASSERT_EQ(it->second->frame_index_, index);
     ASSERT_EQ(it->second->parent_frame_, -1);
   };
-  auto root_graph = hybrid_model.root_graph_;
-  auto enter1_node = root_graph->FindNode("enter");
-  auto active1_node = root_graph->FindNode("active1");
-  auto active2_node = root_graph->FindNode("active2");
-  auto active3_node = root_graph->FindNode("active3");
-  auto output1_node = root_graph->FindNode("net_output");
-  TestFrameGroup(enter1_node, control_group_index);
-  TestFrameGroup(active1_node, control_group_index);
-  TestFrameGroup(active2_node, control_group_index);
-  TestFrameGroup(active3_node, control_group_index);
-  TestFrameGroup(output1_node, -1);
+  TestFrameGroup(enter1, control_group_index);
+  TestFrameGroup(active1, control_group_index);
+  TestFrameGroup(active2, control_group_index);
+  TestFrameGroup(active3, control_group_index);
+  TestFrameGroup(output1, -1);
 
   engine_mapping.clear();
   task_executor.clear();
@@ -379,14 +373,4 @@ TEST_F(UtestHybridModelBuilder, TestInitHcclExecutorOnDemand) {
   NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL);
   ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS);
 }
-
-TEST_F(UtestHybridModelBuilder, copy_graph_success) {
-ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
-GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
-HybridModel hybrid_model(ge_root_model);
-HybridModelBuilder hybrid_model_builder(hybrid_model);
-
-Status st = hybrid_model_builder.CopyGraph();
-EXPECT_EQ(st, SUCCESS);
-}
 } // namespace ge

From 572990a616dc60da23bcb70fbd436e7f2948cc88 Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Sat, 26 Jun 2021 23:20:31 +0800
Subject: [PATCH 097/226] UT for control flow group

---
 tests/depends/mmpa/src/mmpa_stub.cc            |   4 +
 tests/ut/ge/CMakeLists.txt                     |   3 +-
 .../logical_stream_allocator_unittest.cc       |  10 +-
 .../graph/build/stream_allocator_unittest.cc   |   2 +-
 .../ge/graph/passes/assert_pass_unittest.cc    |   6 +-
 .../ut/ge/graph/passes/base_pass_unittest.cc   |  14 +-
 .../graph/passes/cond_branch_v1_unittest.cc    |   6 +-
 .../passes/constant_folding_pass_unittest.cc   |  38 +++---
 .../passes/dimension_compute_pass_unittest.cc  |   8 +-
 .../ssd_prior_box_kernel_unittest.cc           |   2 +-
 ...a_nodes_with_common_input_pass_unittest.cc  |   2 +-
 ...rk_force_unknown_for_cond_pass_unittest.cc  | 129 ++++++++++------
 .../ut/ge/graph/passes/merge_pass_unittest.cc  |  28 ++--
 .../passes/parallel_group_pass_unittest.cc     |  12 +-
 .../passes/reshape_recovery_pass_unittest.cc   |   6 +-
 .../passes/reshape_remove_pass_unittest.cc     |  16 +-
 .../resource_pair_control_pass_unittest.cc     |   2 +-
 .../switch_logic_remove_pass_unittest.cc       |  12 +-
 .../trans_op_breadth_fusion_pass_unittest.cc   |   4 +-
 .../trans_op_depth_fusion_pass_unittest.cc     |  14 +-
 ...p_nearby_allreduce_fusion_pass_unittest.cc  |   4 +-
 .../graph/passes/variable_op_pass_unittest.cc  |   2 +-
 .../variable_accelerate_ctrl_unittest.cc       |  10 +-
 23 files changed, 195 insertions(+), 139 deletions(-)

diff --git a/tests/depends/mmpa/src/mmpa_stub.cc b/tests/depends/mmpa/src/mmpa_stub.cc
index aae8de9f..b0f1fb87 100644
--- a/tests/depends/mmpa/src/mmpa_stub.cc
+++ b/tests/depends/mmpa/src/mmpa_stub.cc
@@ -345,6 +345,10 @@ INT32 mmIsDir(const CHAR *fileName)
 
 INT32 mmGetEnv(const CHAR *name, CHAR *value, UINT32 len)
 {
+  const char *env = getenv(name);
+  if (env != nullptr) {
+    strcpy(value, env);
+  }
   return 0;
 }
 
diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 95b9e388..57677cf0 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ 
b/tests/ut/ge/CMakeLists.txt @@ -720,7 +720,6 @@ set(PASS_TEST_FILES "graph/passes/memcpy_addr_async_unittest.cc" "graph/passes/hccl_continuous_pass_unittest.cc" "graph/passes/hccl_memcpy_pass_unittest.cc" - ) set(KERNEL_TEST_FILES @@ -850,7 +849,6 @@ set(HYBRID_TEST_FILES "hybrid/executor/hybrid_model_async_executor_unittest.cc" "hybrid/executor/hybrid_model_pipeline_executor_unittest.cc" "hybrid/node_executor/aicore/aicore_task_compiler_unittest.cc" - ) set(OTHERS_TEST_FILES @@ -877,6 +875,7 @@ add_library(ge_ut_graph STATIC target_compile_definitions(ge_ut_graph PRIVATE google=ascend_private + FMK_SUPPORT_DUMP ) target_compile_options(ge_ut_graph PRIVATE diff --git a/tests/ut/ge/graph/build/logical_stream_allocator_unittest.cc b/tests/ut/ge/graph/build/logical_stream_allocator_unittest.cc index 218bfd0d..352984fa 100644 --- a/tests/ut/ge/graph/build/logical_stream_allocator_unittest.cc +++ b/tests/ut/ge/graph/build/logical_stream_allocator_unittest.cc @@ -349,7 +349,7 @@ class UtestLogicalStreamAllocator : public testing::Test { /// B --> C(AllReduce) --- D /// / /// stream id: 0 A - /// \ + /// \. /// E --> F(AllReduce) --- G /// stream id: 2 2 2 /// @@ -599,7 +599,7 @@ TEST_F(UtestLogicalStreamAllocator, test_label_not_reusable2) { /// case of multi-output, then unuse stream /// sub1 -/// / | \ +/// / | \. /// sub2 sub3 sub4 TEST_F(UtestLogicalStreamAllocator, test_multiOut_new_stream) { SubGraphInfoPtr data = CreateDataSubgraph(); @@ -624,7 +624,7 @@ TEST_F(UtestLogicalStreamAllocator, test_multiOut_new_stream) { /// if paralle id 1, then use stream /// sub1 -/// / | | \ +/// / | | \. /// sub2 sub3 sub4 sub5 TEST_F(UtestLogicalStreamAllocator, test_parallel_one) { SubGraphInfoPtr data = CreateDataSubgraph(); @@ -653,7 +653,7 @@ TEST_F(UtestLogicalStreamAllocator, test_parallel_one) { /// if the param of engine independent is true, then set independent stream /// sub1 -/// / | | \ +/// / | | \. /// sub2 sub3 sub4 sub5 TEST_F(UtestLogicalStreamAllocator, test_independent) { SubGraphInfoPtr data = CreateDataSubgraph(); @@ -692,7 +692,7 @@ TEST_F(UtestLogicalStreamAllocator, test_independent) { /// set stream based on stream label, and then based on independent /// sub1 -/// / | | \ +/// / | | \. /// sub2 sub3 sub4 sub5 TEST_F(UtestLogicalStreamAllocator, test_independent_switch_label) { SubGraphInfoPtr data = CreateDataSubgraph(); diff --git a/tests/ut/ge/graph/build/stream_allocator_unittest.cc b/tests/ut/ge/graph/build/stream_allocator_unittest.cc index 019e75d1..4ae871af 100644 --- a/tests/ut/ge/graph/build/stream_allocator_unittest.cc +++ b/tests/ut/ge/graph/build/stream_allocator_unittest.cc @@ -36,7 +36,7 @@ class UtestStreamAllocator : public testing::Test { /// /// A - /// / \ + /// / \. /// B C /// | | /// D 400 diff --git a/tests/ut/ge/graph/passes/assert_pass_unittest.cc b/tests/ut/ge/graph/passes/assert_pass_unittest.cc index 4aa133d3..9247681c 100644 --- a/tests/ut/ge/graph/passes/assert_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/assert_pass_unittest.cc @@ -55,7 +55,7 @@ class UtestGraphPassesAssertPass : public Test { }; /// D E -/// | \ | \ +/// | \ | \. /// F C G /// : | : /// H A I @@ -134,8 +134,8 @@ TEST_F(UtestGraphPassesAssertPass, assert_pass_test2) { EXPECT_EQ(graph->FindNode("D"), nullptr); } -/// E F -/// | \ | \ +/// E F +/// | \ | \. 
/// H C -> D G /// \ | : /// A I diff --git a/tests/ut/ge/graph/passes/base_pass_unittest.cc b/tests/ut/ge/graph/passes/base_pass_unittest.cc index 9bba5d77..c687e07f 100644 --- a/tests/ut/ge/graph/passes/base_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/base_pass_unittest.cc @@ -130,7 +130,7 @@ class UTESTGraphPassesBasePass : public testing::Test { /// reshape1 /// | /// add1 -/// / \ +/// / \. /// | | /// data1 const1 ComputeGraphPtr BuildGraph1() { @@ -148,9 +148,9 @@ ComputeGraphPtr BuildGraph1() { } /// sum1 -/// / \ -/// / \ -/// / \ +/// / \. +/// / \. +/// / \. /// reshape1 addn1 /// | c | /// add1 <--- shape1 @@ -217,7 +217,7 @@ void CheckIterOrder(UtestTestPass *pass, std::vector &loop, vector &cond) { /******************************************************************************* - * Exit Identify - * \ / \. - * \ / \. - * Switch Add - * / | | - * / | | - * / | | - * LoopCond | | - * \ | | - * \ | | - * \ | | - * Less | | - * \ | NextIteration - * \ | | - * \ | | - * Merge <---------| - * | - * | - * Enter + * | + * +--------------------- Merge ----------------------+ + * / | + * / | + * / | + * / | + * Exit Identify | + * \ / \. | + * \ / \. | + * Switch Add Add + * / | | | + * / | | | + * / | | | + * LoopCond | | | + * \ | | | + * \ | | | + * \ | | | + * Less | | | + * \ | NextIteration | + * \ | | | + * \ | | | + * Merge <---------| | + * | | + * | | + * Enter | + * \ | + * \ | + * Switch Switch + * | | + * +-----------------Equal----------------------+ + * | ******************************************************************************/ - auto data1 = CreateNode(*graph, "data", DATA, 1, 1); + auto data1 = CreateNode(*graph, "data1", DATA, 1, 1); + auto data2 = CreateNode(*graph, "data2", DATA, 1, 1); + + auto equal1 = CreateNode(*graph, "equal1", EQUAL, 2, 1); + auto switch1 = CreateNode(*graph, "switch1", SWITCH, 2, 2); + auto switch2 = CreateNode(*graph, "switch2", SWITCH, 2, 2); + auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1); - auto merge1 = CreateNode(*graph, "merge", MERGE, 2, 2); - auto less1 = CreateNode(*graph, "less", LESS, 2, 1); + auto merge1 = CreateNode(*graph, "merge1", MERGE, 2, 2); + auto less1 = CreateNode(*graph, "less1", LESS, 2, 1); auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1); - auto switch1 = CreateNode(*graph, "switch", SWITCH, 2, 2); + auto switch3 = CreateNode(*graph, "switch3", SWITCH, 2, 2); auto ident1 = CreateNode(*graph, "identity", IDENTITY, 1, 1); - auto add1 = CreateNode(*graph, "add", ADD, 2, 1); + auto add1 = CreateNode(*graph, "add1", ADD, 2, 1); auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1); auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1); - auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1); - auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1); + auto value1 = CreateNode(*graph, "const1", CONSTANT, 0, 1); + + auto value2 = CreateNode(*graph, "const2", CONSTANT, 0, 1); + auto add2 = CreateNode(*graph, "add2", ADD, 2, 1); + auto merge2 = CreateNode(*graph, "merge2", MERGE, 2, 2); auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1); - GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0)); + GraphUtils::AddEdge(data1->GetOutDataAnchor(0), equal1->GetInDataAnchor(0)); + GraphUtils::AddEdge(data2->GetOutDataAnchor(0), equal1->GetInDataAnchor(1)); + GraphUtils::AddEdge(data1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0)); + GraphUtils::AddEdge(data2->GetOutDataAnchor(0), switch2->GetInDataAnchor(0)); + 
GraphUtils::AddEdge(equal1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1));
+  GraphUtils::AddEdge(equal1->GetOutDataAnchor(0), switch2->GetInDataAnchor(1));
+  cond.emplace_back(switch1);
+  cond.emplace_back(switch2);
+
+  GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0)); // false
   GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
   GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
   GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
   GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0));
-  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0));
-  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1));
+  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch3->GetInDataAnchor(0));
+  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch3->GetInDataAnchor(1));
+  loop.emplace_back(merge1);
 
-  GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0));
-  GraphUtils::AddEdge(switch1->GetOutDataAnchor(1), ident1->GetInDataAnchor(0));
+  GraphUtils::AddEdge(switch3->GetOutDataAnchor(0), exit1->GetInDataAnchor(0)); // false
+  GraphUtils::AddEdge(switch3->GetOutDataAnchor(1), ident1->GetInDataAnchor(0)); // true
+  loop.emplace_back(switch3);
 
   GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0));
   GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1));
   GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0));
-  GraphUtils::AddEdge(next1->GetOutDataAnchor(0), merge1->GetInDataAnchor(1));
-  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));
-  merge = merge1;
+  GraphUtils::AddEdge(switch2->GetOutDataAnchor(1), add2->GetInDataAnchor(1)); // true
+  GraphUtils::AddEdge(value2->GetOutDataAnchor(0), add2->GetInDataAnchor(0));
+
+  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), merge2->GetInDataAnchor(0));
+  GraphUtils::AddEdge(add2->GetOutDataAnchor(0), merge2->GetInDataAnchor(1));
+  GraphUtils::AddEdge(merge2->GetOutDataAnchor(0), output1->GetInDataAnchor(0));
+
+  cond.emplace_back(merge2);
+  merge = merge2;
 }
 
 static void CreateCondGraph(ComputeGraphPtr &graph, NodePtr &merge) {
@@ -197,12 +235,27 @@ static void CreateCondGraph(ComputeGraphPtr &graph, NodePtr &merge) {
 TEST_F(UtestMarkForceUnknownForCondPass, skip_while_loop_merge) {
   auto graph = std::make_shared<ComputeGraph>("test_graph");
   NodePtr merge;
-  CreateLoopGraph(graph, merge);
-
-  AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
+  vector<NodePtr> loop;
+  vector<NodePtr> cond;
+  CreateLoopGraph(graph, merge, loop, cond);
 
   MarkForceUnknownForCondPass mark_force_unknown_pass;
   EXPECT_EQ(mark_force_unknown_pass.Run(graph), SUCCESS); // skip LoopCond
+  setenv("DUMP_GE_GRAPH", "1", true);
+  GE_DUMP(graph, "control_group");
+  unsetenv("DUMP_GE_GRAPH");
+
+  EXPECT_EQ(loop.size(), 2);
+  for (const auto &node : loop) {
+    EXPECT_FALSE(node->GetOpDesc()->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP));
+  }
+
+  EXPECT_EQ(cond.size(), 3);
+  for (const auto &node : cond) {
+    int64_t group_index = -1;
+    EXPECT_TRUE(AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index));
+    EXPECT_EQ(group_index, merge->GetOpDesc()->GetId());
+  }
 }
 
 TEST_F(UtestMarkForceUnknownForCondPass, skip_known_shape_merge) {
diff --git a/tests/ut/ge/graph/passes/merge_pass_unittest.cc b/tests/ut/ge/graph/passes/merge_pass_unittest.cc
index 75fdb21b..f8f0afea 100644
--- 
a/tests/ut/ge/graph/passes/merge_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/merge_pass_unittest.cc @@ -110,8 +110,8 @@ TEST_F(UtestGraphPassesMergePass, multiple_inputs) { } /// Merge -/// | \ -/// | \ +/// | \. +/// | \. /// Op1 Op2 Merge2 /// \ | | /// \ | Op3 @@ -137,10 +137,10 @@ TEST_F(UtestGraphPassesMergePass, empty_input_cut_branch_meet_net_output_with_da } /// Merge -/// | \ -/// | \ +/// | \. +/// | \. /// Op1 Op2 Merge2 -/// \ | | \ +/// \ | | \. /// \ | Op3 /// \ | : /// NetOutput @@ -165,8 +165,8 @@ TEST_F(UtestGraphPassesMergePass, empty_input_cut_branch_meet_net_output_with_co TEST_F(UtestGraphPassesMergePass, empty_input_cut_branch) { /// Merge - /// | \ - /// | \ + /// | \. + /// | \. /// Op1 Op2 Merge2 /// \ | | /// \ | Op3 @@ -210,7 +210,7 @@ TEST_F(UtestGraphPassesMergePass, empty_input_cut_branch) { /// Op1 Op2 Merge2 /// \ | /// \ Op3 - /// \ + /// \. /// Merge3 ret = pass_.Run(merge_node2); @@ -224,7 +224,7 @@ TEST_F(UtestGraphPassesMergePass, single_non_const_input) { /// Op1 /// | /// Merge - /// / \ + /// / \. /// Op2 Op3 auto merge_node = NewNode("Merge", MERGE, 1, 2); auto node1 = NewNode("Op1", RELU, 1, 1); @@ -253,7 +253,7 @@ TEST_F(UtestGraphPassesMergePass, single_const_input) { /// Const /// | /// Merge Pass Const - /// / \ ===> / \ + /// / \ ===> / \. /// Op1 Op2 Op1 Op2 auto merge_node = NewNode("Merge", MERGE, 1, 2); auto const_node = NewNode("Const", CONSTANT, 1, 1); @@ -284,7 +284,7 @@ TEST_F(UtestGraphPassesMergePass, single_const_input_value_index_two_out_nodes) /// / | ===> / \(control anchor) /// Op1 | \ Op1 Constant /// Op2 Op3 | - /// / \ + /// / \. /// Op2 Op3 auto merge_node = NewNode("Merge", MERGE, 1, 2); auto const_node = NewNode("Const", CONSTANT, 1, 1); @@ -329,7 +329,7 @@ TEST_F(UtestGraphPassesMergePass, single_const_input_value_index_two_out_nodes1) /// / | ===> / \(control anchor) /// Op1 | \ Op1 Constant /// Op2 Op3 | - /// / \ + /// / \. /// Op2 Op3 auto merge_node = NewNode("Merge", MERGE, 1, 2); auto const_node = NewNode("Const", CONSTANT, 1, 1); @@ -357,7 +357,7 @@ TEST_F(UtestGraphPassesMergePass, const_with_control_input) { /// C /// | /// Merge - /// / \ + /// / \. /// Op1 Op2 auto switch_node = NewNode("Switch", SWITCH, 1, 2); auto identity_node = NewNode("Identity", SWITCH, 1, 1); @@ -381,7 +381,7 @@ TEST_F(UtestGraphPassesMergePass, const_with_control_input) { /// . /// . /// C - /// / \ + /// / \. /// Op1 Op2 auto ret = pass_.Run(merge_node); EXPECT_EQ(ret, SUCCESS); diff --git a/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc b/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc index d5b1db41..374fe837 100644 --- a/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc @@ -66,11 +66,11 @@ class UtestGraphPassesParallelGgroupPass : public testing::Test { void BuildDefaultGraph() { /// input - /// \ + /// \. /// sqrt pred /// \ / /// cast - /// / \ + /// / \. /// switch_t switch_f /// | | /// F T @@ -118,13 +118,13 @@ class UtestGraphPassesParallelGgroupPass : public testing::Test { void BuildDefaultGraph1() { /// input - /// \ + /// \. /// sqrt pred /// \ / /// Switch /// | | /// ----F T---- - /// \ | / \ + /// \ | / \. /// \ Merge1 Merge2 /// \_________| input_node_ = NewNode("input", RELU, 0, 1); @@ -164,14 +164,14 @@ class UtestGraphPassesParallelGgroupPass : public testing::Test { void BuildDefaultGraph2() { /// input input1 - /// \ \ + /// \ \. 
/// sqrt pred sqrt1 pred1 /// \ / \ / /// Switch Switch1 /// | | _______| /// | | / /// ____F T____ - /// \ | / \ + /// \ | / \. /// \ Merge1 Merge2 /// \__________| input_node_ = NewNode("input", RELU, 0, 2); diff --git a/tests/ut/ge/graph/passes/reshape_recovery_pass_unittest.cc b/tests/ut/ge/graph/passes/reshape_recovery_pass_unittest.cc index 3be11452..f941645e 100644 --- a/tests/ut/ge/graph/passes/reshape_recovery_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/reshape_recovery_pass_unittest.cc @@ -31,9 +31,9 @@ class UtestReshapeRecoveryPass : public testing::Test { namespace { /// netoutput1 -/// | \ -///transdata1 \ -/// | \ +/// | \. +///transdata1 \. +/// | \. /// | transdata2 /// | / /// var1 const1 diff --git a/tests/ut/ge/graph/passes/reshape_remove_pass_unittest.cc b/tests/ut/ge/graph/passes/reshape_remove_pass_unittest.cc index 351e96d7..ca0cac86 100644 --- a/tests/ut/ge/graph/passes/reshape_remove_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/reshape_remove_pass_unittest.cc @@ -35,7 +35,7 @@ namespace { /// transdata1 /// | /// reshape1 -/// | \ +/// | \. /// var1 const1 ut::GraphBuilder Graph1Builder() { ut::GraphBuilder builder = ut::GraphBuilder("g1"); @@ -55,11 +55,11 @@ ut::GraphBuilder Graph1Builder() { } /// netoutput1 -/// | \ -///transdata1 \ -/// | \ +/// | \. +///transdata1 \. +/// | \. /// reshape1 reshape2 -/// | \ / \ +/// | \ / \. /// var1 const1 var2 ut::GraphBuilder Graph2Builder() { ut::GraphBuilder builder = ut::GraphBuilder("g2"); @@ -83,9 +83,9 @@ ut::GraphBuilder Graph2Builder() { } /// netoutput1 -/// | \ -///transdata1 \ -/// | \ +/// | \. +///transdata1 \. +/// | \. /// reshape1 transdata2 /// | \ / /// var1 const1 diff --git a/tests/ut/ge/graph/passes/resource_pair_control_pass_unittest.cc b/tests/ut/ge/graph/passes/resource_pair_control_pass_unittest.cc index 6d12a49d..8cdfd0c7 100644 --- a/tests/ut/ge/graph/passes/resource_pair_control_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/resource_pair_control_pass_unittest.cc @@ -34,7 +34,7 @@ class UtestResourcePairControlPass : public testing::Test { namespace { /// netoutput1 -/// | \ +/// | \. /// StackPush StackPop /// | | /// var1 const1 diff --git a/tests/ut/ge/graph/passes/switch_logic_remove_pass_unittest.cc b/tests/ut/ge/graph/passes/switch_logic_remove_pass_unittest.cc index dcad318c..22734047 100644 --- a/tests/ut/ge/graph/passes/switch_logic_remove_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/switch_logic_remove_pass_unittest.cc @@ -63,9 +63,9 @@ ComputeGraphPtr BuildGraph1() { /// netoutput1 /// | /// merge1 -/// / \ +/// / \. /// / add1 -/// / F| \ +/// / F| \. /// addn1 swtich2 var3 /// \F T/ | /// switch1 | @@ -101,9 +101,9 @@ ComputeGraphPtr BuildGraph2() { /// add1 /// / \T /// var3 swtich2 -/// T/ \ -/// switch1 \ -/// / \ \ +/// T/ \. +/// switch1 \. +/// / \ \. /// var1 var2 var4 ComputeGraphPtr BuildGraph3() { auto builder = ut::GraphBuilder("g3"); @@ -129,7 +129,7 @@ ComputeGraphPtr BuildGraph3() { /// netoutput1 /// | /// merge1 -/// / \ +/// / \. /// add1 addn1 /// / \T F/ /// var3 swtich2 diff --git a/tests/ut/ge/graph/passes/trans_op_breadth_fusion_pass_unittest.cc b/tests/ut/ge/graph/passes/trans_op_breadth_fusion_pass_unittest.cc index dbb163e1..d05bd695 100644 --- a/tests/ut/ge/graph/passes/trans_op_breadth_fusion_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/trans_op_breadth_fusion_pass_unittest.cc @@ -402,7 +402,7 @@ TEST_F(UtestGraphPassesTransOpBreadthFusionPass, test_multi_anchor_case) { } /// ----> netoutput1 -/// / | \ +/// / | \. 
/// transdata1 transdata2 transdata3 /// \ / | /// var1-------------- @@ -432,7 +432,7 @@ static ComputeGraphPtr BuildGraph1() { } /// ---------> netoutput1 -/// / | \ +/// / | \. /// transdata1 transdata2(l1) transdata3(l1) /// \ / | /// var1------------------ diff --git a/tests/ut/ge/graph/passes/trans_op_depth_fusion_pass_unittest.cc b/tests/ut/ge/graph/passes/trans_op_depth_fusion_pass_unittest.cc index a9ea41ea..dbac3246 100644 --- a/tests/ut/ge/graph/passes/trans_op_depth_fusion_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/trans_op_depth_fusion_pass_unittest.cc @@ -456,19 +456,19 @@ TEST_F(UtestGraphPassesTransOpDepthFusionPass, test_transop_with_multi_out_edge) /// -->transpose1 -->transpose3-->sinh2 /// | \ / /// | -->transpose2 - /// | \ + /// | \. /// / -->cast3-->cast4-->sinh3 /// / /// / -->transpose4-->transpose5-->sinh4 /// / / /// Node4D-->Cast1-->Cast2-->Cast5 -->reshape2-->sinh5 - /// \ \ + /// \ \. /// \ -->sinh6 - /// \ + /// \. /// \ -->transpose6-->transpose7-->sinh9 /// \ / /// -->reshape-->cast6-->cast7-->sinh8 - /// \ + /// \. /// -->sinh7 /// after optimized graph @@ -479,15 +479,15 @@ TEST_F(UtestGraphPassesTransOpDepthFusionPass, test_transop_with_multi_out_edge) /// / /-->transpose3-->sinh2 /// -->Cast1 /// / \-->sinh7 - /// / \ + /// / \. /// / -->sinh9 /// Node4D /// \ -->sinh4 /// \ / /// -->Cast5-->sinh5 - /// \ \ + /// \ \. /// \ -->sinh6 - /// \ + /// \. /// -->Cast7-->sinh8 ge::ComputeGraphPtr graph = std::make_shared("test"); diff --git a/tests/ut/ge/graph/passes/transop_nearby_allreduce_fusion_pass_unittest.cc b/tests/ut/ge/graph/passes/transop_nearby_allreduce_fusion_pass_unittest.cc index 1220b35e..9c6d8276 100644 --- a/tests/ut/ge/graph/passes/transop_nearby_allreduce_fusion_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/transop_nearby_allreduce_fusion_pass_unittest.cc @@ -180,7 +180,7 @@ ComputeGraphPtr GetGraph7(size_t symmetric_transdata_num, size_t asymmetric_tran /// TransData TransData ... MatMul ... /// \ | / / / /// HcomAllReduce - /// / | \ \ \ + /// / | \ \ \. /// TransData TransData ... RealDiv ... ComputeGraphPtr graph = std::make_shared("test"); NodePtr allreduce = @@ -340,7 +340,7 @@ TEST(UtestTransopNearbyAllreduceFusionPass, test7_all_reduce_with_multiple_trans /// TransData TransData ... MatMul ... /// \ | / / / /// HcomAllReduce - /// / | \ \ \ + /// / | \ \ \. /// TransData TransData ... RealDiv ... size_t symmetric_transdata_num = 20; size_t asymmetric_transdata_num = 20; diff --git a/tests/ut/ge/graph/passes/variable_op_pass_unittest.cc b/tests/ut/ge/graph/passes/variable_op_pass_unittest.cc index f1ea7a27..655867a7 100644 --- a/tests/ut/ge/graph/passes/variable_op_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/variable_op_pass_unittest.cc @@ -66,7 +66,7 @@ namespace { /// transdata2 /// | /// assign1 -/// / \ +/// / \. /// transdata1 | /// | | /// var1 const1 diff --git a/tests/ut/ge/graph/variable_accelerate_ctrl_unittest.cc b/tests/ut/ge/graph/variable_accelerate_ctrl_unittest.cc index 37b4bda7..bf350b6c 100644 --- a/tests/ut/ge/graph/variable_accelerate_ctrl_unittest.cc +++ b/tests/ut/ge/graph/variable_accelerate_ctrl_unittest.cc @@ -35,8 +35,8 @@ namespace { /// shapeNo1 /// | /// addnYes1 -/// / \ -/// / \ +/// / \. +/// / \. /// const1 const2 ComputeGraphPtr BuildGraph1() { @@ -57,9 +57,9 @@ ComputeGraphPtr BuildGraph1() { /// /// netoutput1 -/// / \ \ -/// add1 assign1 \ -/// / \ / \ \ +/// / \ \. +/// add1 assign1 \. +/// / \ / \ \. 
/// var1 var2 const1 var3
ComputeGraphPtr BuildGraph2() {

From a4aae38d72c9bc0e564ecce4d6da4ad02d46a0fd Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Sat, 26 Jun 2021 23:25:58 +0800
Subject: [PATCH 098/226] Remove UT dump env

---
 .../graph/passes/mark_force_unknown_for_cond_pass_unittest.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/ut/ge/graph/passes/mark_force_unknown_for_cond_pass_unittest.cc b/tests/ut/ge/graph/passes/mark_force_unknown_for_cond_pass_unittest.cc
index 50991822..557359b7 100644
--- a/tests/ut/ge/graph/passes/mark_force_unknown_for_cond_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/mark_force_unknown_for_cond_pass_unittest.cc
@@ -241,9 +241,6 @@ TEST_F(UtestMarkForceUnknownForCondPass, skip_while_loop_merge) {

   MarkForceUnknownForCondPass mark_force_unknown_pass;
   EXPECT_EQ(mark_force_unknown_pass.Run(graph), SUCCESS);   // skip LoopCond
-  setenv("DUMP_GE_GRAPH", "1", true);
-  GE_DUMP(graph, "control_group");
-  unsetenv("DUMP_GE_GRAPH");

   EXPECT_EQ(loop.size(), 2);
   for (const auto &node : loop) {

From 020d7cb8a04780fb7cf99ddd8a578281a26128ce Mon Sep 17 00:00:00 2001
From: lichun
Date: Mon, 28 Jun 2021 10:34:54 +0800
Subject: [PATCH 099/226] remove SetDataType

---
 ge/hybrid/executor/subgraph_executor.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc
index 6979d05f..33a2846c 100644
--- a/ge/hybrid/executor/subgraph_executor.cc
+++ b/ge/hybrid/executor/subgraph_executor.cc
@@ -109,7 +109,6 @@ Status SubgraphExecutor::InitInputsForUnknownShape(const std::vector<TensorValue
       output_desc->SetShape(tensor_desc->GetShape());
       output_desc->SetOriginShape(tensor_desc->GetOriginShape());
-      output_desc->SetDataType(tensor_desc->GetDataType());
       node_state->SetSkipInferShape(true);
     }
   }

From 91fd6e45ddd8ccbc0d1289c59efffac3ebbd2d13 Mon Sep 17 00:00:00 2001
From: WeiGangqiang
Date: Tue, 22 Jun 2021 14:53:24 +0800
Subject: [PATCH 100/226] add ge running env config and perfect graph dsl

---
 .clang-format                                 |   2 +-
 build.sh                                      |   7 +-
 ge/ge_runtime/task/hccl_task.cc               |   1 +
 tests/depends/slog/src/slog_stub.cc           |  62 ++++++--
 tests/framework/CMakeLists.txt                |   4 +-
 tests/framework/cmake/graphengine.cmake       |  31 +---
 .../ge_graph_dsl/op_desc/op_desc_cfg_box.h    |  34 ++--
 .../ge_graph_dsl/src/op_desc_cfg_box.cc       |  29 +++-
 .../ge_graph_dsl/tests/op_desc_config_test.cc |  75 +++++++++
 .../ge_graph_dsl/tests/stub/optype_stub.cc    |   1 +
 tests/framework/ge_running_env/CMakeLists.txt |  18 +++
 .../ge_running_env/include/CMakeLists.txt     |  17 ++
 .../include/ge_running_env/env_installer.h}   |  29 ++--
 .../include/ge_running_env/fake_engine.h      |  56 +++++++
 .../include/ge_running_env/fake_ns.h          |  28 ++++
 .../include/ge_running_env/fake_op.h          |  49 ++++++
 .../ge_running_env/fake_ops_kernel_builder.h} |  39 ++---
 .../fake_ops_kernel_info_store.h              |  39 +++++
 .../ge_running_env/ge_running_env_faker.h     |  45 ++++++
 .../ge_running_env/info_store_holder.h}       |  40 ++---
 .../ge_running_env/src/CMakeLists.txt         |  45 ++++++
 .../ge_running_env/src/engine/fake_engine.cc  |  81 ++++++++++
 .../src/engine/fake_ops_kernel_builder.cc}    |  43 ++---
 .../src/engine/fake_ops_kernel_info_store.cc  |  42 +++++
 .../src/engine/info_store_holder.cc           |  49 ++++++
 .../src/env/ge_default_running_env.cc         |  56 +++++++
 .../src/env/ge_default_running_env.h          |  32 ++++
 .../src/env/ge_running_env_faker.cc           | 109 +++++++++++++
 .../ge_running_env/src/op/fake_op.cc          |  95 +++++++++++
 .../ge_running_env/src/op/fake_op_repo.cc     |  39 +++++
 .../ge_running_env/src/op/fake_op_repo.h      |  31 ++++
.../ge_running_env/tests/CMakeLists.txt | 33 ++++ .../tests/test_ge_running_env_faker.cc | 148 ++++++++++++++++++ .../ge_running_env/tests/test_main.cc | 34 ++++ tests/framework/stub_engine/CMakeLists.txt | 58 ------- .../stub_engine/engine/stub_engine.cc | 74 --------- .../stub_engine/engine/stub_engine.h | 127 --------------- tests/framework/stub_engine/inc/st_types.h | 33 ---- .../ops_kernel_store/op/host_op.cc | 41 ----- .../ops_kernel_store/op/stub_op_factory.cc | 51 ------ .../ops_kernel_store/op/stub_op_factory.h | 109 ------------- .../ops_kernel_store/stub_ops_kernel_store.cc | 77 --------- .../ops_kernel_store/stub_ops_kernel_store.h | 73 --------- tests/st/testcase/CMakeLists.txt | 2 +- tests/st/testcase/test_framework_dummy.cc | 16 +- tests/st/testcase/test_main.cc | 37 +++++ 46 files changed, 1328 insertions(+), 813 deletions(-) create mode 100644 tests/framework/ge_graph_dsl/tests/op_desc_config_test.cc create mode 100644 tests/framework/ge_running_env/CMakeLists.txt create mode 100644 tests/framework/ge_running_env/include/CMakeLists.txt rename tests/framework/{stub_engine/ops_kernel_store/op/host_op.h => ge_running_env/include/ge_running_env/env_installer.h} (52%) create mode 100644 tests/framework/ge_running_env/include/ge_running_env/fake_engine.h create mode 100644 tests/framework/ge_running_env/include/ge_running_env/fake_ns.h create mode 100644 tests/framework/ge_running_env/include/ge_running_env/fake_op.h rename tests/framework/{stub_engine/ops_kernel_store/stub_ops_kernel_builder.h => ge_running_env/include/ge_running_env/fake_ops_kernel_builder.h} (59%) create mode 100644 tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_info_store.h create mode 100644 tests/framework/ge_running_env/include/ge_running_env/ge_running_env_faker.h rename tests/framework/{stub_engine/ops_kernel_store/op/op.h => ge_running_env/include/ge_running_env/info_store_holder.h} (51%) create mode 100644 tests/framework/ge_running_env/src/CMakeLists.txt create mode 100644 tests/framework/ge_running_env/src/engine/fake_engine.cc rename tests/framework/{stub_engine/ops_kernel_store/stub_ops_kernel_builder.cc => ge_running_env/src/engine/fake_ops_kernel_builder.cc} (73%) create mode 100644 tests/framework/ge_running_env/src/engine/fake_ops_kernel_info_store.cc create mode 100644 tests/framework/ge_running_env/src/engine/info_store_holder.cc create mode 100644 tests/framework/ge_running_env/src/env/ge_default_running_env.cc create mode 100644 tests/framework/ge_running_env/src/env/ge_default_running_env.h create mode 100644 tests/framework/ge_running_env/src/env/ge_running_env_faker.cc create mode 100644 tests/framework/ge_running_env/src/op/fake_op.cc create mode 100644 tests/framework/ge_running_env/src/op/fake_op_repo.cc create mode 100644 tests/framework/ge_running_env/src/op/fake_op_repo.h create mode 100644 tests/framework/ge_running_env/tests/CMakeLists.txt create mode 100644 tests/framework/ge_running_env/tests/test_ge_running_env_faker.cc create mode 100644 tests/framework/ge_running_env/tests/test_main.cc delete mode 100644 tests/framework/stub_engine/CMakeLists.txt delete mode 100644 tests/framework/stub_engine/engine/stub_engine.cc delete mode 100644 tests/framework/stub_engine/engine/stub_engine.h delete mode 100644 tests/framework/stub_engine/inc/st_types.h delete mode 100644 tests/framework/stub_engine/ops_kernel_store/op/host_op.cc delete mode 100644 tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.cc delete mode 100644 
tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.h delete mode 100644 tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.cc delete mode 100644 tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.h create mode 100644 tests/st/testcase/test_main.cc diff --git a/.clang-format b/.clang-format index c931e8f0..e7f9d935 100644 --- a/.clang-format +++ b/.clang-format @@ -50,7 +50,7 @@ CommentPragmas: '^ IWYU pragma:' CompactNamespaces: false ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 -ContinuationIndentWidth: 2 +ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: true DisableFormat: false diff --git a/build.sh b/build.sh index 61f86945..dbbf696b 100755 --- a/build.sh +++ b/build.sh @@ -144,7 +144,6 @@ build_graphengine() CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GE_UT=ON" fi - if [[ "X$ENABLE_GE_ST" = "Xon" ]]; then CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_GE_ST=ON" fi @@ -176,7 +175,7 @@ build_graphengine() TARGET="ge_compiler atc_atc.bin ge_executor_shared ${TARGET}" elif [ "X$ENABLE_GE_ST" = "Xon" ] then - TARGET="ge_graph_dsl_test graph_engine_test" + TARGET="ge_graph_dsl_test ge_running_env_test graph_engine_test" elif [ "X$ENABLE_GE_UT" = "Xon" ] then TARGET="ut_libgraph ut_libge_multiparts_utest ut_libge_others_utest ut_libge_kernel_utest ut_libge_distinct_load_utest" @@ -244,13 +243,13 @@ if [[ "X$ENABLE_GE_ST" = "Xon" ]]; then mkdir -p ${OUTPUT_PATH}/plugin/opskernel cp ${BUILD_PATH}/tests/framework/libnnengine.so ${OUTPUT_PATH}/plugin/nnengine cp ${BUILD_PATH}/engine_conf.json ${OUTPUT_PATH}/plugin/nnengine/ge_config - cp ${BUILD_PATH}/tests/framework/libhost_cpu_engine.so ${OUTPUT_PATH}/plugin/opskernel cp ${BUILD_PATH}/tests/framework/libge_local_engine.so ${OUTPUT_PATH}/plugin/opskernel - cp ${BUILD_PATH}/tests/framework/stub_engine/libfe.so ${OUTPUT_PATH}/plugin/opskernel #prepare st execution bin cp ${BUILD_PATH}/tests/st/testcase/graph_engine_test ${OUTPUT_PATH} + cp ${BUILD_PATH}/tests/framework/ge_running_env/tests/ge_running_env_test ${OUTPUT_PATH} cp ${BUILD_PATH}/tests/framework/ge_graph_dsl/tests/ge_graph_dsl_test ${OUTPUT_PATH} #execute st testcase + RUN_TEST_CASE=${OUTPUT_PATH}/ge_running_env_test && ${RUN_TEST_CASE} RUN_TEST_CASE=${OUTPUT_PATH}/graph_engine_test && ${RUN_TEST_CASE} RUN_TEST_CASE=${OUTPUT_PATH}/ge_graph_dsl_test && ${RUN_TEST_CASE} if [[ "$?" -ne 0 ]]; then diff --git a/ge/ge_runtime/task/hccl_task.cc b/ge/ge_runtime/task/hccl_task.cc index 2ffe5185..b1c7158c 100644 --- a/ge/ge_runtime/task/hccl_task.cc +++ b/ge/ge_runtime/task/hccl_task.cc @@ -16,6 +16,7 @@ #include "ge_runtime/task/hccl_task.h" #include +#include "framework/common/util.h" #include "ge_runtime/task/task_factory.h" #include "common/opskernel/ops_kernel_info_store.h" #include "common/opskernel/ge_task_info.h" diff --git a/tests/depends/slog/src/slog_stub.cc b/tests/depends/slog/src/slog_stub.cc index d0eb49c5..238a6b37 100644 --- a/tests/depends/slog/src/slog_stub.cc +++ b/tests/depends/slog/src/slog_stub.cc @@ -23,13 +23,46 @@ void dav_log(int module_id, const char *fmt, ...) {} -void DlogErrorInner(int module_id, const char *fmt, ...) 
{ dav_log(module_id, fmt); } +static int log_level = DLOG_ERROR; + +#define __DO_PRINT() \ + do { \ + const int FMT_BUFF_SIZE = 1024; \ + char fmt_buff[FMT_BUFF_SIZE] = {0}; \ + va_list valist; \ + va_start(valist, fmt); \ + vsnprintf(fmt_buff, FMT_BUFF_SIZE, fmt, valist); \ + va_end(valist); \ + printf("%s \n", fmt_buff); \ + } while (0) + +void DlogErrorInner(int module_id, const char *fmt, ...) { + if (log_level > DLOG_ERROR) { + return; + } + __DO_PRINT(); +} -void DlogWarnInner(int module_id, const char *fmt, ...) { dav_log(module_id, fmt); } +void DlogWarnInner(int module_id, const char *fmt, ...) { + if (log_level > DLOG_WARN) { + return; + } + __DO_PRINT(); +} -void DlogInfoInner(int module_id, const char *fmt, ...) { dav_log(module_id, fmt); } +void DlogInfoInner(int module_id, const char *fmt, ...) { + if (log_level > DLOG_INFO) { + return; + } + __DO_PRINT(); +} -void DlogDebugInner(int module_id, const char *fmt, ...) { dav_log(module_id, fmt); } +void DlogDebugInner(int module_id, const char *fmt, ...) { + if (log_level > DLOG_DEBUG) { + return; + } + __DO_PRINT(); +} void DlogEventInner(int module_id, const char *fmt, ...) { dav_log(module_id, fmt); } @@ -39,30 +72,25 @@ void DlogWithKVInner(int module_id, int level, KeyValue *pst_kv_array, int kv_nu dav_log(module_id, fmt); } -int dlog_setlevel(int module_id, int level, int enable_event) { return DLOG_DEBUG; } +int dlog_setlevel(int module_id, int level, int enable_event) { + log_level = level; + return log_level; +} -int dlog_getlevel(int module_id, int *enable_event) { return DLOG_DEBUG; } +int dlog_getlevel(int module_id, int *enable_event) { return log_level; } -int CheckLogLevel(int moduleId, int logLevel) -{ - return 1; -} +int CheckLogLevel(int moduleId, int log_level_check) { return log_level >= log_level_check; } /** * @ingroup plog * @brief DlogReportInitialize: init log in service process before all device setting. * @return: 0: SUCCEED, others: FAILED */ -int DlogReportInitialize() { - return 0; -} +int DlogReportInitialize() { return 0; } /** * @ingroup plog * @brief DlogReportFinalize: release log resource in service process after all device reset. 
 * @return: 0: SUCCEED, others: FAILED
 */
-int DlogReportFinalize() {
-  return 0;
-}
-
+int DlogReportFinalize() { return 0; }
diff --git a/tests/framework/CMakeLists.txt b/tests/framework/CMakeLists.txt
index d7c806a6..8a2218b4 100644
--- a/tests/framework/CMakeLists.txt
+++ b/tests/framework/CMakeLists.txt
@@ -15,8 +15,8 @@ include(cmake/graphengine.cmake)
 add_subdirectory(easy_graph)
-add_subdirectory(stub_engine)
 add_subdirectory(ge_graph_dsl)
+add_subdirectory(ge_running_env)
 
 file(GLOB_RECURSE UTILS_SRC CONFIGURE_DEPENDS
     "utils/*.cc"
@@ -29,4 +29,4 @@ target_include_directories(framework
 )
 
 set_target_properties(framework PROPERTIES CXX_STANDARD 11)
-target_link_libraries(framework PUBLIC ge_graph_dsl graphengine fe)
+target_link_libraries(framework PUBLIC ge_graph_dsl ge_with_env)
diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake
index 81aa00cc..3c18b560 100644
--- a/tests/framework/cmake/graphengine.cmake
+++ b/tests/framework/cmake/graphengine.cmake
@@ -150,7 +150,7 @@ set_target_properties(metadef_graph PROPERTIES CXX_STANDARD 11)
 
 # ---- Target : Local engine ----
 
-add_library(ge_local_engine SHARED ${LOCAL_ENGINE_SRC} ${METADEF_REGISTER_SRCS})
+add_library(ge_local_engine SHARED ${LOCAL_ENGINE_SRC})
 
 target_include_directories(ge_local_engine
         PUBLIC
@@ -169,38 +169,11 @@ target_compile_options(ge_local_engine PRIVATE
 
 target_link_libraries(ge_local_engine PUBLIC
         $ ${STUB_LIBS}
-        metadef_graph
         -lrt -ldl -lpthread -lgcov
 )
 
 set_target_properties(ge_local_engine PROPERTIES CXX_STANDARD 11)
 
-# ---- Target : Host engine ----
-
-add_library(host_cpu_engine SHARED ${HOST_ENGINE_SRC})
-
-target_include_directories(host_cpu_engine
-        PUBLIC
-        "${INCLUDE_DIRECTORIES}"
-        "${GE_CODE_DIR}/ge/host_cpu_engine"
-)
-
-target_compile_definitions(host_cpu_engine PRIVATE
-        google=ascend_private
-        FMK_SUPPORT_DUMP
-)
-
-target_compile_options(host_cpu_engine PRIVATE
-        -g --coverage -fprofile-arcs -ftest-coverage
-        -Werror=format
-)
-
-target_link_libraries(host_cpu_engine PUBLIC
-        $ ${STUB_LIBS} metadef_graph -lrt -ldl -lpthread -lgcov
-)
-
-set_target_properties(host_cpu_engine PROPERTIES CXX_STANDARD 11)
-
 # ---- Target : engine plugin----
 #
@@ -273,4 +246,4 @@ target_link_libraries(graphengine PUBLIC
 )
 
 set_target_properties(graphengine PROPERTIES CXX_STANDARD 11)
-add_dependencies(graphengine host_cpu_engine ge_local_engine nnengine engine_conf.json optimizer_priority.pbtxt)
+add_dependencies(graphengine ge_local_engine nnengine engine_conf.json optimizer_priority.pbtxt)
diff --git a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg_box.h b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg_box.h
index af3a1971..2be05972 100644
--- a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg_box.h
+++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg_box.h
@@ -21,6 +21,7 @@
 #include "ge_graph_dsl/ge.h"
 #include "ge_graph_dsl/op_desc/op_box.h"
 #include "ge_graph_dsl/op_desc/op_desc_cfg.h"
+#include "graph/ge_attr_value.h"
 #include "graph/op_desc.h"
 
 GE_NS_BEGIN
@@ -29,19 +30,32 @@ struct OpDescCfgBox : OpBox, private OpDescCfg {
   OpDescCfgBox(const OpType &opType);
   OpDescCfgBox &InCnt(int in_cnt);
   OpDescCfgBox &OutCnt(int out_cnt);
+  OpDescCfgBox &ParentNodeIndex(int node_index);
   OpDescCfgBox &TensorDesc(Format format = FORMAT_NCHW, DataType data_type = DT_FLOAT,
-                           std::vector<int64_t> shape = {1, 1, 224, 224});
-  template <typename Type>
-  OpDescCfgBox& Attr(const std::string &name, Type value) {
-    auto attrvalue = ge::GeAttrValue::CreateFrom<Type>(value);
-    attrs_.emplace(std::make_pair(name, attrvalue));
-    return *this;
-  }
+                           std::vector<int64_t> shape = {1, 1, 224, 224});
+  OpDescCfgBox &Weight(GeTensorPtr &);
 
- private:
+  template <typename Type>
+  OpDescCfgBox &Attr(const std::string &name, Type &&value) {
+    auto attrvalue = ge::GeAttrValue::CreateFrom<Type>(std::forward<Type>(value));
+    attrs_.emplace(std::make_pair(name, attrvalue));
+    return *this;
+  }
+
+  template <typename Type>
+  OpDescCfgBox &Attr(const std::string &name, Type &value) {
+    auto attrvalue = ge::GeAttrValue::CreateFrom<Type>(value);
+    attrs_.emplace(std::make_pair(name, attrvalue));
+    return *this;
+  }
+
+  OpDescCfgBox &Attr(const std::string &name, int value);
+  OpDescCfgBox &Attr(const std::string &name, const char *value);
   OpDescPtr Build(const ::EG_NS::NodeId &id) const override;
-  void UpdateAttrs(OpDescPtr&) const;
-  std::map<std::string, GeAttrValue> attrs_;
+
+ private:
+  void UpdateAttrs(OpDescPtr &) const;
+  std::map<std::string, GeAttrValue> attrs_;
 };
 
 #define OP_CFG(optype) ::GE_NS::OpDescCfgBox(optype)
diff --git a/tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc b/tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc
index fc2a6c1c..be7cd831 100644
--- a/tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc
+++ b/tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc
@@ -17,8 +17,8 @@
 #include "ge_graph_dsl/op_desc/op_desc_cfg_box.h"
 #include "easy_graph/infra/status.h"
 #include "ge_graph_dsl/op_desc/op_desc_cfg_repo.h"
-#include "ge_graph_dsl/op_desc/op_desc_cfg.h"
 #include "external/graph/gnode.h"
+#include "graph/debug/ge_attr_define.h"
 #include "graph/ge_tensor.h"
 
 using ::EG_NS::Status;
@@ -44,6 +44,26 @@ OpDescCfgBox &OpDescCfgBox::OutCnt(int out_cnt) {
   return *this;
 }
 
+OpDescCfgBox &OpDescCfgBox::ParentNodeIndex(int node_index) {
+  this->Attr(ATTR_NAME_PARENT_NODE_INDEX, node_index);
+  return *this;
+}
+
+OpDescCfgBox &OpDescCfgBox::Attr(const std::string &name, int value) {
+  this->Attr(name, (int64_t)value);
+  return *this;
+}
+
+OpDescCfgBox &OpDescCfgBox::Attr(const std::string &name, const char *value) {
+  this->Attr(name, std::string(value));
+  return *this;
+}
+
+OpDescCfgBox &OpDescCfgBox::Weight(GeTensorPtr &tensor_ptr) {
+  this->Attr(ATTR_NAME_WEIGHTS, tensor_ptr);
+  return *this;
+}
+
 OpDescCfgBox &OpDescCfgBox::TensorDesc(Format format, DataType data_type, std::vector<int64_t> shape) {
   default_tensor_.format_ = format;
   default_tensor_.data_type_ = data_type;
@@ -51,10 +71,9 @@ OpDescCfgBox &OpDescCfgBox::TensorDesc(Format format, DataType data_type, std::v
   return *this;
 }
 
-void OpDescCfgBox::UpdateAttrs(OpDescPtr& op_desc) const {
-  std::for_each(attrs_.begin(), attrs_.end(), [&op_desc](const auto &attr){
-    op_desc->SetAttr(attr.first, attr.second);
-  });
+void OpDescCfgBox::UpdateAttrs(OpDescPtr &op_desc) const {
+  std::for_each(attrs_.begin(), attrs_.end(),
+                [&op_desc](const auto &attr) { op_desc->SetAttr(attr.first, attr.second); });
 }
 
 OpDescPtr OpDescCfgBox::Build(const ::EG_NS::NodeId &id) const {
diff --git a/tests/framework/ge_graph_dsl/tests/op_desc_config_test.cc b/tests/framework/ge_graph_dsl/tests/op_desc_config_test.cc
new file mode 100644
index 00000000..eee5d7c2
--- /dev/null
+++ b/tests/framework/ge_graph_dsl/tests/op_desc_config_test.cc
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "gtest/gtest.h" +#include "framework/common/types.h" +#include "graph/debug/ge_attr_define.h" +#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" +#include "graph/ge_tensor.h" +#include "graph/utils/attr_utils.h" +GE_NS_BEGIN + +class OpDescCfgTest : public testing::Test {}; + +TEST_F(OpDescCfgTest, test_attr_set_string_success) { + auto op_ptr = OP_CFG(DATA).Attr(ENTER_ATTR_FRAME_NAME, "1").Build("data1"); + + ge::GeAttrValue ret; + op_ptr->GetAttr(ENTER_ATTR_FRAME_NAME, ret); + std::string value; + ret.GetValue(value); + + ASSERT_EQ(value, "1"); +} + +TEST_F(OpDescCfgTest, test_attr_set_int_success) { + auto op_ptr = OP_CFG(DATA).Attr(ENTER_ATTR_FRAME_NAME, 2).Build("data1"); + + ge::GeAttrValue ret; + op_ptr->GetAttr(ENTER_ATTR_FRAME_NAME, ret); + int64_t value; + ret.GetValue(value); + + ASSERT_EQ(value, 2); +} + +TEST_F(OpDescCfgTest, test_attr_set_perent_node_index_success) { + auto op_ptr = OP_CFG(DATA).ParentNodeIndex(2).Build("data1"); + + ge::GeAttrValue ret; + op_ptr->GetAttr(ATTR_NAME_PARENT_NODE_INDEX, ret); + int64_t value; + ret.GetValue(value); + + ASSERT_EQ(value, 2); +} + +TEST_F(OpDescCfgTest, test_attr_set_weight_success) { + int64_t dims_size = 1; + vector data_vec = {5}; + for_each(data_vec.begin(), data_vec.end(), [&](int64_t &data) { dims_size *= data; }); + vector data_value_vec(dims_size, 1); + GeTensorDesc data_tensor_desc(GeShape(data_vec), FORMAT_NCHW, DT_INT32); + GeTensorPtr data_tensor = std::make_shared(data_tensor_desc, (uint8_t *)data_value_vec.data(), + data_value_vec.size() * sizeof(int32_t)); + + auto op_ptr = OP_CFG(CONSTANT).Weight(data_tensor).Build("const1"); + + ConstGeTensorPtr tensor_value; + ASSERT_TRUE(AttrUtils::GetTensor(op_ptr, ge::ATTR_NAME_WEIGHTS, tensor_value)); + ASSERT_EQ(tensor_value->GetTensorDesc().GetDataType(), DT_INT32); +} + +GE_NS_END diff --git a/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc b/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc index b83d68fc..071f8c36 100644 --- a/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc +++ b/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc @@ -23,6 +23,7 @@ GE_NS_BEGIN REGISTER_OPTYPE_DEFINE(DATA, "Data"); REGISTER_OPTYPE_DEFINE(HCOMALLGATHER, "HcomAllGather"); REGISTER_OPTYPE_DEFINE(VARIABLE, "Variable"); +REGISTER_OPTYPE_DEFINE(CONSTANT, "Const"); REGISTER_OPTYPE_DEFINE(CONSTANTOP, "Constant"); REGISTER_OPTYPE_DEFINE(LESS, "Less"); REGISTER_OPTYPE_DEFINE(MUL, "Mul"); diff --git a/tests/framework/ge_running_env/CMakeLists.txt b/tests/framework/ge_running_env/CMakeLists.txt new file mode 100644 index 00000000..deac4e03 --- /dev/null +++ b/tests/framework/ge_running_env/CMakeLists.txt @@ -0,0 +1,18 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +add_subdirectory(include) +add_subdirectory(src) +add_subdirectory(tests) \ No newline at end of file diff --git a/tests/framework/ge_running_env/include/CMakeLists.txt b/tests/framework/ge_running_env/include/CMakeLists.txt new file mode 100644 index 00000000..b71b0578 --- /dev/null +++ b/tests/framework/ge_running_env/include/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +add_library(ge_running_env_inc INTERFACE) +target_include_directories(ge_running_env_inc INTERFACE ./) diff --git a/tests/framework/stub_engine/ops_kernel_store/op/host_op.h b/tests/framework/ge_running_env/include/ge_running_env/env_installer.h similarity index 52% rename from tests/framework/stub_engine/ops_kernel_store/op/host_op.h rename to tests/framework/ge_running_env/include/ge_running_env/env_installer.h index 464df47a..79b65137 100644 --- a/tests/framework/stub_engine/ops_kernel_store/op/host_op.h +++ b/tests/framework/ge_running_env/include/ge_running_env/env_installer.h @@ -14,23 +14,22 @@ * limitations under the License. 
 */
-#ifndef GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_HOST_OP_H_
-#define GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_HOST_OP_H_
+#ifndef H1D9F4FDE_BB21_4DE4_AC7E_751920B45039
+#define H1D9F4FDE_BB21_4DE4_AC7E_751920B45039
 
-#include "stub_engine/ops_kernel_store/op/op.h"
+#include "fake_ns.h"
+#include "opskernel_manager/ops_kernel_manager.h"
+#include "register/ops_kernel_builder_registry.h"
 
-namespace ge {
-namespace st {
-class GE_FUNC_VISIBILITY HostOp : public Op {
- public:
-  HostOp(const Node &node, RunContext &run_context) : Op(node, run_context) {}
-  ~HostOp() override = default;
-  HostOp &operator=(const HostOp &op) = delete;
-  HostOp(const HostOp &op) = delete;
+FAKE_NS_BEGIN
 
-  Status Run() override;
+struct EnvInstaller {
+  virtual void InstallTo(std::map<std::string, OpsKernelInfoStorePtr>&) const {}
+  virtual void InstallTo(std::map<std::string, GraphOptimizerPtr>&) const {}
+  virtual void InstallTo(std::map<std::string, OpsKernelBuilderPtr>&) const {}
+  virtual void Install() const {}
 };
-}  // namespace st
-}  // namespace ge
 
-#endif  // GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_HOST_OP_H_
+FAKE_NS_END
+
+#endif
diff --git a/tests/framework/ge_running_env/include/ge_running_env/fake_engine.h b/tests/framework/ge_running_env/include/ge_running_env/fake_engine.h
new file mode 100644
index 00000000..c4207223
--- /dev/null
+++ b/tests/framework/ge_running_env/include/ge_running_env/fake_engine.h
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ */ +#ifndef HAF5E9BF2_752F_4E03_B0A5_E1B912A5FA24 +#define HAF5E9BF2_752F_4E03_B0A5_E1B912A5FA24 + +#include +#include "fake_ns.h" +#include "ge_running_env/env_installer.h" +#include "common/opskernel/ops_kernel_info_types.h" +#include "opskernel_manager/ops_kernel_manager.h" +#include "register/ops_kernel_builder_registry.h" +#include "fake_ops_kernel_builder.h" +#include "fake_ops_kernel_info_store.h" + +FAKE_NS_BEGIN + +using FakeOpsKernelBuilderPtr = std::shared_ptr; +using FakeOpsKernelInfoStorePtr = std::shared_ptr; + +struct FakeEngine : EnvInstaller { + FakeEngine(const std::string& engine_name); + FakeEngine& KernelBuilder(FakeOpsKernelBuilderPtr); + FakeEngine& KernelInfoStore(FakeOpsKernelInfoStorePtr); + FakeEngine& KernelInfoStore(const std::string&); + + private: + void InstallTo(std::map&) const override; + void InstallTo(std::map&) const override; + + private: + template + void InstallFor(std::map& maps, const std::map>&) const; + + private: + std::string engine_name_; + std::set info_store_names_; + std::map custom_builders_; + std::map custom_info_stores_; +}; + +FAKE_NS_END + +#endif diff --git a/tests/framework/ge_running_env/include/ge_running_env/fake_ns.h b/tests/framework/ge_running_env/include/ge_running_env/fake_ns.h new file mode 100644 index 00000000..c802e109 --- /dev/null +++ b/tests/framework/ge_running_env/include/ge_running_env/fake_ns.h @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef H7AEFF0EA_9FDE_487F_8562_2917A2D48EA2 +#define H7AEFF0EA_9FDE_487F_8562_2917A2D48EA2 + +#define FAKE_NS ge +#define FAKE_NS_BEGIN namespace FAKE_NS { +#define FAKE_NS_END } +#define USING_STUB_NS using namespace FAKE_NS; +#define FWD_DECL_STUB(type) \ + namespace FAKE_NS { \ + struct type; \ + } + +#endif diff --git a/tests/framework/ge_running_env/include/ge_running_env/fake_op.h b/tests/framework/ge_running_env/include/ge_running_env/fake_op.h new file mode 100644 index 00000000..cc442cdb --- /dev/null +++ b/tests/framework/ge_running_env/include/ge_running_env/fake_op.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef H737AD661_27C0_400F_8B08_29701308C5D0
+#define H737AD661_27C0_400F_8B08_29701308C5D0
+
+#include
+#include
+#include "fake_ns.h"
+#include "ge_running_env/env_installer.h"
+#include "graph/operator_factory.h"
+
+FAKE_NS_BEGIN
+
+struct FakeOp : EnvInstaller {
+  FakeOp(const std::string& op_type);
+
+  FakeOp& Inputs(const std::vector<std::string>&);
+  FakeOp& Outputs(const std::vector<std::string>&);
+  FakeOp& InferShape(InferShapeFunc);
+  FakeOp& InfoStoreAndBuilder(const std::string&);
+
+ private:
+  void Install() const override;
+  void InstallTo(std::map<std::string, OpsKernelInfoStorePtr>&) const override;
+
+ private:
+  const std::string op_type_;
+  std::vector<std::string> inputs_;
+  std::vector<std::string> outputs_;
+  InferShapeFunc info_fun_;
+  std::set<std::string> info_store_names_;
+};
+
+FAKE_NS_END
+
+#endif /* H737AD661_27C0_400F_8B08_29701308C5D0 */
diff --git a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.h b/tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_builder.h
similarity index 59%
rename from tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.h
rename to tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_builder.h
index 62dab542..acfe5e41 100644
--- a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.h
+++ b/tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_builder.h
@@ -13,39 +13,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#ifndef H39E4E719_91F4_4D0F_BA4F_6BA56CB1E20D
+#define H39E4E719_91F4_4D0F_BA4F_6BA56CB1E20D
 
-#ifndef GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_BUILDER_H_
-#define GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_BUILDER_H_
+#include "fake_ns.h"
+#include "common/opskernel/ops_kernel_builder.h"
+#include "info_store_holder.h"
 
-#if defined(_MSC_VER)
-#ifdef FUNC_VISIBILITY
-#define GE_FUNC_VISIBILITY _declspec(dllexport)
-#else
-#define GE_FUNC_VISIBILITY
-#endif
-#else
-#ifdef FUNC_VISIBILITY
-#define GE_FUNC_VISIBILITY __attribute__((visibility("default")))
-#else
-#define GE_FUNC_VISIBILITY
-#endif
-#endif
+FAKE_NS_BEGIN
 
-#include "common/opskernel/ops_kernel_builder.h"
+struct FakeOpsKernelBuilder : OpsKernelBuilder, InfoStoreHolder {
+  FakeOpsKernelBuilder(const std::string &kernel_lib_name);
+  FakeOpsKernelBuilder();
 
-namespace ge {
-namespace st {
-class GE_FUNC_VISIBILITY StubOpsKernelBuilder : public OpsKernelBuilder {
- public:
+ private:
   Status Initialize(const map<std::string, std::string> &options) override;
-  Status Finalize() override;
-  Status CalcOpRunningParam(Node &node) override;
-  Status GenerateTask(const Node &node, RunContext &context, std::vector<domi::TaskDef> &tasks) override;
 };
-}  // namespace st
-}  // namespace ge
 
-#endif  // GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_BUILDER_H_
+FAKE_NS_END
+
+#endif
diff --git a/tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_info_store.h b/tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_info_store.h
new file mode 100644
index 00000000..4a8ab9dc
--- /dev/null
+++ b/tests/framework/ge_running_env/include/ge_running_env/fake_ops_kernel_info_store.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef H1EBABA85_7056_48F0_B496_E4DB68E5FED3
+#define H1EBABA85_7056_48F0_B496_E4DB68E5FED3
+
+#include "fake_ns.h"
+#include "common/opskernel/ops_kernel_info_store.h"
+#include "ge/ge_api_types.h"
+#include "info_store_holder.h"
+
+FAKE_NS_BEGIN
+
+struct FakeOpsKernelInfoStore : OpsKernelInfoStore, InfoStoreHolder {
+  FakeOpsKernelInfoStore(const std::string &kernel_lib_name);
+  FakeOpsKernelInfoStore();
+
+ private:
+  Status Initialize(const std::map<std::string, std::string> &options) override;
+  Status Finalize() override;
+  bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override;
+  void GetAllOpsKernelInfo(std::map<std::string, ge::OpInfo> &infos) const override;
+};
+
+FAKE_NS_END
+
+#endif
diff --git a/tests/framework/ge_running_env/include/ge_running_env/ge_running_env_faker.h b/tests/framework/ge_running_env/include/ge_running_env/ge_running_env_faker.h
new file mode 100644
index 00000000..6d325c6a
--- /dev/null
+++ b/tests/framework/ge_running_env/include/ge_running_env/ge_running_env_faker.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef H99C11FC4_700E_4D4D_B073_7808FA88BEBC
+#define H99C11FC4_700E_4D4D_B073_7808FA88BEBC
+
+#include "ge_running_env/fake_engine.h"
+#include "fake_ns.h"
+#include "opskernel_manager/ops_kernel_manager.h"
+#include "register/ops_kernel_builder_registry.h"
+
+FAKE_NS_BEGIN
+
+struct GeRunningEnvFaker {
+  GeRunningEnvFaker();
+  GeRunningEnvFaker &Reset();
+  GeRunningEnvFaker &Install(const EnvInstaller &);
+  GeRunningEnvFaker &InstallDefault();
+  static void BackupEnv();
+
+ private:
+  void flush();
+
+ private:
+  std::map<std::string, std::vector<OpInfo>> &op_kernel_info_;
+  std::map<std::string, OpsKernelInfoStorePtr> &ops_kernel_info_stores_;
+  std::map<std::string, GraphOptimizerPtr> &ops_kernel_optimizers_;
+  std::map<std::string, OpsKernelBuilderPtr> &ops_kernel_builders_;
+};
+
+FAKE_NS_END
+
+#endif /* H99C11FC4_700E_4D4D_B073_7808FA88BEBC */
diff --git a/tests/framework/stub_engine/ops_kernel_store/op/op.h b/tests/framework/ge_running_env/include/ge_running_env/info_store_holder.h
similarity index 51%
rename from tests/framework/stub_engine/ops_kernel_store/op/op.h
rename to tests/framework/ge_running_env/include/ge_running_env/info_store_holder.h
index 3741567a..85b6c75f 100644
--- a/tests/framework/stub_engine/ops_kernel_store/op/op.h
+++ b/tests/framework/ge_running_env/include/ge_running_env/info_store_holder.h
@@ -13,33 +13,27 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ +#ifndef H7992249B_058D_40A1_94EA_52BBCB76434E +#define H7992249B_058D_40A1_94EA_52BBCB76434E -#ifndef GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_H_ -#define GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_H_ - -#include -#include -#include -#include "common/ge_inner_error_codes.h" +#include "fake_ns.h" #include "common/opskernel/ops_kernel_info_types.h" -#include "graph/node.h" -namespace ge { -namespace st { -/** - * The base class for all op. - */ -class GE_FUNC_VISIBILITY Op { - public: - Op(const Node &node, RunContext &run_context) : run_context_(run_context), node_(node) {} - virtual ~Op() = default; - virtual Status Run() = 0; +FAKE_NS_BEGIN + +struct InfoStoreHolder { + InfoStoreHolder(); + InfoStoreHolder(const std::string&); + void EngineName(std::string engine_name); + void RegistOp(std::string op_type); + std::string GetLibName(); protected: - const RunContext &run_context_; - const Node &node_; + std::map op_info_map_; + std::string kernel_lib_name_; + std::string engine_name_; }; -} // namespace st -} // namespace ge -#endif // GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_H_ +FAKE_NS_END + +#endif diff --git a/tests/framework/ge_running_env/src/CMakeLists.txt b/tests/framework/ge_running_env/src/CMakeLists.txt new file mode 100644 index 00000000..ae068bd3 --- /dev/null +++ b/tests/framework/ge_running_env/src/CMakeLists.txt @@ -0,0 +1,45 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +file(GLOB_RECURSE SOURCES CONFIGURE_DEPENDS "*.cc" "*.CC" "*.cpp" "*.CPP" "*.c++") + +# ---- Target : stub Host engine ---- +add_library(ge_with_env STATIC ${SOURCES}) + +target_include_directories(ge_with_env + PUBLIC + include + ) + +target_include_directories(ge_with_env + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ) + +target_compile_definitions(ge_with_env PRIVATE + google=ascend_private + FMK_SUPPORT_DUMP + ) + +target_compile_options(ge_with_env PRIVATE + -g --coverage -fprofile-arcs -ftest-coverage + -Werror=format + ) + +target_link_libraries(ge_with_env PUBLIC + $ ge_running_env_inc graphengine -lrt -ldl -lpthread -lgcov + ) + +set_target_properties(ge_with_env PROPERTIES CXX_STANDARD 17) diff --git a/tests/framework/ge_running_env/src/engine/fake_engine.cc b/tests/framework/ge_running_env/src/engine/fake_engine.cc new file mode 100644 index 00000000..4b8fedbc --- /dev/null +++ b/tests/framework/ge_running_env/src/engine/fake_engine.cc @@ -0,0 +1,81 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ge_running_env/fake_engine.h" +#include "ge_running_env/fake_ops_kernel_builder.h" +#include "ge_running_env/fake_ops_kernel_info_store.h" +#include "opskernel_manager/ops_kernel_manager.h" + +FAKE_NS_BEGIN + +FakeEngine::FakeEngine(const std::string &engine_name) : engine_name_(engine_name) {} + +FakeEngine &FakeEngine::KernelInfoStore(const std::string &info_store) { + info_store_names_.insert(info_store); + return *this; +} + +FakeEngine &FakeEngine::KernelInfoStore(FakeOpsKernelInfoStorePtr ptr) { + info_store_names_.insert(ptr->GetLibName()); + custom_info_stores_.insert(std::make_pair(ptr->GetLibName(), ptr)); + return *this; +} + +FakeEngine &FakeEngine::KernelBuilder(FakeOpsKernelBuilderPtr builder) { + info_store_names_.insert(builder->GetLibName()); + custom_builders_.insert(std::make_pair(builder->GetLibName(), builder)); + return *this; +} + +namespace { +template +void InstallDefault(std::map &maps, const std::string &info_store_name, + const std::string &engine_name) { + auto parent_obj = std::make_shared(info_store_name); + if (parent_obj == nullptr) { + return; + } + parent_obj->EngineName(engine_name); + maps.insert(std::make_pair(parent_obj->GetLibName(), parent_obj)); +} +} // namespace + +template +void FakeEngine::InstallFor(std::map &maps, + const std::map> &child_maps) const { + if (info_store_names_.empty()) { + InstallDefault(maps, engine_name_, engine_name_); + } else { + for (auto &info_store_name : info_store_names_) { + auto iter = child_maps.find(info_store_name); + if (iter == child_maps.end()) { + InstallDefault(maps, info_store_name, engine_name_); + } else { + maps.insert(std::make_pair(iter->second->GetLibName(), iter->second)); + } + } + } +} + +void FakeEngine::InstallTo(std::map &ops_kernel_info_stores) const { + InstallFor(ops_kernel_info_stores, custom_info_stores_); +} + +void FakeEngine::InstallTo(std::map &ops_kernel_builders) const { + InstallFor(ops_kernel_builders, custom_builders_); +} + +FAKE_NS_END diff --git a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.cc b/tests/framework/ge_running_env/src/engine/fake_ops_kernel_builder.cc similarity index 73% rename from tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.cc rename to tests/framework/ge_running_env/src/engine/fake_ops_kernel_builder.cc index 2de8691f..77472249 100644 --- a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_builder.cc +++ b/tests/framework/ge_running_env/src/engine/fake_ops_kernel_builder.cc @@ -14,40 +14,25 @@ * limitations under the License. 
*/ -#include "stub_ops_kernel_builder.h" -#include +#include "ge_running_env/fake_ops_kernel_builder.h" +#include "graph/utils/node_utils.h" #include "common/ge_inner_error_codes.h" #include "ge/ge_api_types.h" #include "graph/utils/node_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" -#include #include "framework/common/debug/ge_log.h" -#include "host_cpu_engine/common/constant/constant.h" -#include "register/ops_kernel_builder_registry.h" -#include "inc/st_types.h" +FAKE_NS_BEGIN -namespace ge { -namespace st { -REGISTER_OPS_KERNEL_BUILDER(kAicoreLibName, StubOpsKernelBuilder); -REGISTER_OPS_KERNEL_BUILDER(kVectorLibName, StubOpsKernelBuilder); -REGISTER_OPS_KERNEL_BUILDER(kAicpuLibName, StubOpsKernelBuilder); -REGISTER_OPS_KERNEL_BUILDER(kAicpuAscendLibName, StubOpsKernelBuilder); -REGISTER_OPS_KERNEL_BUILDER(kHcclLibName, StubOpsKernelBuilder); -REGISTER_OPS_KERNEL_BUILDER(kRTSLibName, StubOpsKernelBuilder); +FakeOpsKernelBuilder::FakeOpsKernelBuilder(const std::string &info_store_name) : InfoStoreHolder(info_store_name) {} +FakeOpsKernelBuilder::FakeOpsKernelBuilder() : InfoStoreHolder() {} -Status StubOpsKernelBuilder::Finalize() { - return SUCCESS; -} -Status StubOpsKernelBuilder::Initialize(const map &options) { - return SUCCESS; -} +Status FakeOpsKernelBuilder::Finalize() { return SUCCESS; } +Status FakeOpsKernelBuilder::Initialize(const map &options) { return SUCCESS; } -Status StubOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) { +Status FakeOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) { OpDescPtr op_desc = ge_node.GetOpDesc(); if (op_desc == nullptr) { - GELOGE(FAILED, "[Get][OpDesc]CalcOpRunningParam failed, as op desc is null"); - REPORT_INNER_ERROR("E19999", "GetOpDesc failed."); return FAILED; } @@ -86,9 +71,9 @@ Status StubOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) { name.c_str(), type.c_str(), i, output_mem_size, TypeUtils::FormatToSerialString(format).c_str(), TypeUtils::DataTypeToSerialString(data_type).c_str()); REPORT_CALL_ERROR( - "E19999", "CalcTensorMemSize failed for op[%s:%s] out[%zu] mem size, mem_size=%ld, format=%s, data_type=%s.", - name.c_str(), type.c_str(), i, output_mem_size, TypeUtils::FormatToSerialString(format).c_str(), - TypeUtils::DataTypeToSerialString(data_type).c_str()); + "E19999", "CalcTensorMemSize failed for op[%s:%s] out[%zu] mem size, mem_size=%ld, format=%s, data_type=%s.", + name.c_str(), type.c_str(), i, output_mem_size, TypeUtils::FormatToSerialString(format).c_str(), + TypeUtils::DataTypeToSerialString(data_type).c_str()); return FAILED; } GELOGI("Calc op[%s:%s] out[%zu] mem size is %ld, format=%s, data_type=%s.", name.c_str(), type.c_str(), i, @@ -111,9 +96,9 @@ Status StubOpsKernelBuilder::CalcOpRunningParam(Node &ge_node) { return SUCCESS; } -Status StubOpsKernelBuilder::GenerateTask(const Node &node, RunContext &context, vector &tasks) { +Status FakeOpsKernelBuilder::GenerateTask(const Node &node, RunContext &context, vector &tasks) { // no need to generate device task return SUCCESS; } -} // namespace st -} // namespace ge \ No newline at end of file + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/src/engine/fake_ops_kernel_info_store.cc b/tests/framework/ge_running_env/src/engine/fake_ops_kernel_info_store.cc new file mode 100644 index 00000000..348e1334 --- /dev/null +++ b/tests/framework/ge_running_env/src/engine/fake_ops_kernel_info_store.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "external/ge/ge_api_error_codes.h" +#include "ge_running_env/fake_ops_kernel_info_store.h" + +FAKE_NS_BEGIN + +FakeOpsKernelInfoStore::FakeOpsKernelInfoStore(const std::string &info_store_name) : InfoStoreHolder(info_store_name) {} + +FakeOpsKernelInfoStore::FakeOpsKernelInfoStore() : InfoStoreHolder() {} + +Status FakeOpsKernelInfoStore::Finalize() { + op_info_map_.clear(); + return SUCCESS; +} + +Status FakeOpsKernelInfoStore::Initialize(const std::map &options) { return SUCCESS; } + +void FakeOpsKernelInfoStore::GetAllOpsKernelInfo(map &infos) const { infos = op_info_map_; } + +bool FakeOpsKernelInfoStore::CheckSupported(const OpDescPtr &op_desc, std::string &) const { + if (op_desc == nullptr) { + return false; + } + return op_info_map_.count(op_desc->GetType()) > 0; +} + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/src/engine/info_store_holder.cc b/tests/framework/ge_running_env/src/engine/info_store_holder.cc new file mode 100644 index 00000000..231af93a --- /dev/null +++ b/tests/framework/ge_running_env/src/engine/info_store_holder.cc @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ge_running_env/info_store_holder.h" +FAKE_NS_BEGIN + +namespace { +std::string GenStoreName() { + static int store_id = 0; + return "store_" + std::to_string(store_id++); +} +} // namespace + +InfoStoreHolder::InfoStoreHolder(const std::string& kernel_lib_name) : kernel_lib_name_(kernel_lib_name) {} + +InfoStoreHolder::InfoStoreHolder() : kernel_lib_name_(GenStoreName()) {} + +void InfoStoreHolder::RegistOp(std::string op_type) { + OpInfo default_op_info = {.engine = engine_name_, + .opKernelLib = kernel_lib_name_, + .computeCost = 0, + .flagPartial = false, + .flagAsync = false, + .isAtomic = false}; + + auto iter = op_info_map_.find(op_type); + if (iter == op_info_map_.end()) { + op_info_map_.emplace(op_type, default_op_info); + } +} + +void InfoStoreHolder::EngineName(std::string engine_name) { engine_name_ = engine_name; } + +std::string InfoStoreHolder::GetLibName() { return kernel_lib_name_; } + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/src/env/ge_default_running_env.cc b/tests/framework/ge_running_env/src/env/ge_default_running_env.cc new file mode 100644 index 00000000..ab705f55 --- /dev/null +++ b/tests/framework/ge_running_env/src/env/ge_default_running_env.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ge_default_running_env.h"
+#include "ge_running_env/ge_running_env_faker.h"
+#include "ge_running_env/fake_op.h"
+
+FAKE_NS_BEGIN
+namespace {
+std::vector<FakeEngine> default_engines = {FakeEngine("AIcoreEngine").KernelInfoStore("AiCoreLib"),
+                                           FakeEngine("VectorEngine").KernelInfoStore("VectorLib"),
+                                           FakeEngine("DNN_VM_AICPU").KernelInfoStore("AicpuLib"),
+                                           FakeEngine("DNN_VM_AICPU_ASCEND").KernelInfoStore("AicpuAscendLib"),
+                                           FakeEngine("DNN_HCCL").KernelInfoStore("HcclLib"),
+                                           FakeEngine("DNN_VM_RTS").KernelInfoStore("RTSLib")};
+
+std::vector<FakeOp> fake_ops = {
+    FakeOp(ENTER).InfoStoreAndBuilder("RTSLib"),        FakeOp(MERGE).InfoStoreAndBuilder("RTSLib"),
+    FakeOp(SWITCH).InfoStoreAndBuilder("RTSLib"),       FakeOp(LOOPCOND).InfoStoreAndBuilder("RTSLib"),
+    FakeOp(STREAMMERGE).InfoStoreAndBuilder("RTSLib"),  FakeOp(STREAMSWITCH).InfoStoreAndBuilder("RTSLib"),
+    FakeOp(STREAMACTIVE).InfoStoreAndBuilder("RTSLib"), FakeOp(EXIT).InfoStoreAndBuilder("RTSLib"),
+
+    FakeOp(LESS).InfoStoreAndBuilder("AiCoreLib"),      FakeOp(NEXTITERATION).InfoStoreAndBuilder("AiCoreLib"),
+    FakeOp(CAST).InfoStoreAndBuilder("AiCoreLib"),      FakeOp(TRANSDATA).InfoStoreAndBuilder("AiCoreLib"),
+    FakeOp(NOOP).InfoStoreAndBuilder("AiCoreLib"),      FakeOp(VARIABLE).InfoStoreAndBuilder("AiCoreLib"),
+    FakeOp(CONSTANT).InfoStoreAndBuilder("AiCoreLib"),  FakeOp(ASSIGN).InfoStoreAndBuilder("AiCoreLib"),
+    FakeOp(ADD).InfoStoreAndBuilder("AiCoreLib"),       FakeOp(MUL).InfoStoreAndBuilder("AiCoreLib"),
+    FakeOp(DATA).InfoStoreAndBuilder("AiCoreLib"),      FakeOp(NETOUTPUT).InfoStoreAndBuilder("AiCoreLib"),
+};
+}  // namespace
+
+void GeDefaultRunningEnv::InstallTo(GeRunningEnvFaker& ge_env) {
+  for (auto& fake_engine : default_engines) {
+    ge_env.Install(fake_engine);
+  }
+
+  for (auto& fake_op : fake_ops) {
+    ge_env.Install(fake_op);
+  }
+}
+
+FAKE_NS_END
\ No newline at end of file

diff --git a/tests/framework/ge_running_env/src/env/ge_default_running_env.h b/tests/framework/ge_running_env/src/env/ge_default_running_env.h
new file mode 100644
index 00000000..b93c528a
--- /dev/null
+++ b/tests/framework/ge_running_env/src/env/ge_default_running_env.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
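Because the default environment is just the two tables above, a test that needs extra capability can install the defaults first and layer more on top; Install() returns the faker, so calls chain. A sketch, where the engine, lib, and op names are invented for illustration:

    GeRunningEnvFaker ge_env;
    ge_env.InstallDefault();  // the engines and ops listed above
    // Layer an additional engine and op on top of the defaults:
    ge_env.Install(FakeEngine("MyEngine").KernelInfoStore("MyLib"))
          .Install(FakeOp("MyOp").InfoStoreAndBuilder("MyLib"));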
+ */ + +#ifndef INC_5D044B8760CB41ABA108AE2E37E8EBDE +#define INC_5D044B8760CB41ABA108AE2E37E8EBDE + +#include "ge_running_env/fake_ns.h" + +FAKE_NS_BEGIN + +struct GeRunningEnvFaker; + +struct GeDefaultRunningEnv { + static void InstallTo(GeRunningEnvFaker&); +}; + +FAKE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_running_env/src/env/ge_running_env_faker.cc b/tests/framework/ge_running_env/src/env/ge_running_env_faker.cc new file mode 100644 index 00000000..2977f6b2 --- /dev/null +++ b/tests/framework/ge_running_env/src/env/ge_running_env_faker.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "external/ge/ge_api.h" +#include "opskernel_manager/ops_kernel_builder_manager.h" +#include "init/gelib.h" +#include "utility" +#include "ge_running_env/ge_running_env_faker.h" +#include "ge_default_running_env.h" +#include "ge_running_env/env_installer.h" +#include "op/fake_op_repo.h" + +FAKE_NS_BEGIN + +namespace { +OpsKernelManager& getKernelManger() { + std::shared_ptr instancePtr = ge::GELib::GetInstance(); + return instancePtr->OpsKernelManagerObj(); +} + +struct InitEnv { + static InitEnv& GetInstance() { + static InitEnv instance; + return instance; + } + + void reset(std::map& ops_kernel_info_stores, + std::map& builders) { + std::set remove_info_names; + for (auto iter : ops_kernel_info_stores) { + if (kernel_info_names.find(iter.first) == kernel_info_names.end()) { + remove_info_names.insert(iter.first); + } + } + for (auto info_name : remove_info_names) { + ops_kernel_info_stores.erase(info_name); + builders.erase(info_name); + } + } + + private: + InitEnv() { + for (auto iter : getKernelManger().GetAllOpsKernelInfoStores()) { + kernel_info_names.insert(iter.first); + } + } + + private: + std::set kernel_info_names; +}; +} // namespace + +GeRunningEnvFaker::GeRunningEnvFaker() + : op_kernel_info_(const_cast>&>(getKernelManger().GetAllOpsKernelInfo())), + ops_kernel_info_stores_( + const_cast&>(getKernelManger().GetAllOpsKernelInfoStores())), + ops_kernel_optimizers_( + const_cast&>(getKernelManger().GetAllGraphOptimizerObjs())), + ops_kernel_builders_(const_cast&>( + OpsKernelBuilderManager::Instance().GetAllOpsKernelBuilders())) { + Reset(); +} + +GeRunningEnvFaker& GeRunningEnvFaker::Reset() { + InitEnv& init_env = InitEnv::GetInstance(); + FakeOpRepo::Reset(); + init_env.reset(ops_kernel_info_stores_, ops_kernel_builders_); + flush(); + return *this; +} + +void GeRunningEnvFaker::BackupEnv() { InitEnv::GetInstance(); } + +GeRunningEnvFaker& GeRunningEnvFaker::Install(const EnvInstaller& installer) { + installer.Install(); + installer.InstallTo(ops_kernel_info_stores_); + installer.InstallTo(ops_kernel_optimizers_); + installer.InstallTo(ops_kernel_builders_); + flush(); + return *this; +} + +void GeRunningEnvFaker::flush() { + op_kernel_info_.clear(); + getKernelManger().GetOpsKernelInfo(""); +} + +GeRunningEnvFaker& 
GeRunningEnvFaker::InstallDefault() { + Reset(); + GeDefaultRunningEnv::InstallTo(*this); + return *this; +} + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/src/op/fake_op.cc b/tests/framework/ge_running_env/src/op/fake_op.cc new file mode 100644 index 00000000..52bbee8d --- /dev/null +++ b/tests/framework/ge_running_env/src/op/fake_op.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ge_running_env/fake_op.h" +#include "fake_op_repo.h" +#include "ge_running_env/info_store_holder.h" +#include "graph/operator_factory.h" + +FAKE_NS_BEGIN + +FakeOp::FakeOp(const std::string& op_type) : op_type_(op_type) {} + +FakeOp& FakeOp::Inputs(const std::vector& inputs) { + inputs_ = inputs; + return *this; +} + +FakeOp& FakeOp::Outputs(const std::vector& outputs) { + outputs_ = outputs; + return *this; +} + +FakeOp& FakeOp::InferShape(InferShapeFunc infer_fun) { + info_fun_ = infer_fun; + return *this; +} + +FakeOp& FakeOp::InfoStoreAndBuilder(const std::string& name) { + info_store_names_.insert(name); + return *this; +} + +namespace { + +void RegistOpToInfoStore(OpsKernelInfoStorePtr& info_store, const std::string& op_type) { + if (info_store == nullptr) { + return; + } + auto holder = dynamic_cast(info_store.get()); + holder->RegistOp(op_type); +} + +struct FakeOperator : Operator { + FakeOperator(const std::string& op_type) : Operator(op_type) {} + + FakeOperator& RegistInputs(const std::vector& inputs) { + for (auto& input : inputs) { + Operator::InputRegister(input); + } + return *this; + } + + FakeOperator& RegistOutputs(const std::vector& outputs) { + for (auto& output : outputs) { + Operator::OutputRegister(output); + } + return *this; + } +}; +} // namespace + +void FakeOp::InstallTo(std::map& info_stores) const { + std::for_each(info_store_names_.begin(), info_store_names_.end(), [=, &info_stores](auto& info_store_name) { + auto iter = info_stores.find(info_store_name); + if (iter != info_stores.end()) { + RegistOpToInfoStore(iter->second, op_type_); + } + }); +} + +void FakeOp::Install() const { + FakeOpRepo::Regist( + op_type_, + [op_type = this->op_type_, inputs = this->inputs_, outputs = this->outputs_](const std::string&) -> Operator { + return FakeOperator(op_type).RegistInputs(inputs).RegistOutputs(outputs); + }); + if (info_fun_) { + FakeOpRepo::Regist(op_type_, info_fun_); + } +} + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/src/op/fake_op_repo.cc b/tests/framework/ge_running_env/src/op/fake_op_repo.cc new file mode 100644 index 00000000..7d571b8b --- /dev/null +++ b/tests/framework/ge_running_env/src/op/fake_op_repo.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
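A detail worth noting in FakeOp::Install above: the creator lambda captures op_type_, inputs_ and outputs_ by value, so the FakeOp itself may be a temporary and the registered creator keeps working after it is gone. That is what makes the fluent one-liner style safe, as in this sketch:

    GeRunningEnvFaker ge_env;
    // The temporary FakeOp dies at the semicolon; the creator it registered
    // still rebuilds a FakeOperator with two inputs and one output each time.
    ge_env.Install(FakeOp(ADD).Inputs({"x1", "x2"}).Outputs({"y"}));
    auto add = OperatorFactory::CreateOperator("add1", ADD);  // GetInputsSize() == 2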
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "graph/operator_factory_impl.h" +#include "ge_running_env/fake_op.h" +#include "fake_op_repo.h" + +FAKE_NS_BEGIN + +void FakeOpRepo::Reset() { + if (OperatorFactoryImpl::operator_creators_) { + OperatorFactoryImpl::operator_creators_->clear(); + } + if (OperatorFactoryImpl::operator_infershape_funcs_) { + OperatorFactoryImpl::operator_infershape_funcs_->clear(); + } +} + +void FakeOpRepo::Regist(const std::string &operator_type, const OpCreator creator) { + OperatorFactoryImpl::RegisterOperatorCreator(operator_type, creator); +} +void FakeOpRepo::Regist(const std::string &operator_type, const InferShapeFunc infer_fun) { + OperatorFactoryImpl::RegisterInferShapeFunc(operator_type, infer_fun); +} + +FAKE_NS_END \ No newline at end of file diff --git a/tests/framework/ge_running_env/src/op/fake_op_repo.h b/tests/framework/ge_running_env/src/op/fake_op_repo.h new file mode 100644 index 00000000..345515e4 --- /dev/null +++ b/tests/framework/ge_running_env/src/op/fake_op_repo.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef DBF6CE7CD4AC4A83BA4ED4B372FC66E4 +#define DBF6CE7CD4AC4A83BA4ED4B372FC66E4 + +#include "ge_running_env/fake_ns.h" +#include "graph/operator_factory.h" + +FAKE_NS_BEGIN + +struct FakeOpRepo { + static void Reset(); + static void Regist(const std::string &operator_type, const OpCreator); + static void Regist(const std::string &operator_type, const InferShapeFunc); +}; + +FAKE_NS_END +#endif \ No newline at end of file diff --git a/tests/framework/ge_running_env/tests/CMakeLists.txt b/tests/framework/ge_running_env/tests/CMakeLists.txt new file mode 100644 index 00000000..67a9bd70 --- /dev/null +++ b/tests/framework/ge_running_env/tests/CMakeLists.txt @@ -0,0 +1,33 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
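FakeOpRepo::Reset clears the global creator and infer-shape registries, which is why GeRunningEnvFaker::Reset leaves OperatorFactory empty (the first test below asserts an ops list of size 0). Re-registering after a reset is a one-liner; this sketch registers a bare pass-through creator under an illustrative "Identity" type:

    FakeOpRepo::Reset();  // operator registry is now empty
    FakeOpRepo::Regist("Identity", [](const std::string &) -> Operator {
      return Operator("Identity");  // sketch: no inputs or outputs registered
    });
    auto op = OperatorFactory::CreateOperator("id1", "Identity");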
+# ============================================================================ + +file(GLOB_RECURSE SOURCES CONFIGURE_DEPENDS "*.cc" "*.CC" "*.cpp" "*.CPP") + +add_executable(ge_running_env_test ${SOURCES}) + +target_include_directories(ge_running_env_test + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} +) + +target_compile_options(ge_running_env_test PRIVATE + -g +) +set_target_properties(ge_running_env_test PROPERTIES CXX_STANDARD 17) + +target_link_libraries(ge_running_env_test PUBLIC gtest ge_with_env) + +include(CTest) +enable_testing() +add_test(NAME test COMMAND ge_running_env_test) \ No newline at end of file diff --git a/tests/framework/ge_running_env/tests/test_ge_running_env_faker.cc b/tests/framework/ge_running_env/tests/test_ge_running_env_faker.cc new file mode 100644 index 00000000..4429f4a7 --- /dev/null +++ b/tests/framework/ge_running_env/tests/test_ge_running_env_faker.cc @@ -0,0 +1,148 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "graph/operator_factory_impl.h" +#include "init/gelib.h" +#include "external/ge/ge_api.h" +#include "opskernel_manager/ops_kernel_builder_manager.h" +#include "ge_running_env/fake_ops_kernel_builder.h" +#include "ge_running_env/fake_ns.h" +#include "ge_running_env/ge_running_env_faker.h" +#include "ge_running_env/fake_op.h" +FAKE_NS_BEGIN + +#define ASSERT_OPS_LIST_SIZE(list_size) \ + std::vector ops_list; \ + OperatorFactory::GetOpsTypeList(ops_list);\ + ASSERT_EQ(ops_list.size(), list_size); + +class GeRunningEvnFakerTest : public testing::Test { + protected: + void SetUp() {} + OpsKernelManager &kernel_manager = ge::GELib::GetInstance()->OpsKernelManagerObj(); + OpsKernelBuilderManager &builder_manager = OpsKernelBuilderManager::Instance(); +}; + +TEST_F(GeRunningEvnFakerTest, test_reset_running_env_is_success) { + GeRunningEnvFaker ge_env; + ge_env.Reset(); + ASSERT_OPS_LIST_SIZE(0); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 1); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 1); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 52); + ASSERT_EQ(kernel_manager.GetOpsKernelInfo(SWITCH).size(), 1); +} + +TEST_F(GeRunningEvnFakerTest, test_install_fake_op_success) { + GeRunningEnvFaker ge_env; + ge_env.Install(FakeOp(DATA)).Install(FakeOp(SWITCH)); + ASSERT_OPS_LIST_SIZE(2); + ASSERT_TRUE(OperatorFactory::IsExistOp(DATA)); + ASSERT_TRUE(OperatorFactory::IsExistOp(SWITCH)); +} + +TEST_F(GeRunningEvnFakerTest, test_install_fake_op_with_inputs_and_outputs_success) { + GeRunningEnvFaker ge_env; + ge_env.Install(FakeOp(ADD).Inputs({"x1", "x2"}).Outputs({"y"})); + + auto add1 = OperatorFactory::CreateOperator("add1", ADD); + + ASSERT_EQ(add1.GetInputsSize(), 2); + ASSERT_EQ(add1.GetOutputsSize(), 1); + ASSERT_OPS_LIST_SIZE(1); +} + +TEST_F(GeRunningEvnFakerTest, test_install_fake_op_with_infer_shape_success) { + GeRunningEnvFaker ge_env; + auto infer_fun = [](Operator &op) -> graphStatus { + TensorDesc input_desc = 
op.GetInputDescByName("data"); + return GRAPH_SUCCESS; + }; + ASSERT_TRUE(OperatorFactoryImpl::GetInferShapeFunc(DATA) == nullptr); + + ge_env.Install(FakeOp(DATA).Inputs({"data"}).InferShape(infer_fun)); + + ASSERT_TRUE(OperatorFactoryImpl::GetInferShapeFunc(DATA) != nullptr); +} + +TEST_F(GeRunningEvnFakerTest, test_install_engine_with_default_info_store) { + GeRunningEnvFaker ge_env; + ge_env.Install(FakeEngine("DNN_HCCL")); + + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 2); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 2); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 52); + ASSERT_EQ(kernel_manager.GetOpsKernelInfo(SWITCH).size(), 1); +} + +TEST_F(GeRunningEvnFakerTest, test_install_engine_with_info_store_name) { + GeRunningEnvFaker ge_env; + ge_env.Install(FakeEngine("DNN_HCCL").KernelInfoStore("AiCoreLib2")) + .Install(FakeOp(SWITCH).InfoStoreAndBuilder("AiCoreLib2")); + + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 2); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 2); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 52); + ASSERT_EQ(kernel_manager.GetOpsKernelInfo(SWITCH).size(), 2); +} + +TEST_F(GeRunningEvnFakerTest, test_install_custom_kernel_builder_success) { + struct FakeKernelBuilder : FakeOpsKernelBuilder { + Status CalcOpRunningParam(Node &node) override { + OpDescPtr op_desc = node.GetOpDesc(); + if (op_desc == nullptr) { + return FAILED; + } + return SUCCESS; + } + }; + + GeRunningEnvFaker ge_env; + auto ai_core_kernel = FakeEngine("DNN_HCCL").KernelBuilder(std::make_shared()); + ge_env.Reset().Install(ai_core_kernel); + + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 2); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 2); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 52); +} + +TEST_F(GeRunningEvnFakerTest, test_install_custom_kernel_info_store_success) { + struct FakeKernelBuilder : FakeOpsKernelInfoStore { + FakeKernelBuilder(const std::string &kernel_lib_name) : FakeOpsKernelInfoStore(kernel_lib_name) {} + + bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override { return FAILED; } + }; + + GeRunningEnvFaker ge_env; + auto ai_core_kernel = FakeEngine("DNN_HCCL").KernelInfoStore(std::make_shared("AiCoreLib2")); + ge_env.Reset().Install(ai_core_kernel); + + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 2); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 2); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 52); +} + +TEST_F(GeRunningEvnFakerTest, test_install_default_fake_engine_success) { + GeRunningEnvFaker ge_env; + ge_env.InstallDefault(); + + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfoStores().size(), 7); + ASSERT_EQ(builder_manager.GetAllOpsKernelBuilders().size(), 7); + ASSERT_EQ(kernel_manager.GetAllOpsKernelInfo().size(), 66); +} + +FAKE_NS_END diff --git a/tests/framework/ge_running_env/tests/test_main.cc b/tests/framework/ge_running_env/tests/test_main.cc new file mode 100644 index 00000000..ede79c75 --- /dev/null +++ b/tests/framework/ge_running_env/tests/test_main.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
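One trap lurking in the custom info-store test above: CheckSupported returns bool, and ge::FAILED is a non-zero status code, so `return FAILED;` converts to true instead of signalling "unsupported". An override that genuinely rejects ops would look like this sketch:

    struct RejectingInfoStore : FakeOpsKernelInfoStore {
      explicit RejectingInfoStore(const std::string &name) : FakeOpsKernelInfoStore(name) {}
      bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override {
        reason = "rejected by test stub";
        return false;  // not FAILED, which is non-zero and therefore truthy
      }
    };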
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "common/debug/log.h" +#include "external/ge/ge_api.h" +#include "ge_running_env/ge_running_env_faker.h" + +using namespace std; +using namespace ge; + +int main(int argc, char **argv) { + map options; + ge::GEInitialize(options); + GeRunningEnvFaker::BackupEnv(); + testing::InitGoogleTest(&argc, argv); + int ret = RUN_ALL_TESTS(); + + return ret; +} diff --git a/tests/framework/stub_engine/CMakeLists.txt b/tests/framework/stub_engine/CMakeLists.txt deleted file mode 100644 index c86313c7..00000000 --- a/tests/framework/stub_engine/CMakeLists.txt +++ /dev/null @@ -1,58 +0,0 @@ -list(APPEND INCLUDE_DIRECTORIES - "${CMAKE_CURRENT_SOURCE_DIR}" - "${GE_CODE_DIR}" - "${GE_CODE_DIR}/inc" - "${GE_CODE_DIR}/metadef/inc" - "${GE_CODE_DIR}/ge" - "${GE_CODE_DIR}/ge/inc" - "${GE_CODE_DIR}/ge/ir_build" - "${GE_CODE_DIR}/metadef" - "${GE_CODE_DIR}/metadef/graph" - "${GE_CODE_DIR}/inc/external" - "${GE_CODE_DIR}/inc/framework/common" - "${GE_CODE_DIR}/metadef/inc/external" - "${GE_CODE_DIR}/metadef/inc/external/graph" - "${GE_CODE_DIR}/metadef/inc/graph" - "${GE_CODE_DIR}/inc/framework" - "${GE_CODE_DIR}/metadef/inc/common" - "${GE_CODE_DIR}/metadef/third_party" - "${GE_CODE_DIR}/metadef/third_party/transformer/inc" - "${GE_CODE_DIR}/parser" - "${GE_CODE_DIR}/parser/parser" - "${GE_CODE_DIR}/third_party/fwkacllib/inc" - "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" - "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" - "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" - "${GE_CODE_DIR}/tests/ut/ge" - "${GE_CODE_DIR}/tests/ut/common" - "${CMAKE_BINARY_DIR}" - "${CMAKE_BINARY_DIR}/proto/ge" - "${CMAKE_BINARY_DIR}/proto/ge/proto" - ) - -file(GLOB_RECURSE SOURCES CONFIGURE_DEPENDS "*.cc" "*.CC" "*.cpp" "*.CPP" "*.c++") - -# ---- Target : stub Host engine ---- -add_library(fe SHARED ${SOURCES}) - -target_include_directories(fe - PUBLIC - ${INCLUDE_DIRECTORIES} - ${CMAKE_CURRENT_SOURCE_DIR} - ) - -target_compile_definitions(fe PRIVATE - google=ascend_private - FMK_SUPPORT_DUMP - ) - -target_compile_options(fe PRIVATE - -g --coverage -fprofile-arcs -ftest-coverage - -Werror=format - ) - -target_link_libraries(fe PUBLIC - $ ${STUB_LIBS} metadef_graph -lmmpa -L${GE_CODE_DIR}/third_party/prebuild/x86_64 -lrt -ldl -lpthread -lgcov - ) - -set_target_properties(fe PROPERTIES CXX_STANDARD 11) diff --git a/tests/framework/stub_engine/engine/stub_engine.cc b/tests/framework/stub_engine/engine/stub_engine.cc deleted file mode 100644 index 622e8c4e..00000000 --- a/tests/framework/stub_engine/engine/stub_engine.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
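The ordering in the test main above is load-bearing: BackupEnv snapshots the kernel-info-store names that GEInitialize created, and every later Reset prunes anything not in that snapshot. Reduced to its essentials, with the options map's template arguments written out as std::map<std::string, std::string>:

    int main(int argc, char **argv) {
      std::map<std::string, std::string> options;
      if (ge::GEInitialize(options) != ge::SUCCESS) {
        return -1;  // sketch: the original only logs the failure and continues
      }
      GeRunningEnvFaker::BackupEnv();  // snapshot must come after GEInitialize
      testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }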
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "stub_engine.h" -#include -#include -#include -#include -#include "framework/common/debug/ge_log.h" -#include "common/ge/ge_util.h" -#include "inc/st_types.h" - -namespace ge { -namespace st { -StubEngine &StubEngine::Instance() { - static StubEngine instance; - return instance; -} - -Status StubEngine::Initialize(const std::map &options) { - for (const auto engine_2_lib : kStubEngine2KernelLib) { - auto ops_kernel_store = MakeShared(engine_2_lib.second); - if (ops_kernel_store == nullptr) { - return FAILED; - } - ops_kernel_store_map_.insert(make_pair(engine_2_lib.second, ops_kernel_store)); - } - return SUCCESS; -} - -void StubEngine::GetOpsKernelInfoStores(std::map &ops_kernel_map) { - for (const auto name_2_ops_kernel_store : ops_kernel_store_map_) { - ops_kernel_map[name_2_ops_kernel_store.first] = name_2_ops_kernel_store.second; - } -} - -void StubEngine::GetGraphOptimizerObjs(std::map &) { - // no optimizer for host cpu engine -} - -Status StubEngine::Finalize() { - return SUCCESS; -} -} // namespace st -} // namespace ge - -ge::Status Initialize(const std::map &options) { - return ge::st::StubEngine::Instance().Initialize(options); -} - -void GetOpsKernelInfoStores(std::map &ops_kernel_map) { - ge::st::StubEngine::Instance().GetOpsKernelInfoStores(ops_kernel_map); -} - -void GetGraphOptimizerObjs(std::map &graph_optimizers) { - ge::st::StubEngine::Instance().GetGraphOptimizerObjs(graph_optimizers); -} - -ge::Status Finalize() { - return ge::st::StubEngine::Instance().Finalize(); -} diff --git a/tests/framework/stub_engine/engine/stub_engine.h b/tests/framework/stub_engine/engine/stub_engine.h deleted file mode 100644 index d3909115..00000000 --- a/tests/framework/stub_engine/engine/stub_engine.h +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GRAPH_ENGINE_LLT_STUB_ENGINE_H_ -#define GRAPH_ENGINE_LLT_STUB_ENGINE_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include -#include -#include "inc/st_types.h" -#include "common/opskernel/ops_kernel_info_store.h" -#include "common/optimizer/graph_optimizer.h" -#include "stub_engine/ops_kernel_store/stub_ops_kernel_store.h" - -using OpsKernelInfoStorePtr = std::shared_ptr; -using StubOpsKernelInfoStorePtr = std::shared_ptr; -using GraphOptimizerPtr = std::shared_ptr; - -namespace ge { -namespace st { -/** - * host cpu engine. - * Used for the ops which executes on host. - */ -class GE_FUNC_VISIBILITY StubEngine { - public: - /** - * get StubEngine instance. - * @return StubEngine instance. 
- */ - static StubEngine &Instance(); - - virtual ~StubEngine() = default; - - /** - * When Ge start, GE will invoke this interface - * @return The status whether initialize successfully - */ - Status Initialize(const std::map &options); - - /** - * After the initialize, GE will invoke this interface - * to get the Ops kernel Store. - * @param ops_kernel_map The host cpu's ops kernel info - */ - void GetOpsKernelInfoStores(std::map &ops_kernel_map); - - /** - * After the initialize, GE will invoke this interface - * to get the Graph Optimizer. - * @param graph_optimizers The host cpu's Graph Optimizer objs - */ - void GetGraphOptimizerObjs(std::map &graph_optimizers); - - /** - * When the graph finished, GE will invoke this interface - * @return The status whether initialize successfully - */ - Status Finalize(); - - StubEngine(const StubEngine &StubEngine) = delete; - StubEngine(const StubEngine &&StubEngine) = delete; - StubEngine &operator=(const StubEngine &StubEngine) = delete; - StubEngine &operator=(StubEngine &&StubEngine) = delete; - - private: - StubEngine() = default; - map ops_kernel_store_map_; -}; -} // namespace st -} // namespace ge - -extern "C" { - -/** - * When Ge start, GE will invoke this interface - * @return The status whether initialize successfully - */ -GE_FUNC_VISIBILITY ge::Status Initialize(const map &options); - -/** - * After the initialize, GE will invoke this interface to get the Ops kernel Store - * @param ops_kernel_map The host cpu's ops kernel info - */ -GE_FUNC_VISIBILITY void GetOpsKernelInfoStores(std::map &ops_kernel_map); - -/** - * After the initialize, GE will invoke this interface to get the Graph Optimizer - * @param graph_optimizers The host cpu's Graph Optimizer objs - */ -GE_FUNC_VISIBILITY void GetGraphOptimizerObjs(std::map &graph_optimizers); - -/** - * When the graph finished, GE will invoke this interface - * @return The status whether initialize successfully - */ -GE_FUNC_VISIBILITY ge::Status Finalize(); -} - -#endif // GRAPH_ENGINE_LLT_STUB_ENGINE_H_ diff --git a/tests/framework/stub_engine/inc/st_types.h b/tests/framework/stub_engine/inc/st_types.h deleted file mode 100644 index 92aa00d9..00000000 --- a/tests/framework/stub_engine/inc/st_types.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
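The extern "C" block in the removed stub_engine.h is the plugin contract GE relies on: engine libraries are opened at runtime and the entry points are resolved by name, which is why they must not be name-mangled. A rough loader-side sketch under POSIX, assuming the library name follows the `fe` CMake target above:

    #include <dlfcn.h>
    #include <map>
    #include <string>

    void LoadEngineSketch() {
      void *handle = dlopen("libfe.so", RTLD_NOW);  // library name assumed from the target
      if (handle == nullptr) {
        return;
      }
      using InitFunc = ge::Status (*)(const std::map<std::string, std::string> &);
      auto init = reinterpret_cast<InitFunc>(dlsym(handle, "Initialize"));
      if (init != nullptr) {
        (void)init({});  // options map empty for the sketch
      }
    }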
- */ -#ifndef GRAPHENGINE_ST_TYPES_H -#define GRAPHENGINE_ST_TYPES_H -#include -namespace ge { -namespace st { -const std::string kAicoreLibName = "AiCoreLib"; -const std::string kVectorLibName = "VectorLib"; -const std::string kAicpuLibName = "AicpuLib"; -const std::string kAicpuAscendLibName = "AicpuAscendLib"; -const std::string kHcclLibName = "HcclLib"; -const std::string kRTSLibName = "RTSLib"; -const std::map kStubEngine2KernelLib = { - {"AIcoreEngine", "AiCoreLib"}, {"VectorEngine", "VectorLib"}, - {"DNN_VM_AICPU", "AicpuLib"}, {"DNN_VM_AICPU_ASCEND", "AicpuAscendLib"}, - {"DNN_HCCL", "HcclLib"}, {"DNN_VM_RTS", "RTSLib"}}; -} // namespace st -} // namespace ge -#endif // GRAPHENGINE_ST_TYPES_H diff --git a/tests/framework/stub_engine/ops_kernel_store/op/host_op.cc b/tests/framework/stub_engine/ops_kernel_store/op/host_op.cc deleted file mode 100644 index 42678148..00000000 --- a/tests/framework/stub_engine/ops_kernel_store/op/host_op.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "inc/st_types.h" -#include "stub_engine/ops_kernel_store/op/host_op.h" -#include "framework/common/util.h" -#include "stub_engine/ops_kernel_store/op/stub_op_factory.h" - -namespace ge { -namespace st { -Status HostOp::Run() { - // no need to generate device task - return SUCCESS; -} -REGISTER_OP_CREATOR(Enter, RTSLib, HostOp); -REGISTER_OP_CREATOR(Merge, RTSLib, HostOp); -REGISTER_OP_CREATOR(Switch, RTSLib, HostOp); -REGISTER_OP_CREATOR(Less, AiCoreLib, HostOp); -REGISTER_OP_CREATOR(NextIteration, AiCoreLib, HostOp); -REGISTER_OP_CREATOR(LoopCond, RTSLib, HostOp); -REGISTER_OP_CREATOR(Exit, RTSLib, HostOp); -REGISTER_OP_CREATOR(StreamMerge, RTSLib, HostOp); -REGISTER_OP_CREATOR(StreamSwitch, RTSLib, HostOp); -REGISTER_OP_CREATOR(StreamActive, RTSLib, HostOp); -REGISTER_OP_CREATOR(Cast, AiCoreLib, HostOp); -REGISTER_OP_CREATOR(Transdata, AiCoreLib, HostOp); -} // namespace st -} // namespace ge diff --git a/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.cc b/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.cc deleted file mode 100644 index 601bca4d..00000000 --- a/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.cc +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
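kStubEngine2KernelLib, declared as a std::map<std::string, std::string>, was consulted in both directions: forward to create one info store per engine, and backwards (in StubOpsKernelInfoStore::Initialize below) to recover the engine name for a store. A forward lookup was simply:

    auto it = ge::st::kStubEngine2KernelLib.find("DNN_VM_RTS");
    const std::string lib_name =
        (it == ge::st::kStubEngine2KernelLib.end()) ? "" : it->second;  // "RTSLib"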
- */ - -#include "stub_op_factory.h" -#include "framework/common/debug/ge_log.h" -#include "common/ge_inner_error_codes.h" -#include "graph/op_desc.h" - -namespace ge { -namespace st { -OpFactory &OpFactory::Instance() { - static OpFactory instance; - return instance; -} - -std::shared_ptr OpFactory::CreateOp(const Node &node, RunContext &run_context) { - auto iter = op_creator_map_.find(node.GetType()); - if (iter != op_creator_map_.end()) { - return iter->second(node, run_context); - } - GELOGE(FAILED, "Not supported OP, type = %s, name = %s", node.GetType().c_str(), node.GetName().c_str()); - return nullptr; -} - -void OpFactory::RegisterCreator(const std::string &type, const std::string &kernel_lib, const OP_CREATOR_FUNC &func) { - if (func == nullptr) { - GELOGW("Func is NULL."); - return; - } - - if (all_store_ops_.find(kernel_lib) != all_store_ops_.end()) { - all_store_ops_[kernel_lib].emplace_back(type); - } else { - all_store_ops_[kernel_lib] = {type}; - } -} -} // namespace st -} // namespace ge diff --git a/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.h b/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.h deleted file mode 100644 index f41fd07e..00000000 --- a/tests/framework/stub_engine/ops_kernel_store/op/stub_op_factory.h +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_FACTORY_H_ -#define GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_FACTORY_H_ - -#include -#include -#include -#include -#include -#include "common/ge/ge_util.h" -#include "stub_engine/ops_kernel_store/op/op.h" -#include "inc/st_types.h" - -namespace ge { -namespace st { -using OP_CREATOR_FUNC = std::function(const Node &, RunContext &)>; - -/** - * manage all the op, support create op. - */ -class GE_FUNC_VISIBILITY OpFactory { - public: - static OpFactory &Instance(); - - /** - * @brief create Op. - * @param [in] node share ptr of node - * @param [in] run_context run context - * @return not nullptr success - * @return nullptr fail - */ - std::shared_ptr CreateOp(const Node &node, RunContext &run_context); - - /** - * @brief Register Op create function. 
- * @param [in] type Op type - * @param [in] func Op create func - */ - void RegisterCreator(const std::string &type, const std::string &lib_name, const OP_CREATOR_FUNC &func); - - const std::vector &GetAllOps() const { - return all_ops_; - } - - const std::vector &GetAllOps(std::string lib_name) const { - auto iter = all_store_ops_.find(lib_name); - if (iter == all_store_ops_.end()) { - return all_ops_; - } - return iter->second; - } - - bool CheckSupported(const std::string &type) { - return op_creator_map_.find(type) != op_creator_map_.end(); - } - - OpFactory(const OpFactory &) = delete; - OpFactory &operator=(const OpFactory &) = delete; - OpFactory(OpFactory &&) = delete; - OpFactory &operator=(OpFactory &&) = delete; - - private: - OpFactory() = default; - ~OpFactory() = default; - - // the op creator function map - std::map op_creator_map_; - std::map> lib_op_creator_map_; - std::vector all_ops_; - std::map> all_store_ops_; -}; - -class GE_FUNC_VISIBILITY OpRegistrar { - public: - OpRegistrar(const std::string &type, const std::string &kernel_lib, const OP_CREATOR_FUNC &func) { - OpFactory::Instance().RegisterCreator(type, kernel_lib, func); - } - ~OpRegistrar() = default; - - OpRegistrar(const OpRegistrar &) = delete; - OpRegistrar &operator=(const OpRegistrar &) = delete; - OpRegistrar(OpRegistrar &&) = delete; - OpRegistrar &operator=(OpRegistrar &&) = delete; -}; - -#define REGISTER_OP_CREATOR(type, lib_name, clazz) \ - std::shared_ptr Creator_##type##Op(const Node &node, RunContext &run_context) { \ - return MakeShared(node, run_context); \ - } \ - OpRegistrar g_##type##Op_creator(#type, #lib_name, Creator_##type##Op) -} // namespace st -} // namespace ge - -#endif // GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_OP_OP_FACTORY_H_ diff --git a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.cc b/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.cc deleted file mode 100644 index d43fee88..00000000 --- a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
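For reference, REGISTER_OP_CREATOR stamped out a free creator function plus a global registrar, so REGISTER_OP_CREATOR(Enter, RTSLib, HostOp) expanded to roughly the following. Note that RegisterCreator above only ever filled all_store_ops_ and never op_creator_map_, so CreateOp and CheckSupported on this factory could not actually find a creator; one more reason the stub engine gave way to the fake running env.

    // Approximate expansion of REGISTER_OP_CREATOR(Enter, RTSLib, HostOp).
    std::shared_ptr<Op> Creator_EnterOp(const Node &node, RunContext &run_context) {
      return MakeShared<HostOp>(node, run_context);
    }
    OpRegistrar g_EnterOp_creator("Enter", "RTSLib", Creator_EnterOp);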
- */ - -#include "stub_ops_kernel_store.h" -#include -#include "ge/ge_api_types.h" -#include "framework/common/debug/ge_log.h" -#include "graph/utils/node_utils.h" -#include "graph/utils/tensor_utils.h" -#include "graph/utils/type_utils.h" -#include "op/stub_op_factory.h" - -namespace ge { -namespace st { -using domi::TaskDef; -using std::map; -using std::string; -using std::vector; - -Status StubOpsKernelInfoStore::Initialize(const map &options) { - GELOGI("StubOpsKernelInfoStore init start."); - string engine_name; - for (const auto &engine_2_lib : kStubEngine2KernelLib) { - if (engine_2_lib.second == store_name_) { - engine_name = engine_2_lib.first; - } - } - if (engine_name.empty()) { - return FAILED; - } - - OpInfo default_op_info = {.engine = engine_name, - .opKernelLib = store_name_, - .computeCost = 0, - .flagPartial = false, - .flagAsync = false, - .isAtomic = false}; - // Init op_info_map_ - auto all_ops_in_store = OpFactory::Instance().GetAllOps(store_name_); - for (auto &op : all_ops_in_store) { - op_info_map_[op] = default_op_info; - } - - GELOGI("StubOpsKernelInfoStore inited success. op num=%zu", op_info_map_.size()); - return SUCCESS; -} - -Status StubOpsKernelInfoStore::Finalize() { - op_info_map_.clear(); - return SUCCESS; -} - -void StubOpsKernelInfoStore::GetAllOpsKernelInfo(map &infos) const { - infos = op_info_map_; -} - -bool StubOpsKernelInfoStore::CheckSupported(const OpDescPtr &op_desc, std::string &) const { - if (op_desc == nullptr) { - return false; - } - return op_info_map_.count(op_desc->GetType()) > 0; -} -} // namespace st -} // namespace ge diff --git a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.h b/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.h deleted file mode 100644 index ea7f712b..00000000 --- a/tests/framework/stub_engine/ops_kernel_store/stub_ops_kernel_store.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
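Putting the pieces together: Initialize above derived the engine name by reverse lookup in kStubEngine2KernelLib, then stamped the same default OpInfo onto every op type registered for the store's lib. What a caller observed, as a sketch:

    ge::st::StubOpsKernelInfoStore store("RTSLib");
    std::map<std::string, std::string> options;
    if (store.Initialize(options) == ge::SUCCESS) {
      std::map<std::string, ge::OpInfo> infos;
      store.GetAllOpsKernelInfo(infos);
      // One entry per REGISTER_OP_CREATOR(..., RTSLib, ...) above,
      // each tagged with engine "DNN_VM_RTS" and opKernelLib "RTSLib".
    }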
- */ - -#ifndef GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_INFO_H_ -#define GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_INFO_H_ - -#if defined(_MSC_VER) -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY _declspec(dllexport) -#else -#define GE_FUNC_VISIBILITY -#endif -#else -#ifdef FUNC_VISIBILITY -#define GE_FUNC_VISIBILITY __attribute__((visibility("default"))) -#else -#define GE_FUNC_VISIBILITY -#endif -#endif - -#include -#include -#include - -#include "common/opskernel/ops_kernel_info_store.h" - -namespace ge { -namespace st { -/*const vector kStubOpKernelLibNameVec = { - "AiCoreLib", - "AicpuLib", - "HcclLib", - "RTSLib" -};*/ -class GE_FUNC_VISIBILITY StubOpsKernelInfoStore : public OpsKernelInfoStore { - public: - StubOpsKernelInfoStore(std::string store_name) : store_name_(store_name) {} - ~StubOpsKernelInfoStore() override = default; - Status Initialize(const std::map &options) override; - Status Finalize() override; - bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override; - void GetAllOpsKernelInfo(std::map &infos) const override; - std::string GetOpsKernelStoreName() const { - return store_name_; - } - - StubOpsKernelInfoStore(const StubOpsKernelInfoStore &ops_kernel_store) = delete; - StubOpsKernelInfoStore(const StubOpsKernelInfoStore &&ops_kernel_store) = delete; - StubOpsKernelInfoStore &operator=(const StubOpsKernelInfoStore &ops_kernel_store) = delete; - StubOpsKernelInfoStore &operator=(StubOpsKernelInfoStore &&ops_kernel_store) = delete; - - private: - // store op name and OpInfo key-value pair - std::map op_info_map_; - std::string store_name_; -}; -} // namespace st -} // namespace ge - -#endif // GE_HOST_CPU_ENGINE_OPS_KERNEL_STORE_HOST_CPU_OPS_KERNEL_INFO_H_ diff --git a/tests/st/testcase/CMakeLists.txt b/tests/st/testcase/CMakeLists.txt index 9d1d5a0e..b3663708 100644 --- a/tests/st/testcase/CMakeLists.txt +++ b/tests/st/testcase/CMakeLists.txt @@ -8,7 +8,7 @@ target_include_directories(graph_engine_test set_target_properties(graph_engine_test PROPERTIES CXX_STANDARD 17) -target_link_libraries(graph_engine_test PRIVATE gtest gtest_main framework) +target_link_libraries(graph_engine_test PRIVATE gtest framework) include(CTest) enable_testing() diff --git a/tests/st/testcase/test_framework_dummy.cc b/tests/st/testcase/test_framework_dummy.cc index 951e6b2b..0abdd18b 100644 --- a/tests/st/testcase/test_framework_dummy.cc +++ b/tests/st/testcase/test_framework_dummy.cc @@ -17,9 +17,13 @@ #include #include #include "external/ge/ge_api.h" +#include "ge_running_env/fake_engine.h" #include "graph/debug/ge_attr_define.h" #include "framework/common/types.h" + #include "builder/graph_builder_utils.h" +#include "ge_running_env/ge_running_env_faker.h" + #include "graph/operator_reg.h" #include "graph/operator.h" #define protected public @@ -109,8 +113,8 @@ Graph BuildV1ControlFlowGraph() { for_each(data_vec.begin(), data_vec.end(), [&](int64_t &data) { dims_size *= data; }); vector data_value_vec(dims_size, 1); GeTensorDesc data_tensor_desc(GeShape(data_vec), FORMAT_NCHW, DT_INT32); - GeTensorPtr data_tensor = make_shared(data_tensor_desc, (uint8_t *) data_value_vec.data(), - data_value_vec.size() * sizeof(int32_t)); + GeTensorPtr data_tensor = + make_shared(data_tensor_desc, (uint8_t *)data_value_vec.data(), data_value_vec.size() * sizeof(int32_t)); OpDescUtils::SetWeights(const_5->GetOpDesc(), data_tensor); OpDescUtils::SetWeights(const_2->GetOpDesc(), data_tensor); OpDescUtils::SetWeights(const_1->GetOpDesc(), 
data_tensor); @@ -120,13 +124,9 @@ Graph BuildV1ControlFlowGraph() { } // namespace class FrameworkTest : public testing::Test { protected: - void SetUp() { - // ge initialize - map options; - auto ret = ge::GEInitialize(options); - EXPECT_EQ(ret, SUCCESS); - } + void SetUp() { ge_env.InstallDefault(); } void TearDown() {} + GeRunningEnvFaker ge_env; }; /// data data diff --git a/tests/st/testcase/test_main.cc b/tests/st/testcase/test_main.cc new file mode 100644 index 00000000..a39c68aa --- /dev/null +++ b/tests/st/testcase/test_main.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "common/debug/log.h" +#include "external/ge/ge_api.h" +#include "ge_running_env/include/ge_running_env/ge_running_env_faker.h" + +using namespace std; +using namespace ge; + +int main(int argc, char **argv) { + // init the logging + map options; + auto init_status = ge::GEInitialize(options); + if (init_status != SUCCESS) { + std::cout << "ge init failed , ret code:" << init_status << endl; + } + GeRunningEnvFaker::BackupEnv(); + testing::InitGoogleTest(&argc, argv); + int ret = RUN_ALL_TESTS(); + return ret; +} From 07a5635e8b5e08879c2013cffec6164e5697006d Mon Sep 17 00:00:00 2001 From: TangQunzhang Date: Mon, 28 Jun 2021 13:48:18 +0800 Subject: [PATCH 101/226] Print statics when malloc memory fail --- ge/graph/manager/graph_caching_allocator.cc | 18 ++++++++++++------ ge/graph/manager/graph_caching_allocator.h | 10 +++++++++- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/ge/graph/manager/graph_caching_allocator.cc b/ge/graph/manager/graph_caching_allocator.cc index 82bfbda9..7b316fc3 100644 --- a/ge/graph/manager/graph_caching_allocator.cc +++ b/ge/graph/manager/graph_caching_allocator.cc @@ -20,7 +20,6 @@ #include #include -#include "framework/common/debug/ge_log.h" #include "graph/manager/graph_mem_manager.h" namespace ge { @@ -94,7 +93,8 @@ void IncreaseCount(std::map &count, size_t size) { } } -CachingAllocator::CachingAllocator(rtMemType_t memory_type) : memory_type_(memory_type), memory_allocator_(nullptr) { +CachingAllocator::CachingAllocator(rtMemType_t memory_type) + : memory_type_(memory_type), memory_allocator_(nullptr), called_malloc_counts_(0), called_free_counts_(0) { for (uint32_t i = 0; i < kNumBins; i++) { free_block_bins_[i] = nullptr; } @@ -121,6 +121,8 @@ Status CachingAllocator::Initialize(uint32_t device_id) { if (memory_allocator_ == nullptr) { return ACL_ERROR_GE_INTERNAL_ERROR; } + called_malloc_counts_ = 0; + called_free_counts_ = 0; return ge::SUCCESS; } @@ -133,6 +135,7 @@ void CachingAllocator::Finalize(uint32_t device_id) { uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device_id) { GELOGI("Start malloc pool memory, size = %zu, device id = %u", size, device_id); + called_malloc_counts_++; size = GetBlockSize(size); uint8_t *ptr = nullptr; Block *block = FindFreeBlock(size, org_ptr, device_id); @@ -156,6 +159,7 @@ uint8_t 
*CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) { GELOGI("Free device id = %u", device_id); + called_free_counts_++; if (ptr == nullptr) { REPORT_INNER_ERROR("E19999", "Param ptr is nullptr, device_id:%u, check invalid", device_id); GELOGE(PARAM_INVALID, "[Check][Param] Invalid memory pointer, device_id:%u", device_id); @@ -283,6 +287,7 @@ Status CachingAllocator::TryExtendCache(size_t size, uint32_t device_id) { if (memory_addr == nullptr) { GELOGE(ge::FAILED, "[Malloc][Memory] failed, no enough memory for size = %zu, device_id = %u", memory_size, device_id); + PrintStatics(DLOG_ERROR); return ge::FAILED; } GELOGT(TRACE_RUNNING, "Try to free cached memory size:%zu and malloc memory size:%zu success.", @@ -385,14 +390,14 @@ void CachingAllocator::FreeBlockBins() { } void PrintCount(std::map &count, const std::string &name, size_t total_size, size_t total_count) { - GELOGI("%6s total[size:%10zu count:%10zu].", name.c_str(), total_size, total_count); + GEEVENT("%6s total[size:%11zu count:%11zu].", name.c_str(), total_size, total_count); for (auto &it : count) { - GELOGI(" |- block[size:%10zu count:%10zu].", it.first, it.second); + GEEVENT(" |- block[size:%11zu count:%11zu].", it.first, it.second); } } -void CachingAllocator::PrintStatics() { - if (!IsLogEnable(GE_MODULE_NAME, DLOG_INFO)) { +void CachingAllocator::PrintStatics(int32_t level) { + if (!IsLogEnable(GE_MODULE_NAME, level)) { return; } size_t total_using_size = 0; @@ -435,6 +440,7 @@ void CachingAllocator::PrintStatics() { } } while (0); + GEEVENT("Called counts[malloc:%11zu free:%11zu].", called_malloc_counts_.load(), called_free_counts_.load()); PrintCount(malloc_block_stat, "Malloc", total_malloc_size, total_malloc_count); PrintCount(using_block_stat, "Using", total_using_size, total_using_count); PrintCount(free_block_stat, "Free", total_free_size, total_free_count); diff --git a/ge/graph/manager/graph_caching_allocator.h b/ge/graph/manager/graph_caching_allocator.h index 2db00ff2..d00858f3 100644 --- a/ge/graph/manager/graph_caching_allocator.h +++ b/ge/graph/manager/graph_caching_allocator.h @@ -27,6 +27,7 @@ #include #include +#include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/node.h" #include "graph/manager/block_memory.h" @@ -192,9 +193,10 @@ class CachingAllocator { /// /// @ingroup ge_graph /// @brief print the memory info in pool + /// @param [in] log level /// @return void /// - void PrintStatics(); + void PrintStatics(int32_t level = DLOG_INFO); private: rtMemType_t memory_type_; @@ -213,6 +215,12 @@ class CachingAllocator { // malloced memorys from device std::map malloced_memory_; + + //user call Malloc total counts + std::atomic called_malloc_counts_; + + //user call Free total counts + std::atomic called_free_counts_; }; } // namespace ge #endif // GE_GRAPH_MANAGER_GRAPH_CACHING_ALLOCATOR_H_ From a6b6229967c1241494d86eb39200db8783e55a52 Mon Sep 17 00:00:00 2001 From: wuweikang Date: Thu, 13 May 2021 16:14:41 +0800 Subject: [PATCH 102/226] add copy graph --- ge/graph/manager/graph_manager.cc | 2 +- ge/hybrid/model/hybrid_model.h | 1 + ge/hybrid/model/hybrid_model_builder.cc | 45 +++++++++++++++---- ge/hybrid/model/hybrid_model_builder.h | 1 + .../executor/subgraph_executor_unittest.cc | 3 ++ .../model/hybrid_model_builder_unittest.cc | 26 ++++++++--- 6 files changed, 63 insertions(+), 15 deletions(-) diff --git a/ge/graph/manager/graph_manager.cc 
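In the caching-allocator patch above, the new counters are std::atomic<size_t>, so Malloc and Free from concurrent callers bump them without racing, and PrintStatics now takes a log level so the out-of-memory path in TryExtendCache can emit the same report at DLOG_ERROR. Exercising it directly, as a sketch that assumes constructing and initializing an allocator for device 0 is permitted in a test:

    ge::CachingAllocator allocator(RT_MEMORY_HBM);  // memory type assumed for the sketch
    (void)allocator.Initialize(0);
    uint8_t *ptr = allocator.Malloc(1024, nullptr, 0);
    (void)allocator.Free(ptr, 0);
    allocator.PrintStatics(DLOG_INFO);  // "Called counts[malloc: 1 free: 1]" plus per-bin lines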
b/ge/graph/manager/graph_manager.cc index 66026f8d..01a2e502 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -3131,10 +3131,10 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { } // Avoid repeatively prerun for graphs owns same graph_id in online inference concurrency if (count > 1 && graph_node->GetBuildFlag()) { - graph_node->Lock(); GELOGD("Avoid repeatively prerun, graph_id:%u.", args.graph_id); // In online inference concurrency senario, graph_node is allowed to be locked for 'count' times graph_node->SetSemSize(count); + graph_node->Lock(); graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback })); GELOGI("[PreRunThread] Loop end. Start to run with cached build model."); diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h index 9821242a..77246e20 100644 --- a/ge/hybrid/model/hybrid_model.h +++ b/ge/hybrid/model/hybrid_model.h @@ -147,6 +147,7 @@ class HybridModel { GeRootModelPtr ge_root_model_; std::map input_nodes_; ComputeGraphPtr root_graph_; + ComputeGraphPtr orig_root_graph_; std::map device_variable_nodes_; //lint !e148 std::map host_variable_nodes_; //lint !e148 std::map> variable_tensors_; diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 1f68f374..e80d9b90 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -147,6 +147,7 @@ Status HybridModelBuilder::Build() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); hybrid_model_.model_name_ = ge_root_model_->GetModelName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); + GE_CHK_STATUS_RET(CopyGraph(), "[Invoke][CopyGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitRuntimeParams(), "[Invoke][InitRuntimeParams] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(RecoverGraphUnknownFlag(), "[Invoke][RecoverGraphUnknownFlag] failed, model_name_:[%s]", GetGraphName()); @@ -171,11 +172,12 @@ Status HybridModelBuilder::Build() { Status HybridModelBuilder::BuildForSingleOp() { GE_CHK_STATUS_RET(ValidateParams(), "[Invoke][ValidateParams] failed, model_name_:[%s]", GetGraphName()); + hybrid_model_.root_graph_ = ge_root_model_->GetRootGraph(); hybrid_model_.model_name_ = ge_root_model_->GetRootGraph()->GetName(); GELOGI("[%s] Start to build hybrid model.", GetGraphName()); auto ret = ge_root_model_->GetSubgraphInstanceNameToModel(); - const GeModelPtr ge_model = ret[ge_root_model_->GetRootGraph()->GetName()]; - GE_CHK_STATUS_RET(IndexTaskDefs(ge_root_model_->GetRootGraph(), ge_model), + const GeModelPtr ge_model = ret[hybrid_model_.root_graph_->GetName()]; + GE_CHK_STATUS_RET(IndexTaskDefs(hybrid_model_.root_graph_, ge_model), "[Invoke][IndexTaskDefs] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(LoadGraph(), "[Invoke][LoadGraph] failed, model_name_:[%s]", GetGraphName()); GE_CHK_STATUS_RET(InitWeights(), "[Invoke][InitWeights] failed, model_name_:[%s]", GetGraphName()); @@ -190,6 +192,27 @@ Status HybridModelBuilder::ValidateParams() { return SUCCESS; } +Status HybridModelBuilder::CopyGraph() { + GELOGD("Copy compute graph begin."); + auto root_graph = ge_root_model_->GetRootGraph(); + + std::string new_graph_name = ge_root_model_->GetRootGraph()->GetName(); + ComputeGraphPtr new_root_graph = 
MakeShared(new_graph_name); + GE_CHECK_NOTNULL(new_root_graph); + int32_t depth = 0; + std::map node_old_2_new; + std::map op_desc_old_2_new; + graphStatus ret = GraphUtils::CopyComputeGraph(root_graph, new_root_graph, node_old_2_new, op_desc_old_2_new, depth); + if (ret != GRAPH_SUCCESS) { + GELOGE(GRAPH_FAILED, "Copy compute graph failed."); + return GRAPH_FAILED; + } + hybrid_model_.root_graph_ = new_root_graph; + + GELOGD("Copy compute graph[%s] success.", new_graph_name.c_str()); + return SUCCESS; +} + Status HybridModelBuilder::BuildNodeItem(const NodePtr &node, NodeItem &node_item) { auto op_desc = node->GetOpDesc(); GE_CHK_STATUS_RET(ParseForceInfershapeNodes(node, node_item), @@ -810,12 +833,13 @@ Status HybridModelBuilder::BuildOutputMapping(GraphItem &graph_item, } Status HybridModelBuilder::LoadGraph() { - auto root_graph = ge_root_model_->GetRootGraph(); + auto root_graph = hybrid_model_.root_graph_; if (!GetContext().GetHostExecFlag()) { std::shared_ptr merged_graph; GELOGI("Before merging subgraphs DirectNodesSize = %zu, GetAllNodesSize = %zu", root_graph->GetDirectNodesSize(), root_graph->GetAllNodesSize()); + hybrid_model_.orig_root_graph_ = root_graph; GE_CHK_GRAPH_STATUS_RET(UnfoldSubgraphs(root_graph, merged_graph), "[Invoke][UnfoldSubgraphs]Failed to unfold subgraphs, model_name_:%s.", GetGraphName()); root_graph = std::move(merged_graph); @@ -873,6 +897,7 @@ Status HybridModelBuilder::LoadGraph() { } for (auto &it : hybrid_model_.known_shape_sub_models_) { auto node_item = MutableNodeItem(it.first); + GE_CHECK_NOTNULL(node_item); AscendString graph_name; GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); auto subgraph = hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); @@ -1121,7 +1146,9 @@ Status HybridModelBuilder::InitWeights() { sub_weight_buffer->GetSize()); auto subgraph = GraphUtils::GetComputeGraph(subgraph_model.second->GetGraph()); if (subgraph != ge_root_model_->GetRootGraph()) { - subgraph = ge_root_model_->GetRootGraph()->GetSubgraph(subgraph_model.first); + subgraph = hybrid_model_.root_graph_->GetSubgraph(subgraph_model.first); + } else { + subgraph = hybrid_model_.root_graph_; } GE_CHECK_NOTNULL(subgraph); hybrid_model_.weight_buffer_map_.emplace(subgraph->GetName(), std::move(sub_weight_buffer)); @@ -1300,7 +1327,7 @@ Status HybridModelBuilder::IndexTaskDefs(const ComputeGraphPtr &sub_graph, const } Status HybridModelBuilder::IndexTaskDefs() { - const auto root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; const auto &root_graph_name = root_graph->GetName(); if (SetOutputNameAttr(*root_graph) != SUCCESS) { GELOGW("Set output name attr failed."); @@ -1334,7 +1361,7 @@ Status HybridModelBuilder::IndexTaskDefs() { Status HybridModelBuilder::IndexSpecialNodes() { GELOGD("Start to index special nodes"); - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &node : root_graph->GetAllNodes()) { GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); @@ -1489,7 +1516,7 @@ Status HybridModelBuilder::InitRuntimeParams() { runtime_param_.session_id = ret ? static_cast(value) : 0; ret = ge::AttrUtils::GetInt(first_model, ATTR_MODEL_TASK_GEN_VAR_ADDR, value); runtime_param_.logic_var_base = ret ? 
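The point of CopyGraph above is isolation: UnfoldSubgraphs and later passes mutate hybrid_model_.root_graph_, and working on a deep copy keeps the graph held by ge_root_model_ reusable across loads. It is also why the updated unit tests re-find nodes on hybrid_model.root_graph_ rather than on the graph they built. A reduced sketch of the copy call, with the map key and value types reconstructed as an assumption:

    std::map<ge::ConstNodePtr, ge::NodePtr> node_old_2_new;
    std::map<ge::ConstOpDescPtr, ge::OpDescPtr> op_desc_old_2_new;
    auto copy = ge::MakeShared<ge::ComputeGraph>(root_graph->GetName());
    if (ge::GraphUtils::CopyComputeGraph(root_graph, copy, node_old_2_new,
                                         op_desc_old_2_new, 0) != ge::GRAPH_SUCCESS) {
      // handle failure; the original graph is untouched either way
    }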
static_cast(value) : 0; - runtime_param_.graph_id = ge_root_model_->GetRootGraph()->GetGraphID(); + runtime_param_.graph_id = hybrid_model_.root_graph_->GetGraphID(); value = 0; for (auto &it : ge_root_model_->GetSubgraphInstanceNameToModel()) { (void) ge::AttrUtils::GetInt(it.second, ATTR_MODEL_VAR_SIZE, value); @@ -1626,7 +1653,7 @@ Status HybridModelBuilder::TransAllVarData() { } Status HybridModelBuilder::CopyVarData() { - GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(ge_root_model_->GetRootGraph(), + GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(hybrid_model_.root_graph_, runtime_param_.session_id, hybrid_model_.device_id_), "[Invoke][CopyVarData] failed."); @@ -1709,7 +1736,7 @@ Status HybridModelBuilder::LoadKnownShapedSubgraph(ComputeGraph &graph, NodeItem } Status HybridModelBuilder::RecoverGraphUnknownFlag() { - const auto &root_graph = ge_root_model_->GetRootGraph(); + const auto &root_graph = hybrid_model_.root_graph_; for (auto &sub_graph : root_graph->GetAllSubgraphs()) { GE_CHECK_NOTNULL(sub_graph); for (const auto &node : sub_graph->GetDirectNode()) { diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index 9c1eb187..05830e82 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ b/ge/hybrid/model/hybrid_model_builder.h @@ -56,6 +56,7 @@ class HybridModelBuilder { Status BuildOutputMapping(GraphItem &partitioned_call, const NodeItem &node_item, bool is_root_graph); Status ValidateParams(); Status LoadGraph(); + Status CopyGraph(); Status LoadGeModel(ComputeGraph &graph, const GeModelPtr &ge_model); static Status InitHcclExecutorOnDemand(const GeModelPtr &ge_model); Status LoadTask(NodeItem &node_item); diff --git a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc index 2dc3b639..827705ae 100644 --- a/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc +++ b/tests/ut/ge/hybrid/executor/subgraph_executor_unittest.cc @@ -249,6 +249,9 @@ TEST_F(UtestSubgraphExecutor, cond_graph_schedule_tasks) { graph_context.callback_manager = std::unique_ptr(new CallbackManager()); ASSERT_EQ(graph_context.callback_manager->Init(), SUCCESS); + auto root_graph = hybrid_model.root_graph_; + switch_t = root_graph->FindNode("switch_t"); + switch_f = root_graph->FindNode("switch_f"); const auto node_it_t = hybrid_model.node_items_.find(switch_t); const auto node_it_f = hybrid_model.node_items_.find(switch_f); ASSERT_NE(hybrid_model.node_items_.end(), node_it_t); diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 5567aca2..10f7c0fe 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -214,11 +214,17 @@ TEST_F(UtestHybridModelBuilder, normal_hybrid_model_build) { ASSERT_EQ(it->second->frame_index_, index); ASSERT_EQ(it->second->parent_frame_, -1); }; - TestFrameGroup(enter1, control_group_index); - TestFrameGroup(active1, control_group_index); - TestFrameGroup(active2, control_group_index); - TestFrameGroup(active3, control_group_index); - TestFrameGroup(output1, -1); + auto root_graph = hybrid_model.root_graph_; + auto enter1_node = root_graph->FindNode("enter"); + auto active1_node = root_graph->FindNode("active1"); + auto active2_node = root_graph->FindNode("active2"); + auto active3_node = root_graph->FindNode("active3"); + auto output1_node = root_graph->FindNode("net_output"); + 
TestFrameGroup(enter1_node, control_group_index);
+  TestFrameGroup(active1_node, control_group_index);
+  TestFrameGroup(active2_node, control_group_index);
+  TestFrameGroup(active3_node, control_group_index);
+  TestFrameGroup(output1_node, -1);
 
   engine_mapping.clear();
   task_executor.clear();
@@ -373,4 +379,14 @@ TEST_F(UtestHybridModelBuilder, TestInitHcclExecutorOnDemand) {
   NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL);
   ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS);
 }
+
+TEST_F(UtestHybridModelBuilder, copy_graph_success) {
+  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
+  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
+  HybridModel hybrid_model(ge_root_model);
+  HybridModelBuilder hybrid_model_builder(hybrid_model);
+
+  Status st = hybrid_model_builder.CopyGraph();
+  EXPECT_EQ(st, SUCCESS);
+}
 } // namespace ge

From deecaf160a6a18ca9fa075606163fa16e53dba23 Mon Sep 17 00:00:00 2001
From: wq160
Date: Mon, 28 Jun 2021 10:37:15 +0800
Subject: [PATCH 103/226] Temporarily disable ImmediateRePass

---
 ge/graph/passes/infer_base_pass.cc                   | 5 ++---
 tests/ut/ge/graph/passes/infer_base_pass_unittest.cc | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/ge/graph/passes/infer_base_pass.cc b/ge/graph/passes/infer_base_pass.cc
index 27eb0c54..25c45677 100644
--- a/ge/graph/passes/infer_base_pass.cc
+++ b/ge/graph/passes/infer_base_pass.cc
@@ -84,9 +84,8 @@ Status InferBasePass::Run(NodePtr &node) {
 bool InferBasePass::NeedInfer(const NodePtr &node) const { return true; }
 
 void InferBasePass::AddChangedNodesImmediateRepass(const std::set<NodePtr> &changed_nodes) {
-  for (const auto &node_ele : changed_nodes) {
-    AddImmediateRePassNode(node_ele);
-  }
+// A passed_nodes set is needed to solve the problem that multi-input operators are repassed ahead of time.
+// Once the passed_nodes set exists, we should call AddImmediateRePassNode for all nodes in changed_nodes.
 }
 
 graphStatus InferBasePass::InferAndUpdate(NodePtr &node, bool before_subgraph, std::set<NodePtr> &changed_nodes) {
diff --git a/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc
index e9247f75..24cc5c1b 100644
--- a/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/infer_base_pass_unittest.cc
@@ -255,7 +255,7 @@ TEST_F(UtestGraphInferBasePassStub, AddCurNodeRepass_NotCallUpdatePeerNode_WhenI
   EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS);
   EXPECT_EQ(stub_base_pass.call_infer_times, 1);
   EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 0);
-  EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set<NodePtr>({add_node}));
+//  EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set<NodePtr>({add_node}));
 }
 
 TEST_F(UtestGraphInferBasePassStub, NotAddPeerNodeRepass_AfterUpdatePeerNode_WhenUnchanged) {
@@ -291,7 +291,7 @@ TEST_F(UtestGraphInferBasePassStub, AddPeerNodeRepass_AfterUpdatePeerNode_WhenCh
   EXPECT_EQ(stub_base_pass.Run(add_node), SUCCESS);
   EXPECT_EQ(stub_base_pass.call_update_tensor_desc_times, 1);
-  EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set<NodePtr>({netoutput}));
+//  EXPECT_EQ(stub_base_pass.GetNodesNeedRePassImmediately(), std::unordered_set<NodePtr>({netoutput}));
 }
 
 TEST_F(UtestGraphInferBasePassStub, TestUpdateSubgraphData_WhenBeforeSubgraph) {

From e9bab262b736d4d56e3d78c8ed921fa811b752bd Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Mon, 28 Jun 2021 16:43:47 +0800
Subject: [PATCH 104/226] Fix bug of aicore input const.
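
The heart of this change is easier to see outside the diff context. Below is a
condensed, self-contained sketch of the new addressing rule; Status, the
arg-table layout, and the buffer type are simplified stand-ins for the GE
types used in the actual diff, not the real API. Const inputs keep the address
already baked into the kernel arg table, while non-const inputs consume the
caller-supplied buffers in order:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum Status { SUCCESS = 0, PARAM_INVALID = 1 };

    Status UpdateIoAddrSketch(const uintptr_t *arg_base, size_t arg_num,
                              const std::vector<bool> &is_input_const,
                              const std::vector<void *> &input_buffers,
                              std::vector<void *> &args) {
      size_t non_const_index = 0;
      for (size_t i = 0; i < is_input_const.size(); ++i) {
        if (is_input_const[i]) {
          if (i >= arg_num) {
            return PARAM_INVALID;  // const address must already be in the table
          }
          args.emplace_back(reinterpret_cast<void *>(arg_base[i]));
          continue;
        }
        if (non_const_index >= input_buffers.size()) {
          return PARAM_INVALID;  // fewer user buffers than non-const inputs
        }
        args.emplace_back(input_buffers[non_const_index++]);
      }
      return SUCCESS;
    }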
---
 ge/single_op/task/op_task.cc                    | 54 ++++++++++++++++---
 ge/single_op/task/op_task.h                     |  2 +
 .../ge/single_op/single_op_task_unittest.cc     | 37 +++++++++++--
 3 files changed, 83 insertions(+), 10 deletions(-)

diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc
index b6a78f9e..92d1e325 100755
--- a/ge/single_op/task/op_task.cc
+++ b/ge/single_op/task/op_task.cc
@@ -345,6 +345,53 @@ Status TbeOpTask::AllocateWorkspaces(const vector<int64_t> &workspace_sizes) {
   return SUCCESS;
 }
 
+Status TbeOpTask::UpdateIoAddr(std::vector<void *> &args, const std::vector<DataBuffer> &inputs,
+                               const std::vector<DataBuffer> &outputs) {
+  uintptr_t *arg_base = nullptr;
+  size_t arg_num = 0;
+  GetIoAddr(arg_base, arg_num);
+
+  const vector<bool> v_is_input_const = op_desc_->GetIsInputConst();
+  size_t non_const_index = 0;
+  for (size_t i = 0; i < op_desc_->GetAllInputsSize(); ++i) {
+    const GeTensorDescPtr tensor_desc = op_desc_->MutableInputDesc(static_cast<uint32_t>(i));
+    if (tensor_desc == nullptr) {
+      GELOGD("SingleOp: %s, Index: %zu, has no input", op_desc_->GetName().c_str(), i);
+      continue;
+    }
+    if (i < v_is_input_const.size() && v_is_input_const[i]) {
+      if (i >= arg_num) {
+        GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Args size is %zu, but get index is %zu.", arg_num, i);
+        REPORT_INNER_ERROR("E19999", "[Check][Size] Args size is %zu, but get index is %zu.", arg_num, i);
+        return ACL_ERROR_GE_PARAM_INVALID;
+      }
+      auto addr = reinterpret_cast<void *>(arg_base[i]);
+      GELOGD("SingleOp: %s, Index: %zu, input is const, addr = %p", op_desc_->GetName().c_str(), i, addr);
+      args.emplace_back(addr);
+      continue;
+    }
+    if (non_const_index >= inputs.size()) {
+      GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Input size is %zu, but get non_const_index is %zu",
+             inputs.size(), non_const_index);
+      REPORT_INNER_ERROR("E19999", "[Check][Size] Input size is %zu, but get non_const_index is %zu",
+                         inputs.size(), non_const_index);
+      return ACL_ERROR_GE_PARAM_INVALID;
+    }
+    auto addr = inputs[non_const_index].data;
+    GELOGD("SingleOp: %s, input[%zu], addr = %p", op_desc_->GetName().c_str(), i, addr);
+    args.emplace_back(addr);
+    non_const_index++;
+  }
+
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    auto addr = outputs[i].data;
+    GELOGD("SingleOp: %s, output[%zu] addr = %p", op_desc_->GetName().c_str(), i, addr);
+    args.emplace_back(addr);
+  }
+
+  return SUCCESS;
+}
+
 Status TbeOpTask::LaunchKernel(const vector<GeTensorDesc> &input_desc,
                                const vector<DataBuffer> &input_buffers,
                                vector<GeTensorDesc> &output_desc,
@@ -355,12 +402,7 @@ Status TbeOpTask::LaunchKernel(const vector<GeTensorDesc> &input_desc,
   GE_CHK_STATUS_RET_NOLOG(UpdateRunInfo());
   GE_CHK_STATUS_RET(AllocateWorkspaces(run_info_workspaces_), "[Allocate][Workspaces] failed.");
   std::vector<void *> args;
-  for (auto &buffer : input_buffers) {
-    args.emplace_back(buffer.data);
-  }
-  for (auto &buffer : output_buffers) {
-    args.emplace_back(buffer.data);
-  }
+  GE_CHK_STATUS_RET(UpdateIoAddr(args, input_buffers, output_buffers), "[Update][IoAddr] failed.");
   for (auto &buffer : workspaces_) {
     args.emplace_back(buffer);
   }
diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h
index 19320bc0..0cbc1a29 100644
--- a/ge/single_op/task/op_task.h
+++ b/ge/single_op/task/op_task.h
@@ -101,6 +101,8 @@ class TbeOpTask : public OpTask {
                            const vector<GeTensorDesc> &output_desc);
   Status AllocateWorkspaces(const std::vector<int64_t> &workspace_sizes);
   Status DoLaunchKernel(rtStream_t stream);
+  Status UpdateIoAddr(std::vector<void *> &args, const std::vector<DataBuffer> &inputs,
+                      const std::vector<DataBuffer> &outputs);
 
   const void *stub_func_ = nullptr;
   std::unique_ptr<uint8_t[]> args_;
diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc
index a17c9012..472a88c3 100644
--- a/tests/ut/ge/single_op/single_op_task_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_task_unittest.cc
@@ -91,8 +91,9 @@ TEST_F(UtestSingleOpTask, test_build_kernel_task) {
   TbeOpTask task_tmp;
   TbeOpTask *task = &task_tmp;
   ASSERT_EQ(model.BuildKernelTask(task_def, &task), SUCCESS);
+  ge::DataBuffer data_buffer;
   vector<GeTensorDesc> input_desc;
-  vector<DataBuffer> input_buffers;
+  vector<DataBuffer> input_buffers = { data_buffer };
   vector<GeTensorDesc> output_desc;
   vector<DataBuffer> output_buffers;
   task->node_ = node;
@@ -110,8 +111,36 @@ TEST_F(UtestSingleOpTask, test_build_kernel_task) {
   task->args_.reset(&task_args);
   ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);
 
-  char handle_tmp = '0';
-  char *handle = &handle_tmp;
+  char *handle = "00";
   task->SetHandle(handle);
   ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);
-}
\ No newline at end of file
+}
+
+TEST_F(UtestSingleOpTask, test_update_ioaddr) {
+  auto graph = make_shared<ComputeGraph>("graph");
+  auto op_desc = make_shared<OpDesc>("Add", "Add");
+
+  GeTensorDesc desc;
+  op_desc->AddInputDesc(desc);
+  op_desc->AddInputDesc(desc);
+  op_desc->AddOutputDesc(desc);
+  vector<bool> is_input_const = { true, false };
+  op_desc->SetIsInputConst(is_input_const);
+  auto node = graph->AddNode(op_desc);
+
+  TbeOpTask task;
+  task.op_desc_ = op_desc;
+  task.args_.reset(new (std::nothrow) uint8_t[sizeof(void *) * 3]);
+
+  vector<void *> args;
+  vector<DataBuffer> inputs;
+  vector<DataBuffer> outputs;
+  ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
+  task.arg_size_ = sizeof(void *) * 3;
+  ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
+
+  ge::DataBuffer data_buffer;
+  inputs = { data_buffer };
+  ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), SUCCESS);
+}
+

From b9715a14584132d72ca9dd49811a943852730eca Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Mon, 28 Jun 2021 19:23:31 +0800
Subject: [PATCH 105/226] DSP: Switch -> TransData -> Cast -> Exit

---
 ge/graph/passes/next_iteration_pass.cc        | 33 ++++++++++++-------
 ge/hybrid/executor/worker/execution_engine.cc |  2 +-
 2 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/ge/graph/passes/next_iteration_pass.cc b/ge/graph/passes/next_iteration_pass.cc
index fb8f8627..1c2d7218 100644
--- a/ge/graph/passes/next_iteration_pass.cc
+++ b/ge/graph/passes/next_iteration_pass.cc
@@ -24,7 +24,9 @@ using std::string;
 
 namespace ge {
 namespace {
-const int64_t kLoopType = 1;
+constexpr int64_t kLoopType = 1;
+constexpr uint8_t kMaxTransOp = 3;
+constexpr uint8_t kTransOpIoSize = 1;
 }
 
 Status NextIterationPass::Run(ComputeGraphPtr graph) {
@@ -287,18 +289,25 @@ void NextIterationPass::HandleSwitchExitNodes(const LoopCondGroup &loop_group, int64_t group_index) {
   std::string node_type;
   for (const auto &switch_node : loop_group.switch_nodes) {
     SetControlFlowGroup(switch_node, group_index);
-    for (const auto &node : switch_node->GetOutDataNodes()) {
-      (void)GetOriginalType(node, node_type);
-      if (kExitOpTypes.count(node_type) > 0) {
-        SetControlFlowGroup(node, group_index);
-      } else {
-        // For: Switch -> Cast -> Exit
-        for (const auto &n : node->GetOutDataNodes()) {
-          (void)GetOriginalType(n, node_type);
-          if (kExitOpTypes.count(node_type) > 0) {
-            SetControlFlowGroup(n, group_index);
-          }
+    for (auto node : switch_node->GetOutDataNodes()) {
+      // Switch --> Exit
+      // Switch --> Cast --> Exit
+      // Switch --> TransData --> Cast --> Exit
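+      // Walk forward from each Switch data output through at most
+      // kMaxTransOp single-input/single-output trans nodes, tagging the
+      // Exit node with the loop's control-flow group once it is reached.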
+ for (uint8_t i = 0; i < kMaxTransOp; ++i) { + if (node->GetInDataNodes().size() != kTransOpIoSize || node->GetAllOutDataAnchorsSize() != kTransOpIoSize) { + break; } + + if (kExitOpTypes.count(NodeUtils::GetNodeType(node)) > 0) { + SetControlFlowGroup(node, group_index); + break; + } + + const auto &all_nodes = node->GetOutAllNodes(); + if (all_nodes.size() != kTransOpIoSize) { + break; + } + node = all_nodes.at(0); } } } diff --git a/ge/hybrid/executor/worker/execution_engine.cc b/ge/hybrid/executor/worker/execution_engine.cc index d4c73f58..ca864244 100755 --- a/ge/hybrid/executor/worker/execution_engine.cc +++ b/ge/hybrid/executor/worker/execution_engine.cc @@ -373,9 +373,9 @@ Status ExecutionEngine::DoExecuteAsync(NodeState &node_state, auto executor = node_item.node_executor; GE_CHECK_NOTNULL(executor); RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start"); + node_state.UpdatePersistTensor(); GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context), "[Prepare][Task] for [%s] failed.", node_state.GetName().c_str()); - node_state.UpdatePersistTensor(); RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End"); GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str()); From c7e8fc988d24c951dc1942847878e2b339e0e961 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 28 Jun 2021 19:26:20 +0800 Subject: [PATCH 106/226] Fix bug of multi_task. --- ge/single_op/single_op_model.cc | 128 ++++++++++++++++++-------------- ge/single_op/single_op_model.h | 6 ++ 2 files changed, 77 insertions(+), 57 deletions(-) diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 08a0fcbc..e5d15beb 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -95,35 +95,6 @@ Status CheckInferDepend(GeModelPtr &ge_model, bool &is_infer_depend, bool &is_ho } return SUCCESS; } - -Status NeedHybridModel(GeModelPtr &ge_model, bool &flag) { - bool is_infer_depend = false; - bool is_host_mem = false; - GE_CHK_STATUS_RET(CheckInferDepend(ge_model, is_infer_depend, is_host_mem), "[Check][InferDepend] failed."); - bool need_d2h_cpy = is_infer_depend && !is_host_mem; - auto tasks = ge_model->GetModelTaskDefPtr()->task(); - int32_t kernel_task_num = 0; - for (int i = 0; i < tasks.size(); ++i) { - auto task_type = static_cast(tasks[i].type()); - if (task_type == RT_MODEL_TASK_KERNEL || task_type == RT_MODEL_TASK_ALL_KERNEL) { - const auto &context = task_type == RT_MODEL_TASK_KERNEL ? 
tasks[i].kernel().context() : - tasks[i].kernel_with_handle().context(); - auto kernel_type = static_cast(context.kernel_type()); - if (kernel_type == ccKernelType::TE) { - if (need_d2h_cpy) { - flag = true; - return SUCCESS; - } - kernel_task_num++; - if (kernel_task_num > 1) { - flag = true; - return SUCCESS; - } - } - } - } - return SUCCESS; -} } // namespace SingleOpModel::SingleOpModel(const std::string &model_name, const void *model_data, uint32_t model_size) @@ -596,50 +567,92 @@ Status SingleOpModel::BuildModelTaskKernel(StreamResource *stream_resource, cons } Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &single_op) { - auto ge_model = model_helper_.GetGeModel(); - GE_CHECK_NOTNULL(ge_model); - - auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph()); - GE_CHECK_NOTNULL(compute_graph); - single_op.compute_graph_ = compute_graph; - auto tasks = ge_model->GetModelTaskDefPtr()->task(); - for (int i = 0; i < tasks.size(); ++i) { - const TaskDef &task_def = tasks[i]; - GELOGI("[%s] Task[%d], type = [%u], DebugString = [%s]", model_name_.c_str(), i, task_def.type(), - task_def.DebugString().c_str()); + if (tbe_tasks_.size() > 0) { + const auto &task_def = tbe_tasks_[0]; + GELOGD("Building TBE task."); + TbeOpTask *tbe_task = nullptr; + GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def, &tbe_task)); + tbe_task->SetModelArgs(model_name_, model_id_); + if (tbe_task->tiling_buffer_ != nullptr) { + GELOGD("tiling buffer is not nullptr."); + tbe_task->stream_resource_ = stream_resource; + } + single_op.op_task_.reset(tbe_task); + } else if (aicpu_tasks_.size() > 0) { + const auto &task_def = aicpu_tasks_[0]; auto task_type = static_cast(task_def.type()); - if (task_type == RT_MODEL_TASK_KERNEL || task_type == RT_MODEL_TASK_ALL_KERNEL) { - if (single_op.op_task_ != nullptr) { - GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "[Check][TaskType]Do not support dynamic op with multiple tasks."); - REPORT_INNER_ERROR("E19999", - "BuildTaskListForDynamicOp fail for Do not support dynamic op with multiple tasks."); - return ACL_ERROR_GE_OP_TASK_TYPE_INVALID; - } - GE_CHK_STATUS_RET_NOLOG(BuildModelTaskKernel(stream_resource, task_def, single_op)); + if (task_type == RT_MODEL_TASK_KERNEL) { + GELOGD("Building AICPU_CC task"); + OpTask *task = nullptr; + uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; + GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id); + GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id)); + task->SetModelArgs(model_name_, model_id_); + single_op.op_task_.reset(task); } else if (task_type == RT_MODEL_TASK_KERNEL_EX) { - if (single_op.op_task_ != nullptr) { - GELOGE(ACL_ERROR_GE_OP_TASK_TYPE_INVALID, "[Check][TaskType]Do not support dynamic op with multiple tasks."); - REPORT_INNER_ERROR("E19999", - "BuildTaskListForDynamicOp fail for Do not support dynamic op with multiple tasks."); - return ACL_ERROR_GE_OP_TASK_TYPE_INVALID; - } GELOGD("Building AICPU_TF task"); AiCpuTask *aicpu_task = nullptr; uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id); GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, dynamic_singleop_kernel_id)); if (aicpu_task->GetUnknownType() == DEPEND_COMPUTE) { - if (i >= tasks.size() - 1) { + if (aicpu_tasks_.size() < 2) { GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the fourth 
operator was not found."); REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found."); return ACL_ERROR_GE_PARAM_INVALID; } - ++i; - const TaskDef ©_task_def = tasks[i]; + const TaskDef ©_task_def = aicpu_tasks_[1]; GE_CHK_STATUS_RET_NOLOG(aicpu_task->SetMemCopyTask(copy_task_def.kernel_ex())); } aicpu_task->SetModelArgs(model_name_, model_id_); single_op.op_task_.reset(aicpu_task); + } + } + return SUCCESS; +} + +Status SingleOpModel::NeedHybridModel(GeModelPtr &ge_model, bool &need_hybrid_model) { + bool is_infer_depend = false; + bool is_host_mem = false; + GE_CHK_STATUS_RET(CheckInferDepend(ge_model, is_infer_depend, is_host_mem), "[Check][InferDepend] failed."); + bool need_d2h_cpy = is_infer_depend && !is_host_mem; + bool aicpu_multi_task = tbe_tasks_.size() >= 1 && aicpu_tasks_.size() >= 1; + bool aicore_multi_task = tbe_tasks_.size() > 1; + need_hybrid_model = need_d2h_cpy || aicore_multi_task || aicpu_multi_task; + return SUCCESS; +} + +Status SingleOpModel::ParseTasks() { + auto ge_model = model_helper_.GetGeModel(); + GE_CHECK_NOTNULL(ge_model); + + auto tasks = ge_model->GetModelTaskDefPtr()->task(); + for (int i = 0; i < tasks.size(); ++i) { + TaskDef &task_def = tasks[i]; + GELOGI("[%s] Task[%d], type = [%u], DebugString = [%s]", model_name_.c_str(), i, task_def.type(), + task_def.DebugString().c_str()); + auto task_type = static_cast(task_def.type()); + if (task_type == RT_MODEL_TASK_KERNEL) { + const auto &kernel_def = task_def.kernel(); + const auto &context = kernel_def.context(); + auto kernel_type = static_cast(context.kernel_type()); + if (kernel_type == ccKernelType::TE) { + tbe_tasks_.emplace_back(task_def); + } else if (kernel_type == ccKernelType::AI_CPU || kernel_type == ccKernelType::CUST_AI_CPU) { + aicpu_tasks_.emplace_back(task_def); + } else { + GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, + "[Check][Param:TaskDef]Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", + context.kernel_type()); + REPORT_INNER_ERROR("E19999", + "BuildModelTaskKernel fail for got:%u not supported, Only TBE, AI_CPU, CUST_AI_CPU kernel are supported.", + context.kernel_type()); + return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID; + } + } else if (task_type == RT_MODEL_TASK_ALL_KERNEL) { + tbe_tasks_.emplace_back(task_def); + } else if (task_type == RT_MODEL_TASK_KERNEL_EX) { + aicpu_tasks_.emplace_back(task_def); } else { // skip GELOGD("Skip task type: %d", static_cast(task_type)); @@ -654,6 +667,7 @@ Status SingleOpModel::BuildDynamicOp(StreamResource &resource, DynamicSingleOp & GE_CHK_STATUS_RET_NOLOG(InitModelMem(resource)); model_params_.memory_size = UINT64_MAX; model_params_.graph_is_dynamic = true; + GE_CHK_STATUS_RET(ParseTasks(), "[Parse][Tasks] failed."); auto ge_model = model_helper_.GetGeModel(); GE_CHECK_NOTNULL(ge_model); diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index b7f6b42a..98aed0f0 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -78,6 +78,12 @@ class SingleOpModel { void ParseArgTable(OpTask *task, SingleOp &op); Status InitHybridModelExecutor(const StreamResource &resource, const GeModelPtr &ge_model, SingleOp &single_op); Status SetHostMemTensor(DynamicSingleOp &single_op); + Status NeedHybridModel(GeModelPtr &ge_model, bool &flag); + Status ParseTasks(); + + std::vector tbe_tasks_; + std::vector atomic_tasks_; + std::vector aicpu_tasks_; std::string model_name_; uint32_t model_id_ = 0; From 98f34a58bbf8cad99b7e127942c83389a6305574 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Wed, 23 Jun 2021 19:36:26 +0800 Subject: [PATCH 107/226] opt info --- CMakeLists.txt | 1 + ge/CMakeLists.txt | 8 ++ ge/ge_opt_info/ge_opt_info.cc | 58 +++++++++ ge/ge_opt_info/ge_opt_info.h | 31 +++++ ge/graph/manager/graph_manager.cc | 7 + tests/CMakeLists.txt | 1 + tests/depends/opt_info/CMakeLists.txt | 37 ++++++ tests/depends/opt_info/src/opt_info_stub.cc | 46 +++++++ tests/framework/cmake/graphengine.cmake | 2 + tests/st/testcase/test_ge_opt_info.cc | 123 ++++++++++++++++++ tests/ut/ge/CMakeLists.txt | 14 ++ .../ut/ge/ge_opt_info/ge_opt_info_unittest.cc | 82 ++++++++++++ third_party/fwkacllib/inc/opt_info/opt_info.h | 32 +++++ 13 files changed, 442 insertions(+) create mode 100644 ge/ge_opt_info/ge_opt_info.cc create mode 100644 ge/ge_opt_info/ge_opt_info.h create mode 100644 tests/depends/opt_info/CMakeLists.txt create mode 100644 tests/depends/opt_info/src/opt_info_stub.cc create mode 100644 tests/st/testcase/test_ge_opt_info.cc create mode 100644 tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc create mode 100644 third_party/fwkacllib/inc/opt_info/opt_info.h diff --git a/CMakeLists.txt b/CMakeLists.txt index e3cc1e32..41520b14 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,6 +95,7 @@ else () #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) + find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) find_module(static_mmpa libmmpa.a ${ASCEND_ATC_DIR}) if(PLATFORM STREQUAL "train") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 2b9122da..5db2e7a9 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -434,6 +434,7 @@ set(TRAIN_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" + "ge_opt_info/ge_opt_info.cc" ) set(INFER_SRC_LIST @@ -711,6 +712,7 @@ set(INFER_SRC_LIST "graph/build/memory/max_block_mem_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" "graph/build/memory/buffer_pool_mem_assigner.cc" + "ge_opt_info/ge_opt_info.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) @@ -765,11 +767,13 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external + ${GE_CODE_DIR}/../abl/licctrl #### blue zone ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_runner PRIVATE @@ -792,6 +796,7 @@ target_link_libraries(ge_runner PRIVATE runtime error_manager ascend_hal_stub + opt_feature -Wl,--as-needed json -lrt @@ -839,11 +844,13 @@ target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/../inc ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external + ${GE_CODE_DIR}/../abl/licctrl #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include ${GE_CODE_DIR}/third_party/fwkacllib/inc ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain + ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info ) target_link_options(ge_compiler PRIVATE @@ -863,6 +870,7 @@ target_link_libraries(ge_compiler PRIVATE error_manager slog runtime_compile + opt_feature -Wl,--as-needed json -lrt diff --git a/ge/ge_opt_info/ge_opt_info.cc b/ge/ge_opt_info/ge_opt_info.cc new 
file mode 100644 index 00000000..8c1b84ab --- /dev/null +++ b/ge/ge_opt_info/ge_opt_info.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ge_opt_info/ge_opt_info.h" + +#include +#include +#include "graph/ge_local_context.h" +#include "ge/ge_api_types.h" +#include "common/debug/ge_log.h" +#include "opt_info.h" + +namespace ge { +Status GeOptInfo::SetOptInfo() { + std::string soc_ver; + graphStatus ret = GetThreadLocalContext().GetOption(SOC_VERSION, soc_ver); + if (ret != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get soc version failed."); + GELOGE(FAILED, "[Get][SocVersion]Get soc version failed."); + return FAILED; + } + GELOGD("Soc version:%s.", soc_ver.c_str()); + std::map opt_info; + // the first arg does not work at present. + if (gelc::GetOptInfo(gelc::kOffline, soc_ver, opt_info) != gelc::SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + GELOGE(FAILED, "[Get][OptInfo]Get optional information failed, is_offline:%d, soc version:%s", + gelc::kOffline, soc_ver.c_str()); + return FAILED; + } + // do nothing if get empty information + if (opt_info.empty()) { + GELOGI("Optional information is empty."); + return SUCCESS; + } + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + for (const auto &itr : opt_info) { + graph_options.emplace(itr.first, itr.second); + GELOGI("Get optional information success, key:%s, value:%s.", itr.first.c_str(), itr.second.c_str()); + } + GetThreadLocalContext().SetGraphOption(graph_options); + return SUCCESS; +} +} // namespace ge diff --git a/ge/ge_opt_info/ge_opt_info.h b/ge/ge_opt_info/ge_opt_info.h new file mode 100644 index 00000000..935dff25 --- /dev/null +++ b/ge/ge_opt_info/ge_opt_info.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef GE_OPT_INFO_GE_OPT_INFO_H_ +#define GE_OPT_INFO_GE_OPT_INFO_H_ + +#include "ge/ge_api_error_codes.h" +#include "register/register_types.h" + +namespace ge { +class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeOptInfo { + public: + GeOptInfo() = default; + static Status SetOptInfo(); +}; +} // namespace ge + +#endif // GE_OPT_INFO_GE_OPT_INFO_H_ diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index b862a7d6..0a4633ad 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -27,6 +27,7 @@ #include "common/math/math_util.h" #include "common/thread_pool.h" #include "common/dump/dump_manager.h" +#include "ge_opt_info/ge_opt_info.h" #include "analyzer/analyzer.h" #include "graph/common/ge_call_wrapper.h" #include "graph/common/local_context.h" @@ -1001,6 +1002,12 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector + c_sec +) + +target_include_directories(opt_feature_stub INTERFACE ${CMAKE_CURRENT_LIST_DIR}/src) diff --git a/tests/depends/opt_info/src/opt_info_stub.cc b/tests/depends/opt_info/src/opt_info_stub.cc new file mode 100644 index 00000000..df518c4b --- /dev/null +++ b/tests/depends/opt_info/src/opt_info_stub.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "opt_info.h" +#include +#include +#include +#include + +namespace gelc { +namespace { +const std::vector kSocVersions = {"Ascend910"}; +} + +void SetAllOptInfo(std::map &opt_infos) { + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + opt_infos.emplace("opt_module.rl_tune", "all"); + opt_infos.emplace("opt_module.aoe", "all"); +} + +Status GetOptInfo(WorkMode mode, const std::string &soc_ver, + std::map &opt_infos) { + if (std::find(kSocVersions.begin(), kSocVersions.end(), soc_ver)== kSocVersions.end()) { + SetAllOptInfo(opt_infos); + return SUCCESS; + } + opt_infos.emplace("opt_module.fe", "all"); + opt_infos.emplace("opt_module.pass", "all"); + opt_infos.emplace("opt_module.op_tune", "all"); + return SUCCESS; +} +} // namespace gelc diff --git a/tests/framework/cmake/graphengine.cmake b/tests/framework/cmake/graphengine.cmake index 81aa00cc..c4380016 100644 --- a/tests/framework/cmake/graphengine.cmake +++ b/tests/framework/cmake/graphengine.cmake @@ -103,6 +103,7 @@ list(APPEND INCLUDE_DIRECTORIES "${GE_CODE_DIR}/third_party/fwkacllib/inc/cce" "${GE_CODE_DIR}/third_party/fwkacllib/inc/ops" "${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain" + "${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info" "${GE_CODE_DIR}/tests/ut/ge" "${GE_CODE_DIR}/tests/ut/common" "${CMAKE_BINARY_DIR}" @@ -117,6 +118,7 @@ list(APPEND STUB_LIBS runtime_stub profiler_stub hccl_stub + opt_feature_stub error_manager_stub ascend_protobuf json diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc new file mode 100644 index 00000000..457473b1 --- /dev/null +++ b/tests/st/testcase/test_ge_opt_info.cc @@ -0,0 +1,123 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "easy_graph/graph/box.h" +#include "easy_graph/graph/node.h" +#include "easy_graph/builder/graph_dsl.h" +#include "easy_graph/builder/box_builder.h" +#include "easy_graph/layout/graph_layout.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" +#include "graph/graph.h" +#include "graph/compute_graph.h" +#include "framework/common/types.h" +#include "graph/debug/ge_attr_define.h" +#include "ge_graph_dsl/graph_dsl.h" +#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" +#define protected public +#define private public +#include "ge_opt_info/ge_opt_info.h" +#undef private +#undef protected + +namespace ge { +class STEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(STEST_opt_info, get_opt_info_all) { + std::map options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(STEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + + /// data1 data2 + /// \ / + /// add + // build graph + DEF_GRAPH(g1) { + CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CHAIN(NODE("data2", DATA)->NODE("add")); + }); + + auto graph = ToGeGraph(g1); + + // new session & add graph + Session session(options); + auto ret = session.AddGraph(1, graph, options); + EXPECT_EQ(ret, SUCCESS); + // build input tensor + std::vector inputs; + // build_graph through session + ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} +} // namespace ge diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 06b3e0f2..cf573343 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -62,6 +62,7 @@ include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc) 
include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain) +include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info) include_directories(${GE_CODE_DIR}/tests/ut/ge) include_directories(${GE_CODE_DIR}/tests/ut/common) include_directories(${CMAKE_BINARY_DIR}) @@ -346,6 +347,7 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" "${GE_CODE_DIR}/ge/session/omg.cc" + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" ) set(COMMON_FORMAT_SRC_FILES @@ -453,6 +455,7 @@ set(GRAPH_EXECUTE_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc" + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_context.h" ) @@ -628,6 +631,10 @@ set(SINGLE_OP_SRC_FILES "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model.cc" ) +set(GE_OPT_INFO_SRC_FILES + "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" +) + # test files set(COMMON_TEST_FILES "graph/passes/graph_builder_utils.cc" @@ -813,6 +820,10 @@ set(MULTI_PARTS_TEST_FILES "common/host_cpu_engine_unittest.cc" ) +set(GE_OPT_INFO_TEST_FILES + "ge_opt_info/ge_opt_info_unittest.cc" +) + set(GENERATOR_TEST_FILES "generator/ge_generator_unittest.cc" ) @@ -864,6 +875,7 @@ list(APPEND COMMON_SHARED_LIBRARIES mmpa_stub hccl_stub error_manager_stub + opt_feature_stub ascend_protobuf json ) @@ -1109,10 +1121,12 @@ target_link_libraries(ut_libge_multiparts_utest # libge_others_utest add_executable(ut_libge_others_utest + ${GE_OPT_INFO_SRC_FILES} ${COMMON_TEST_FILES} ${PASS_TEST_FILES} ${EXECUTE_TEST_FILES} ${OTHERS_TEST_FILES} + ${GE_OPT_INFO_TEST_FILES} ) target_compile_options(ut_libge_others_utest PRIVATE diff --git a/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc b/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc new file mode 100644 index 00000000..20c123e9 --- /dev/null +++ b/tests/ut/ge/ge_opt_info/ge_opt_info_unittest.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#define protected public +#define private public +#include "ge_opt_info/ge_opt_info.h" +#include "graph/ge_local_context.h" +#include "external/ge/ge_api_types.h" +#undef private +#undef protected + +namespace ge { +class UTEST_opt_info : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UTEST_opt_info, get_opt_info_success) { + std::map options = {{ge::SOC_VERSION, "Ascend910"}}; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_all) { + std::map global_options = {{ge::SOC_VERSION, "Ascend310"}}; + GetThreadLocalContext().SetGlobalOption(global_options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::SUCCESS); + std::map graph_options = GetThreadLocalContext().GetAllGraphOptions(); + auto itr = graph_options.find("opt_module.fe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.pass"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.op_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.rl_tune"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); + itr = graph_options.find("opt_module.aoe"); + EXPECT_NE(itr, graph_options.end()); + EXPECT_EQ(itr->second, "all"); +} + +TEST_F(UTEST_opt_info, get_opt_info_failed) { + std::map options; + GetThreadLocalContext().SetGlobalOption(options); + auto ret = GeOptInfo::SetOptInfo(); + EXPECT_EQ(ret, ge::FAILED); +} + +} // namespace ge diff --git a/third_party/fwkacllib/inc/opt_info/opt_info.h b/third_party/fwkacllib/inc/opt_info/opt_info.h new file mode 100644 index 00000000..4dff695b --- /dev/null +++ b/third_party/fwkacllib/inc/opt_info/opt_info.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +namespace gelc { +using Status = uint32_t; +using WorkMode = uint32_t; +const Status SUCCESS = 0x0; +const Status FAILED = 0xFFFFFFFF; +const WorkMode kOffline = 0x0; +const WorkMode kInline = 0x01; + +__attribute__((visibility ("default"))) +Status GetOptInfo(WorkMode mode, const std::string &soc_ver, + std::map &opt_info_map); +} // namespace gelc + From ae348cf55a6cb7c5729dd6b4b5fb700723e8362b Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 29 Jun 2021 15:42:36 +0800 Subject: [PATCH 108/226] Fix ut. 
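
The rework below splits argument handling into SetArgIndex, UpdateIoAddr and
UpdateTilingArgs, with args_ laid out as [inputs][outputs][workspaces][optional
tiling pointer]. A minimal sketch of the buffer-growth step, assuming
simplified names and std::memcpy in place of the memcpy_s used in the diff:

    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <new>
    #include <vector>

    // Grow an arg buffer so workspace pointers and the optional tiling
    // pointer fit behind the existing input/output slots.
    bool GrowArgsSketch(std::unique_ptr<uint8_t[]> &args, size_t &arg_size,
                        size_t io_num, const std::vector<void *> &workspaces,
                        void *tiling_buffer) {
      const size_t slots = io_num + workspaces.size() + (tiling_buffer != nullptr ? 1 : 0);
      const size_t needed = slots * sizeof(void *);
      if (arg_size < needed) {
        std::unique_ptr<uint8_t[]> grown(new (std::nothrow) uint8_t[needed]());
        if (grown == nullptr) {
          return false;
        }
        if (args != nullptr) {
          std::memcpy(grown.get(), args.get(), arg_size);  // keep io addresses
        }
        args = std::move(grown);
        arg_size = needed;
      }
      auto *arg_base = reinterpret_cast<uintptr_t *>(args.get());
      size_t index = io_num;
      for (void *ws : workspaces) {
        arg_base[index++] = reinterpret_cast<uintptr_t>(ws);
      }
      if (tiling_buffer != nullptr) {
        arg_base[index] = reinterpret_cast<uintptr_t>(tiling_buffer);
      }
      return true;
    }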
--- ge/single_op/task/op_task.cc | 120 +++++++++--------- ge/single_op/task/op_task.h | 6 +- ge/single_op/task/tbe_task_builder.cc | 1 + .../ge/single_op/single_op_task_unittest.cc | 23 ++-- tests/ut/ge/single_op/single_op_unittest.cc | 2 +- 5 files changed, 84 insertions(+), 68 deletions(-) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 92d1e325..632cd4d8 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -345,14 +345,46 @@ Status TbeOpTask::AllocateWorkspaces(const vector &workspace_sizes) { return SUCCESS; } -Status TbeOpTask::UpdateIoAddr(std::vector &args, const std::vector &inputs, - const std::vector &outputs) { - uintptr_t *arg_base = nullptr; - size_t arg_num = 0; - GetIoAddr(arg_base, arg_num); +Status TbeOpTask::UpdateTilingArgs(rtStream_t stream) { + size_t args_size = op_desc_->GetInputsSize() + op_desc_->GetOutputsSize() + workspaces_.size(); + if (tiling_buffer_ != nullptr) { + args_size++; + } + size_t temp_size = args_size * sizeof(void *); + if (arg_size_ < temp_size) { + GELOGD("Need to reset size of args_ from %zu to %zu.", arg_size_, temp_size); + std::unique_ptr args(new (std::nothrow) uint8_t[temp_size]()); + GE_CHECK_NOTNULL(args); + if (memcpy_s(args.get(), temp_size, args_.get(), arg_size_) != EOK) { + GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "[Update][KernelArgs] failed for [%s].", node_->GetName().c_str()); + REPORT_INNER_ERROR("E19999", "update kernel args failed for %s.", node_->GetName().c_str()); + return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; + } + args_.reset(args.release()); + arg_size_ = temp_size; + } + + uintptr_t *arg_base = reinterpret_cast(args_.get()); + size_t arg_index = op_desc_->GetInputsSize() + op_desc_->GetOutputsSize(); + for (size_t i = 0; i < workspaces_.size(); ++i) { + arg_base[arg_index++] = reinterpret_cast(workspaces_[i]); + } + + if (tiling_buffer_ != nullptr) { + GELOGD("[%s] Start to copy tiling info. 
size = %zu", node_->GetName().c_str(), tiling_data_.size()); + GE_CHK_RT_RET(rtMemcpyAsync(tiling_buffer_, max_tiling_size_, tiling_data_.data(), tiling_data_.size(), + RT_MEMCPY_HOST_TO_DEVICE_EX, stream)); + arg_base[arg_index] = reinterpret_cast(tiling_buffer_); + } + + return SUCCESS; +} + +Status TbeOpTask::SetArgIndex() { + arg_index_.clear(); const vector v_is_input_const = op_desc_->GetIsInputConst(); - size_t non_const_index = 0; + size_t input_index = 0; for (size_t i = 0; i < op_desc_->GetAllInputsSize(); ++i) { const GeTensorDescPtr tensor_desc = op_desc_->MutableInputDesc(static_cast(i)); if (tensor_desc == nullptr) { @@ -360,33 +392,33 @@ Status TbeOpTask::UpdateIoAddr(std::vector &args, const std::vector= arg_num) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Args size is %zu, but get index is %zu.", arg_num, i); - REPORT_INNER_ERROR("E19999", "[Check][Size] Args size is %zu, but get index is %zu.", arg_num, i); - return ACL_ERROR_GE_PARAM_INVALID; - } - auto addr = reinterpret_cast(arg_base[i]); - GELOGD("SingleOp: %s, Index: %zu, input is const, addr = %p", op_desc_->GetName().c_str(), i, addr); - args.emplace_back(addr); + GELOGD("SingleOp: %s, Index: %zu, input is const", op_desc_->GetName().c_str(), i); + input_index++; continue; } - if (non_const_index >= inputs.size()) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Input size is %zu, but get non_const_index is %zu", - inputs.size(), non_const_index); - REPORT_INNER_ERROR("E19999", "[Check][Size] Input size is %zu, but get non_const_index is %zu", - inputs.size(), non_const_index); - return ACL_ERROR_GE_PARAM_INVALID; - } - auto addr = inputs[non_const_index].data; - GELOGD("SingleOp: %s, input[%zu], addr = %p", op_desc_->GetName().c_str(), i, addr); - args.emplace_back(addr); - non_const_index++; + arg_index_.emplace_back(input_index); + input_index++; } + return SUCCESS; +} - for (size_t i = 0; i < outputs.size(); ++i) { - auto addr = outputs[i].data; - GELOGD("SingleOp: %s, output[%zu] addr = %p", op_desc_->GetName().c_str(), i, addr); - args.emplace_back(addr); +Status TbeOpTask::UpdateIoAddr(const vector &inputs, const vector &outputs) { + if (arg_index_.size() != inputs.size()) { + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Args size is %zu, but get input size is %zu.", + arg_index_.size(), inputs.size()); + REPORT_INNER_ERROR("E19999", "[Check][Size] Args size is %zu, but get input size is %zu.", + arg_index_.size(), inputs.size()); + return ACL_ERROR_GE_PARAM_INVALID; + } + + uintptr_t *arg_base = reinterpret_cast(args_.get()); + for (size_t i = 0; i < arg_index_.size(); ++i) { + arg_base[arg_index_[i]] = reinterpret_cast(inputs[i].data); + } + + size_t input_size = op_desc_->GetInputsSize(); + for (size_t i = 0; i < op_desc_->GetOutputsSize(); ++i) { + arg_base[input_size + i] = reinterpret_cast(outputs[i].data); } return SUCCESS; @@ -398,37 +430,11 @@ Status TbeOpTask::LaunchKernel(const vector &input_desc, vector &output_buffers, rtStream_t stream) { GELOGD("[%s] Start to launch kernel", node_->GetName().c_str()); + GE_CHK_STATUS_RET(UpdateIoAddr(input_buffers, output_buffers), "[Update][IoAddr] failed."); GE_CHK_STATUS_RET_NOLOG(UpdateNodeByShape(input_desc, output_desc)); GE_CHK_STATUS_RET_NOLOG(UpdateRunInfo()); GE_CHK_STATUS_RET(AllocateWorkspaces(run_info_workspaces_), "[Allocate][Workspaces] failed."); - std::vector args; - GE_CHK_STATUS_RET(UpdateIoAddr(args, input_buffers, output_buffers), "[Update][IoAddr] failed."); - for (auto &buffer : workspaces_) { - 
args.emplace_back(buffer); - } - - if (tiling_buffer_ != nullptr) { - GELOGD("[%s] Start to copy tiling info. size = %zu", node_->GetName().c_str(), tiling_data_.size()); - GE_CHK_RT_RET(rtMemcpyAsync(tiling_buffer_, max_tiling_size_, tiling_data_.data(), tiling_data_.size(), - RT_MEMCPY_HOST_TO_DEVICE_EX, stream)); - - args.emplace_back(tiling_buffer_); - } - - GELOGD("Dst size is %zu, src size is %zu.", arg_size_, args.size() * sizeof(void *)); - // node with workspace: build can not get size of workspace, need to update arg_size_ when execute - if (arg_size_ < (args.size() * sizeof(void *))) { - size_t temp_size = args.size() * sizeof(void *); - GELOGD("Need to reset size of args_ from %zu to %zu.", arg_size_, temp_size); - args_.reset(new(std::nothrow) uint8_t[temp_size]()); - GE_CHECK_NOTNULL(args_); - arg_size_ = temp_size; - } - if (memcpy_s(args_.get(), arg_size_, args.data(), args.size() * sizeof(void *)) != EOK) { - GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "[Update][KernelArgs] failed for [%s].", node_->GetName().c_str()); - REPORT_INNER_ERROR("E19999", "update kernel args failed for %s.", node_->GetName().c_str()); - return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; - } + GE_CHK_STATUS_RET(UpdateTilingArgs(stream), "[Update][TilingArgs] failed."); GELOGD("[%s] Start to invoke rtKernelLaunch", node_->GetName().c_str()); GE_CHK_STATUS_RET(DoLaunchKernel(stream), "Failed to do launch kernel."); diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 0cbc1a29..d3e8383d 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -85,6 +85,7 @@ class TbeOpTask : public OpTask { const OpDescPtr &op_desc, const domi::KernelDefWithHandle& kernel_def_with_handle); Status UpdateRunInfo() override; + Status SetArgIndex(); const void *GetArgs() const; size_t GetArgSize() const; @@ -100,9 +101,9 @@ class TbeOpTask : public OpTask { Status UpdateNodeByShape(const vector &input_desc, const vector &output_desc); Status AllocateWorkspaces(const std::vector &workspace_sizes); + Status UpdateTilingArgs(rtStream_t stream); Status DoLaunchKernel(rtStream_t stream); - Status UpdateIoAddr(std::vector &args, const std::vector &inputs, - const std::vector &outputs); + Status UpdateIoAddr(const vector &inputs, const vector &outputs); const void *stub_func_ = nullptr; std::unique_ptr args_; @@ -122,6 +123,7 @@ class TbeOpTask : public OpTask { void* handle_ = nullptr; std::string original_kernel_key_; std::string node_info_; + std::vector arg_index_; }; class AiCpuBaseTask : public OpTask { diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc index c7ff13d1..e5206ea6 100644 --- a/ge/single_op/task/tbe_task_builder.cc +++ b/ge/single_op/task/tbe_task_builder.cc @@ -387,6 +387,7 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam ¶ } task.SetStubFunc(stub_name_, stub_func); } + GE_CHK_STATUS_RET(task.SetArgIndex(), "[Set][ArgTable] failed."); return SUCCESS; } diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 472a88c3..020efc23 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -95,7 +95,7 @@ TEST_F(UtestSingleOpTask, test_build_kernel_task) { vector input_desc; vector input_buffers = { data_buffer }; vector output_desc; - vector output_buffers; + vector output_buffers = { data_buffer }; task->node_ = node; OpTilingFunc op_tiling_func = [](const TeOpParas &, const 
OpCompileInfo &, OpRunInfo &) -> bool {return true;}; OpTilingRegistryInterf("Add", op_tiling_func); @@ -107,8 +107,7 @@ TEST_F(UtestSingleOpTask, test_build_kernel_task) { task->max_tiling_size_ = 64; task->tiling_data_ = "tiling_data"; task->arg_size_ = 64; - uint8_t task_args{0}; - task->args_.reset(&task_args); + task->args_.reset(new (std::nothrow) uint8_t[sizeof(void *) * 3]); ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS); char *handle = "00"; @@ -130,17 +129,25 @@ TEST_F(UtestSingleOpTask, test_update_ioaddr) { TbeOpTask task; task.op_desc_ = op_desc; - task.args_.reset(new (std::nothrow) uint8_t[sizeof(void *) * 3]); + task.node_ = node; + ASSERT_EQ(task.SetArgIndex(), SUCCESS); + task.arg_size_ = sizeof(void *) * 4; + task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]); + task.arg_index_ = {0}; vector args; vector inputs; vector outputs; - ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), ACL_ERROR_GE_PARAM_INVALID); - task.arg_size_ = sizeof(void *) * 3; - ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), ACL_ERROR_GE_PARAM_INVALID); + ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID); ge::DataBuffer data_buffer; inputs = { data_buffer }; - ASSERT_EQ(task.UpdateIoAddr(args, inputs, outputs), SUCCESS); + outputs = { data_buffer }; + ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS); + + task.tiling_buffer_ = (void *)0x0001; + task.workspaces_ = { (void *)0x0002 }; + ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS); + task.tiling_buffer_ = nullptr; } diff --git a/tests/ut/ge/single_op/single_op_unittest.cc b/tests/ut/ge/single_op/single_op_unittest.cc index db3de7ec..09aac153 100644 --- a/tests/ut/ge/single_op/single_op_unittest.cc +++ b/tests/ut/ge/single_op/single_op_unittest.cc @@ -103,7 +103,7 @@ TEST_F(UtestSingleOp, test_dynamic_singleop_execute_async1) { EXPECT_EQ(desc_ptr->AddInputDesc("x", GeTensorDesc(GeShape({2}), FORMAT_NCHW)), GRAPH_SUCCESS); dynamic_single_op.op_task_->op_desc_ = desc_ptr; // UpdateRunInfo failed - EXPECT_EQ(dynamic_single_op.ExecuteAsync(input_desc, input_buffers, output_desc, output_buffers), PARAM_INVALID); + EXPECT_EQ(dynamic_single_op.ExecuteAsync(input_desc, input_buffers, output_desc, output_buffers), ACL_ERROR_GE_PARAM_INVALID); } From a781b6c3548ec748d869cbb8d51e11d515c322a1 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 29 Jun 2021 16:56:06 +0800 Subject: [PATCH 109/226] Fix bug of atomic profiling. 
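
The fix is a small virtual hook: profiling records take their op type from the
task itself instead of from the fused node's OpDesc, and the atomic-clean task
pins it to a constant. A trimmed illustration (class names shortened,
everything unrelated omitted):

    #include <string>

    class AiCoreTaskSketch {
     public:
      virtual ~AiCoreTaskSketch() = default;
      // Reports the type captured from the OpDesc during Init().
      virtual const std::string &GetOpType() const { return op_type_; }
     protected:
      std::string op_type_;
    };

    class AtomicAddrCleanTaskSketch : public AiCoreTaskSketch {
     public:
      // Always profiles as DynamicAtomicAddrClean, regardless of the node
      // the task was generated for.
      const std::string &GetOpType() const override {
        static const std::string kAtomicOpType = "DynamicAtomicAddrClean";
        return kAtomicOpType;
      }
    };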
--- .../node_executor/aicore/aicore_node_executor.cc | 2 +- ge/hybrid/node_executor/aicore/aicore_op_task.cc | 10 ++++++++++ ge/hybrid/node_executor/aicore/aicore_op_task.h | 4 ++++ ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc | 2 +- ge/hybrid/node_executor/task_context.cc | 6 +++--- ge/hybrid/node_executor/task_context.h | 4 ++-- .../executor/worker/execution_engine_unittest.cc | 2 +- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 11 ++++++++++- 8 files changed, 32 insertions(+), 9 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc index c2ce24a4..7a22a062 100755 --- a/ge/hybrid/node_executor/aicore/aicore_node_executor.cc +++ b/ge/hybrid/node_executor/aicore/aicore_node_executor.cc @@ -208,7 +208,7 @@ Status AiCoreNodeTask::ExecuteAsync(TaskContext &context, std::function context.SetTaskId(task_id); context.SetStreamId(stream_id); GELOGD("Aicore node[%s] task_id: %u, stream_id: %u.", context.GetNodeName(), task_id, stream_id); - (void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicore, (*it)->GetBlockDim()); + (void)context.SaveProfilingTaskDescInfo(task_id, stream_id, kTaskTypeAicore, (*it)->GetBlockDim(), (*it)->GetOpType()); RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] End"); RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCoreNodeLaunchKernel] End"); } diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index a32f2999..b34cc0c6 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -33,6 +33,7 @@ namespace { constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; constexpr char const *kAttrOpParamSize = "op_para_size"; constexpr char const *kAttrAtomicOpParamSize = "atomic_op_para_size"; +const string kAtomicOpType = "DynamicAtomicAddrClean"; std::atomic log_id(0); } // namespace @@ -51,6 +52,7 @@ bool TbeHandleRegistry::AddHandle(std::unique_ptr &&holder) { } Status AiCoreOpTask::Init(const OpDesc &op_desc, const domi::TaskDef &task_def) { + op_type_ = op_desc.GetType(); log_name_ = op_desc.GetName() + "_tvmbin"; log_id_ = log_id++; auto op_desc_ptr = MakeShared(op_desc); @@ -538,6 +540,10 @@ const std::string &AiCoreOpTask::GetName() const { return stub_name_; } +const std::string &AiCoreOpTask::GetOpType() const { + return op_type_; +} + std::string AiCoreOpTask::GetKeyForOpParamSize() const { return kAttrOpParamSize; } @@ -631,6 +637,10 @@ std::string AtomicAddrCleanOpTask::GetKeyForKernelName(const OpDesc &op_desc) co return op_desc.GetName() + "_atomic_kernelname"; } +const std::string &AtomicAddrCleanOpTask::GetOpType() const { + return kAtomicOpType; +} + Status AtomicAddrCleanOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) { GELOGD("[%s] Start to invoke OpAtomicCalculate.", node->GetName().c_str()); GE_CHK_STATUS_RET(optiling::OpAtomicCalculateV2(*node, tiling_info), diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.h b/ge/hybrid/node_executor/aicore/aicore_op_task.h index b03bd9e4..8d7be0db 100755 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.h +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.h @@ -78,6 +78,8 @@ class AiCoreOpTask { void SetSingleOp(bool is_single_op) {is_single_op_ = is_single_op;}; + virtual const std::string& GetOpType() const; + protected: Status UpdateTilingInfo(TaskContext 
&context); virtual std::string GetKeyForOpParamSize() const; @@ -117,12 +119,14 @@ class AiCoreOpTask { uint64_t log_id_ = 0; std::string log_name_; uint32_t offset_ = 0; + std::string op_type_; }; class AtomicAddrCleanOpTask : public AiCoreOpTask { public: Status Init(const OpDesc &op_desc, const domi::TaskDef &task_def) override; Status UpdateArgs(TaskContext &task_context) override; + const std::string& GetOpType() const override; protected: std::string GetKeyForOpParamSize() const override; diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index c83a76d1..820c9b56 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -207,7 +207,7 @@ Status AicpuNodeTaskBase::ExecuteAsync(TaskContext &context, std::functionSynchronize(GetStream()); } -Status TaskContext::SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id, - const std::string &task_type, uint32_t block_dim) { +Status TaskContext::SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id, const std::string &task_type, + uint32_t block_dim, const std::string &op_type) { if (ProfilingManager::Instance().ProfilingModelLoadOn()) { const NodeItem &node_item = GetNodeItem(); auto op_desc = node_item.GetOpDesc(); @@ -589,7 +589,7 @@ Status TaskContext::SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream TaskDescInfo tmp_task_desc_info; tmp_task_desc_info.model_name = dynamic_model_name; tmp_task_desc_info.op_name = op_desc->GetName(); - tmp_task_desc_info.op_type = op_desc->GetType(); + tmp_task_desc_info.op_type = op_type; tmp_task_desc_info.block_dim = block_dim; tmp_task_desc_info.task_type = task_type; tmp_task_desc_info.task_id = task_id; diff --git a/ge/hybrid/node_executor/task_context.h b/ge/hybrid/node_executor/task_context.h index c96e194e..5304606b 100644 --- a/ge/hybrid/node_executor/task_context.h +++ b/ge/hybrid/node_executor/task_context.h @@ -118,8 +118,8 @@ class TaskContext { void *handle_ = nullptr; const std::vector& GetProfilingTaskDescInfo() const { return task_desc_info; } - Status SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id, - const std::string &task_type, uint32_t block_dim); + Status SaveProfilingTaskDescInfo(uint32_t task_id, uint32_t stream_id, const std::string &task_type, + uint32_t block_dim, const std::string &op_type); void ClearProfilingTaskDescInfo() { task_desc_info.clear(); } private: diff --git a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc index cc20d614..07701f4d 100644 --- a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc +++ b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc @@ -119,7 +119,7 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) { uint32_t stream_id = 1; std::string task_type = "rts"; uint32_t block_dim = 0; - node_state->GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim); + node_state->GetTaskContext()->SaveProfilingTaskDescInfo(task_id, stream_id, task_type, block_dim, op_desc->GetType()); ASSERT_TRUE(node_state->GetTaskContext() != nullptr); diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 4f14f628..688d3a34 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -100,7 +100,7 @@ TEST_F(UtestGeHybrid, 
aicore_op_task_init_success) { op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel); std::string kernel_name("kernel/Add"); AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name); - ASSERT_EQ(aicore_task->InitWithTaskDef(*op_desc.get(), task_def), SUCCESS); + ASSERT_EQ(aicore_task->Init(*op_desc.get(), task_def), SUCCESS); rtStream_t stream = nullptr; rtStreamCreate(&stream, 0); ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS); @@ -676,6 +676,15 @@ TEST_F(UtestGeHybrid, test_key_for_kernel_bin) { EXPECT_EQ(atomic_task->GetKeyForKernelName(op_desc), "Sum_atomic_kernelname"); } +TEST_F(UtestGeHybrid, test_op_type) { + auto aicore_task = std::unique_ptr(new(std::nothrow)hybrid::AiCoreOpTask()); + aicore_task->op_type_ = "Add"; + EXPECT_EQ(aicore_task->GetOpType(), "Add"); + + auto atomic_task = std::unique_ptr(new(std::nothrow)hybrid::AtomicAddrCleanOpTask()); + EXPECT_EQ(atomic_task->GetOpType(), "DynamicAtomicAddrClean"); +} + TEST_F(UtestGeHybrid, TestParseDependentInputNodesForHccl) { NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl", NodeExecutorManager::ExecutorType::HCCL); From 813f2fe4a2eb234153ce0ad5a5801c98bdb9be4f Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 29 Jun 2021 19:11:56 +0800 Subject: [PATCH 110/226] Fix ut. --- ge/single_op/task/op_task.cc | 10 ++++------ ge/single_op/task/op_task.h | 4 +++- ge/single_op/task/tbe_task_builder.cc | 2 ++ 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 632cd4d8..28ec7f64 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -346,7 +346,7 @@ Status TbeOpTask::AllocateWorkspaces(const vector &workspace_sizes) { } Status TbeOpTask::UpdateTilingArgs(rtStream_t stream) { - size_t args_size = op_desc_->GetInputsSize() + op_desc_->GetOutputsSize() + workspaces_.size(); + size_t args_size = input_num_ + output_num_ + workspaces_.size(); if (tiling_buffer_ != nullptr) { args_size++; } @@ -361,12 +361,12 @@ Status TbeOpTask::UpdateTilingArgs(rtStream_t stream) { return ACL_ERROR_GE_MEMORY_OPERATE_FAILED; } - args_.reset(args.release()); + args_ = std::move(args); arg_size_ = temp_size; } uintptr_t *arg_base = reinterpret_cast(args_.get()); - size_t arg_index = op_desc_->GetInputsSize() + op_desc_->GetOutputsSize(); + size_t arg_index = input_num_ + output_num_; for (size_t i = 0; i < workspaces_.size(); ++i) { arg_base[arg_index++] = reinterpret_cast(workspaces_[i]); } @@ -382,7 +382,6 @@ Status TbeOpTask::UpdateTilingArgs(rtStream_t stream) { } Status TbeOpTask::SetArgIndex() { - arg_index_.clear(); const vector v_is_input_const = op_desc_->GetIsInputConst(); size_t input_index = 0; for (size_t i = 0; i < op_desc_->GetAllInputsSize(); ++i) { @@ -416,9 +415,8 @@ Status TbeOpTask::UpdateIoAddr(const vector &inputs, const vector(inputs[i].data); } - size_t input_size = op_desc_->GetInputsSize(); for (size_t i = 0; i < op_desc_->GetOutputsSize(); ++i) { - arg_base[input_size + i] = reinterpret_cast(outputs[i].data); + arg_base[input_num_ + i] = reinterpret_cast(outputs[i].data); } return SUCCESS; diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index d3e8383d..f93e031a 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -123,7 +123,9 @@ class TbeOpTask : public OpTask { void* handle_ = nullptr; std::string original_kernel_key_; std::string node_info_; - std::vector arg_index_; + std::vector arg_index_; // data index in args + 
size_t input_num_; // Include const input + size_t output_num_; }; class AiCpuBaseTask : public OpTask { diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc index e5206ea6..db8ecfe2 100644 --- a/ge/single_op/task/tbe_task_builder.cc +++ b/ge/single_op/task/tbe_task_builder.cc @@ -388,6 +388,8 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam &param task.SetStubFunc(stub_name_, stub_func); } GE_CHK_STATUS_RET(task.SetArgIndex(), "[Set][ArgTable] failed."); + task.input_num_ = op_desc_->GetInputsSize(); + task.output_num_ = op_desc_->GetOutputsSize(); return SUCCESS; } From 59c97eef3eef0903c54052fe4b6428e5597db58c Mon Sep 17 00:00:00 2001 From: wq160 Date: Tue, 29 Jun 2021 10:58:45 +0800 Subject: [PATCH 111/226] set scalar tensor value range --- ge/graph/passes/infer_value_range_pass.cc | 3 ++ .../passes/replace_with_empty_const_pass.cc | 14 ++++-- .../passes/replace_with_empty_const_pass.h | 2 +- .../passes/infer_value_range_pass_unittest.cc | 48 +++++++++++++++++++ 4 files changed, 61 insertions(+), 6 deletions(-) diff --git a/ge/graph/passes/infer_value_range_pass.cc b/ge/graph/passes/infer_value_range_pass.cc index b9cb88bc..e714e90a 100644 --- a/ge/graph/passes/infer_value_range_pass.cc +++ b/ge/graph/passes/infer_value_range_pass.cc @@ -286,6 +286,9 @@ graphStatus InferValueRangePass::GenerateWorstValueRange(NodePtr &node) { } std::vector<std::pair<int64_t, int64_t>> output_i_value_range(output_i_shape_size, {1, -1}); + if (output_i_shape.IsScalar()) { + output_i_value_range.emplace_back(1, -1); + } output_desc->SetValueRange(output_i_value_range); GELOGD("Node %s output %zu shape is %s, the generated worst value range is %s.", node->GetName().c_str(), i, formats::ShapeToString(output_i_shape).c_str(), formats::RangeToString(output_i_value_range).c_str()); diff --git a/ge/graph/passes/replace_with_empty_const_pass.cc b/ge/graph/passes/replace_with_empty_const_pass.cc index 3176d1ee..6cb31627 100644 --- a/ge/graph/passes/replace_with_empty_const_pass.cc +++ b/ge/graph/passes/replace_with_empty_const_pass.cc @@ -71,7 +71,7 @@ Status ReplaceWithEmptyConstPass::Run(NodePtr &node) { GELOGI("Node %s Got empty output_desc_ptr, ignore current pass.", node->GetName().c_str()); return SUCCESS; } - if (!IsEmptyTenor(output_desc_ptr->GetShape())) { + if (!IsKnownEmptyTensor(output_desc_ptr->GetShape())) { is_all_output_empty = false; break; } @@ -107,12 +107,16 @@ Status ReplaceWithEmptyConstPass::GetOutputsOfCurrNode(const NodePtr &node_to_re return SUCCESS; } -bool ReplaceWithEmptyConstPass::IsEmptyTenor(const GeShape &shape) const { +bool ReplaceWithEmptyConstPass::IsKnownEmptyTensor(const GeShape &shape) const { + bool is_known_empty_tensor = false; for (auto dim : shape.GetDims()) { - if (dim == 0) { - return true; + if (dim < 0) { + // current dim is unknown dim, skip replace + return false; + } else if (dim == 0) { + is_known_empty_tensor = true; } } - return false; + return is_known_empty_tensor; } } // namespace ge diff --git a/ge/graph/passes/replace_with_empty_const_pass.h b/ge/graph/passes/replace_with_empty_const_pass.h index fde75358..90103432 100644 --- a/ge/graph/passes/replace_with_empty_const_pass.h +++ b/ge/graph/passes/replace_with_empty_const_pass.h @@ -26,7 +26,7 @@ class ReplaceWithEmptyConstPass : public FoldingPass { private: Status GetOutputsOfCurrNode(const NodePtr &node_to_replace, vector<GeTensorPtr> &outputs); - bool IsEmptyTenor(const GeShape &shape) const; + bool IsKnownEmptyTensor(const GeShape &shape) const; }; } // namespace ge #endif //
GE_GRAPH_PASSES_REPLACE_WITH_EMPTY_CONST_PASS_H_ diff --git a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc index fea1b27d..576d679c 100644 --- a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc @@ -362,6 +362,54 @@ TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHave EXPECT_EQ(unknown_target_value_range, output_value_range); } +TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHaveUnKnownValueRange_ScalarOutput) { + // shape --- add --- sqrt + // constant / + auto graph = std::make_shared<ComputeGraph>("test_graph"); + vector<int64_t> data_vec = {1}; + GeTensorDesc const_tensor_desc(ge::GeShape(), ge::FORMAT_NCHW, ge::DT_INT64); + GeTensorPtr const_tensor = + std::make_shared<GeTensor>(const_tensor_desc, (uint8_t *)data_vec.data(), data_vec.size() * sizeof(int64_t)); + + auto const_op_desc = std::make_shared<OpDesc>("Constant", "Constant"); + const_op_desc->AddOutputDesc(const_tensor_desc); + EXPECT_EQ(OpDescUtils::SetWeights(const_op_desc, const_tensor), GRAPH_SUCCESS); + auto const_node = graph->AddNode(const_op_desc); + + GeTensorDesc shape_tensor_desc(GeShape(), ge::FORMAT_NCHW, ge::DT_INT64); + std::vector<std::pair<int64_t, int64_t>> unknown_value_range = {make_pair(1, -1)}; + shape_tensor_desc.SetValueRange(unknown_value_range); + auto shape_op_desc = std::make_shared<OpDesc>("Shape", "Shape"); + shape_op_desc->AddOutputDesc(shape_tensor_desc); + auto shape_node = graph->AddNode(shape_op_desc); + + GeTensorDesc add_tensor_desc(GeShape(), ge::FORMAT_NCHW, ge::DT_INT64); + auto add_op_desc = std::make_shared<OpDesc>("Add", "Add"); + add_op_desc->AddInputDesc(shape_tensor_desc); + add_op_desc->AddInputDesc(const_tensor_desc); + add_op_desc->AddOutputDesc(add_tensor_desc); + auto add_node = graph->AddNode(add_op_desc); + + ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0)); + ge::GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1)); + + // test unknown value range + InferValueRangePass infer_pass; + EXPECT_EQ(infer_pass.Run(add_node), SUCCESS); + auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0); + std::vector<std::pair<int64_t, int64_t>> out_value_range; + output_0_desc.GetValueRange(out_value_range); + EXPECT_EQ(out_value_range.size(), 1); + + std::vector<int64_t> unknown_target_value_range = {1, -1}; + std::vector<int64_t> output_value_range; + for (auto pair : out_value_range) { + output_value_range.push_back(pair.first); + output_value_range.push_back(pair.second); + } + EXPECT_EQ(unknown_target_value_range, output_value_range); +} + TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsAreKnownValueRange_Int64) { // shape --- add --- sqrt // constant / From 4f3f54d56407d045fa37bf02508cc101a46e1bef Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Tue, 29 Jun 2021 19:19:08 +0800 Subject: [PATCH 112/226] Fix ut.
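The test below must now populate input_num_ and output_num_ by hand, because UpdateIoAddr and UpdateTilingArgs derive every argument offset from those counters instead of querying the OpDesc. A hedged sketch of the assumed args layout (the helper name and signature are illustrative only, not part of the patch):

    // Sketch only: the args buffer is laid out as
    // [inputs..., outputs..., workspaces..., tiling], so the trailing
    // sections start at input_num + output_num.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void FillTrailingArgs(uintptr_t *arg_base, size_t input_num, size_t output_num,
                          const std::vector<void *> &workspaces, void *tiling_buffer) {
      size_t index = input_num + output_num;
      for (void *ws : workspaces) {
        arg_base[index++] = reinterpret_cast<uintptr_t>(ws);
      }
      if (tiling_buffer != nullptr) {
        arg_base[index] = reinterpret_cast<uintptr_t>(tiling_buffer);  // tiling data goes last
      }
    }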
--- ge/single_op/task/op_task.h | 2 +- tests/ut/ge/single_op/single_op_task_unittest.cc | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index f93e031a..97d8a342 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -124,7 +124,7 @@ class TbeOpTask : public OpTask { std::string original_kernel_key_; std::string node_info_; std::vector<size_t> arg_index_; // data index in args - size_t input_num_; // Include const input + size_t input_num_; // include const input size_t output_num_; }; diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 020efc23..b0c98205 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -134,6 +134,8 @@ TEST_F(UtestSingleOpTask, test_update_ioaddr) { task.arg_size_ = sizeof(void *) * 4; task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]); task.arg_index_ = {0}; + task.input_num_ = 2; + task.output_num_ = 1; vector<void *> args; vector<DataBuffer> inputs; vector<DataBuffer> outputs; From 9f6aff6759714c0093045b563f9b6f5bba8d46a6 Mon Sep 17 00:00:00 2001 From: wuweikang Date: Fri, 18 Jun 2021 09:32:57 +0800 Subject: [PATCH 113/226] check dump option --- ge/common/dump/dump_properties.cc | 243 ++++++++++++++++-- ge/common/dump/dump_properties.h | 18 +- ge/session/inner_session.cc | 2 +- tests/ut/ge/CMakeLists.txt | 1 + .../ut/ge/common/dump_properties_unittest.cc | 126 +++++++++ 5 files changed, 364 insertions(+), 26 deletions(-) create mode 100644 tests/ut/ge/common/dump_properties_unittest.cc diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc index ef755540..010347c0 100644 --- a/ge/common/dump/dump_properties.cc +++ b/ge/common/dump/dump_properties.cc @@ -18,6 +18,7 @@ #include #include +#include <regex> #include "common/ge/ge_util.h" #include "framework/common/util.h" @@ -37,6 +38,159 @@ const uint32_t kAtomicOverflow = (0x1 << 1); const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow); } // namespace namespace ge { +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::Split(const std::string &s, + std::vector<std::string> &result, + const char *delchar) { + if (s.empty()) { + return; + } + result.clear(); + + char *buffer = new (std::nothrow) char[s.size() + 1]; + if (buffer == nullptr) { + GELOGE(FAILED, "[Split][string] failed while malloc memory, string value is:%s", s.c_str()); + REPORT_CALL_ERROR("E19999", "Memory malloc may fail when split string, get fatal exception, " + "string value is:%s", s.c_str()); + return; + } + buffer[s.size()] = '\0'; + errno_t e = strcpy_s(buffer, s.size() + 1, s.c_str()); + if (e != EOK) { + delete[] buffer; + return; + } + char *context = nullptr; + char *p = strtok_s(buffer, delchar, &context); + while (p != nullptr) { + result.emplace_back(p); + p = strtok_s(nullptr, delchar, &context); + } + delete[] buffer; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpStep(const std::string &dump_step) { + std::string modified_dump_step = dump_step + "|"; + std::smatch result; + std::vector<std::string> match_vecs; + std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)"); + if (regex_match(modified_dump_step, result, pattern)) { + Split(result.str(), match_vecs, "|"); + if (match_vecs.empty()) { + REPORT_CALL_ERROR("E19999", "Split may get fatal exception, dump_step:%s.", dump_step.c_str()); + GELOGE(FAILED, "[Check][Param] failed. Split may get fatal exception, ge.exec.dumpStep:%s.", dump_step.c_str()); + return FAILED; + } + // 100 is the max sets of dump steps. + if (match_vecs.size() > 100) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpStep", + dump_step.c_str(), + " is not supported, only supports dump <= 100 sets of data"})); + GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " + "dump_step only supports dump <= 100 sets of data.", dump_step.c_str()); + return PARAM_INVALID; + } + for (const auto &match_vec : match_vecs) { + std::vector<std::string> vec_after_split; + Split(match_vec, vec_after_split, "-"); + if (vec_after_split.empty()) { + REPORT_CALL_ERROR("E19999", "Split may get fatal exception."); + GELOGE(FAILED, "[Check][Param] failed, split may get fatal exception."); + return FAILED; + } + if (vec_after_split.size() > 1) { + if (std::atoi(vec_after_split[0].c_str()) >= std::atoi(vec_after_split[1].c_str())) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpStep", + dump_step.c_str(), + " is not supported. " + "in range steps, the first step is >= second step, correct example:'0|5|10-20'"})); + GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " + "in range steps, the first step is >= second step, correct example:'0|5|10-20'", dump_step.c_str()); + return PARAM_INVALID; + } + } + } + } else { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpStep", + dump_step.c_str(), + " is not supported, correct example:'0|5|10|50-100'."})); + GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " + "dump_step string style is invalid, correct example:'0|5|10|50-100'.", dump_step.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpMode(const std::string &dump_mode) { + const std::set<std::string> dump_mode_list = {"input", "output", "all"}; + std::set<std::string>::iterator iter; + + if ((iter = dump_mode_list.find(dump_mode)) == dump_mode_list.end()) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpMode", + dump_mode.c_str(), + " is not supported, should be one of the following:[input, output, all]"})); + GELOGE(PARAM_INVALID, "[Check][Param] the dump_debug_mode:%s, is not supported, " + "should be one of the following:[input, output, all].", dump_mode.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpPath(const std::string &input) { + if (mmIsDir(input.c_str()) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpPath", + input.c_str(), + " is not a directory."})); + GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, is not directory.", input.c_str()); + return PARAM_INVALID; + } + char trusted_path[MMPA_MAX_PATH] = { "\0" }; + if (mmRealPath(input.c_str(), trusted_path, MMPA_MAX_PATH) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpPath", + input.c_str(), + " dumpPath invalid."})); + GELOGE(PARAM_INVALID, "[Check][Param] the dumpPath:%s, is invalid.", input.c_str()); + return PARAM_INVALID; + } + if (mmAccess2(trusted_path, R_OK | W_OK) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.dumpPath", + input.c_str(), + " doesn't have read, write permissions."})); + GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, doesn't have read, write permissions.", input.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckEnableDump(const std::string &input) { + std::set<std::string> enable_dump_option_list = {"1", "0"}; + auto it = enable_dump_option_list.find(input); + if (it == enable_dump_option_list.end()) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.enableDump", + input.c_str(), + " only support 1 or 0."})); + GELOGE(PARAM_INVALID, "[Check][Param] Unsupported ge.exec.enableDump or ge.exec.enableDumpDebug format:%s, " + "only support 1 or 0.", input.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) { CopyFrom(other); } @@ -47,7 +201,26 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties: return *this; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOptions() { +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() { + if (enable_dump_ == kEnableFlag) { + std::string dump_step; + if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { + GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed."); + GELOGI("Get dump step %s successfully", dump_step.c_str()); + SetDumpStep(dump_step); + } + string dump_mode = "output"; + if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { + GELOGI("Get dump mode %s successfully", dump_mode.c_str()); + GE_CHK_STATUS_RET(CheckDumpMode(dump_mode), "[Check][dump_mode] failed."); + SetDumpMode(dump_mode); + } + AddPropertyValue(DUMP_ALL_MODEL, {}); + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOptions() { enable_dump_.clear(); enable_dump_debug_.clear(); dump_path_.clear(); @@ -57,17 +230,32 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti is_infer_op_debug_ = false; op_debug_mode_ = 0; - std::string enable_dump; + std::string enable_dump = std::to_string(false); (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP, enable_dump); enable_dump_ = enable_dump; + if (!enable_dump_.empty()) { + GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_), "[Check][enable_dump] failed."); + } - std::string enable_dump_debug; + std::string enable_dump_debug = std::to_string(false); (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP_DEBUG, enable_dump_debug); enable_dump_debug_ = enable_dump_debug; - + if (!enable_dump_debug_.empty()) { + GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_debug_), "[Check][enable_dump_debug] failed."); + } + if ((enable_dump_ == kEnableFlag) && (enable_dump_debug_ == kEnableFlag)) { + REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}), + std::vector<std::string>({ + "ge.exec.enableDump and ge.exec.enableDumpDebug", + enable_dump_ + ", " + enable_dump_debug, + "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be set to 1 at the same time."})); + GELOGE(FAILED, "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be both set to 1 at the same time."); + return FAILED; + } + if ((enable_dump_ == kEnableFlag) || (enable_dump_debug_ == kEnableFlag)) { + std::string dump_path; + if (GetContext().GetOption(OPTION_EXEC_DUMP_PATH, dump_path) ==
GRAPH_SUCCESS) { + GE_CHK_STATUS_RET(CheckDumpPath(dump_path), "Check dump path failed."); if (!dump_path.empty() && dump_path[dump_path.size() - 1] != '/') { dump_path = dump_path + "/"; } @@ -75,25 +263,21 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti GELOGI("Get dump path %s successfully", dump_path.c_str()); SetDumpPath(dump_path); } else { - GELOGW("Dump path is not set"); + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpPath", + dump_path, + "ge.exec.dumpPath is not set."})); + GELOGE(FAILED, "[Check][dump_path] failed. Dump path is not set."); + return FAILED; } } - if (enable_dump_ == kEnableFlag) { - std::string dump_step; - if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { - GELOGI("Get dump step %s successfully", dump_step.c_str()); - SetDumpStep(dump_step); - } - string dump_mode; - if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { - GELOGI("Get dump mode %s successfully", dump_mode.c_str()); - SetDumpMode(dump_mode); - } - AddPropertyValue(DUMP_ALL_MODEL, {}); - } + GE_CHK_STATUS_RET(SetDumpOptions(), "SetDumpOptions failed."); + + GE_CHK_STATUS_RET(SetDumpDebugOptions(), "SetDumpDebugOptions failed."); - SetDumpDebugOptions(); + return SUCCESS; } // The following is the new dump scenario of the fusion operator @@ -253,14 +437,20 @@ void DumpProperties::CopyFrom(const DumpProperties &other) { } } -void DumpProperties::SetDumpDebugOptions() { +Status DumpProperties::SetDumpDebugOptions() { if (enable_dump_debug_ == kEnableFlag) { std::string dump_debug_mode; if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) { GELOGD("Get dump debug mode %s successfully", dump_debug_mode.c_str()); } else { - GELOGW("Dump debug mode is not set."); - return; + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpDebugMode", + dump_debug_mode, + "ge.exec.dumpDebugMode is not set."})); + GELOGE(PARAM_INVALID, "[Check][dump_debug_mode] failed. 
Dump debug mode is not set."); + + return PARAM_INVALID; } if (dump_debug_mode == OP_DEBUG_AICORE) { @@ -276,10 +466,17 @@ void DumpProperties::SetDumpDebugOptions() { is_train_op_debug_ = true; op_debug_mode_ = kAllOverflow; } else { - GELOGW("ge.exec.dumpDebugMode is invalid."); + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpDebugMode", + dump_debug_mode, + "ge.exec.dumpDebugMode is invalid."})); + GELOGE(PARAM_INVALID, "[Set][DumpDebugOptions] failed, ge.exec.dumpDebugMode is invalid."); + return PARAM_INVALID; } } else { GELOGI("ge.exec.enableDumpDebug is false or is not set."); } + return SUCCESS; } } // namespace ge diff --git a/ge/common/dump/dump_properties.h b/ge/common/dump/dump_properties.h index 98487491..cbfc362d 100644 --- a/ge/common/dump/dump_properties.h +++ b/ge/common/dump/dump_properties.h @@ -23,6 +23,7 @@ #include namespace ge { +using Status = uint32_t; class DumpProperties { public: DumpProperties() = default; @@ -33,7 +34,7 @@ class DumpProperties { DumpProperties &operator=(const DumpProperties &dump); - void InitByOptions(); + Status InitByOptions(); void AddPropertyValue(const std::string &model, const std::set &layers); @@ -95,7 +96,20 @@ class DumpProperties { private: void CopyFrom(const DumpProperties &other); - void SetDumpDebugOptions(); + Status SetDumpDebugOptions(); + + Status SetDumpOptions(); + + void Split(const std::string &s, std::vector &result, const char *delchar); + + Status CheckDumpStep(const std::string &dump_step); + + Status CheckDumpMode(const std::string &dump_mode); + + Status CheckDumpPath(const std::string &input); + + Status CheckEnableDump(const std::string &input); + std::string enable_dump_; std::string enable_dump_debug_; diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index aabbe19c..b3df08ce 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -109,7 +109,7 @@ Status InnerSession::Initialize() { GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId())); DumpProperties dump_properties; - dump_properties.InitByOptions(); + GE_CHK_STATUS_RET(dump_properties.InitByOptions(), "Init dump properties failed."); GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "[Add][DumpProperties] failed."); ret = graph_manager_.Initialize(options_); diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index cf573343..d7568ccc 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -774,6 +774,7 @@ set(MULTI_PARTS_TEST_FILES "common/util_unittest.cc" "common/dump_manager_unittest.cc" "common/dump_op_unittest.cc" + "common/dump_properties_unittest.cc" "common/dump_exception_unittest.cc" "common/opdebug_register_unittest.cc" "common/format_transfer_unittest.cc" diff --git a/tests/ut/ge/common/dump_properties_unittest.cc b/tests/ut/ge/common/dump_properties_unittest.cc new file mode 100644 index 00000000..57809013 --- /dev/null +++ b/tests/ut/ge/common/dump_properties_unittest.cc @@ -0,0 +1,126 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#define protected public +#define private public + +#include "common/dump/dump_properties.h" +#include "ge_local_context.h" +#include "ge/ge_api_types.h" +#include "common/debug/log.h" +#include "common/ge_inner_error_codes.h" + +namespace ge { +class UTEST_dump_properties : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UTEST_dump_properties, check_dump_step) { + DumpProperties dp; + std::string dump_step{"0|3-5|10"}; + std::string unsupport_input1{"0|5-3|10"}; + std::string unsupport_input2{"one"}; + std::string unsupport_input3; + for (int i = 0; i < 200; ++i) { + unsupport_input3 += std::to_string(i) + "|"; + } + unsupport_input3.pop_back(); + Status st = dp.CheckDumpStep(dump_step); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input1); + EXPECT_NE(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input2); + EXPECT_NE(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input3); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_dump_mode) { + DumpProperties dp; + std::string dump_mode_1{"input"}; + std::string dump_mode_2{"output"}; + std::string dump_mode_3{"all"}; + std::string unsupport_input1{"mode1"}; + Status st = dp.CheckDumpMode(dump_mode_1); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(dump_mode_2); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(dump_mode_3); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(unsupport_input1); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_dump_path) { + DumpProperties dp; + std::string dump_path{"/tmp/"}; + std::string unsupport_input1{" \\unsupported"}; + Status st = dp.CheckDumpPath(dump_path); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpPath(unsupport_input1); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_enable_dump) { + DumpProperties dp; + std::string enable_dump_t{"1"}; + std::string enable_dump_f{"0"}; + std::string unsupport_input1{"true"}; + std::string unsupport_input2{"false"}; + Status st = dp.CheckEnableDump(enable_dump_t); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckEnableDump(enable_dump_f); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckEnableDump(unsupport_input1); + EXPECT_NE(st, SUCCESS); + st = dp.CheckEnableDump(unsupport_input2); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_success_1) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}, + {OPTION_EXEC_DUMP_STEP, "0|1-3|10"}, + {OPTION_EXEC_DUMP_MODE, "all"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_EQ(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_success_2) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}, + {OPTION_EXEC_DUMP_DEBUG_MODE, "aicore_overflow"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_EQ(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_failed) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_NE(st, SUCCESS); +} +} // namespace ge \ No newline at end of file From e1c506026cc082fed10f63630bfb3c2275ce245b Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Tue, 29 
Jun 2021 21:08:22 +0800 Subject: [PATCH 114/226] fix sc --- ge/graph/preprocess/multi_batch_copy_graph.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/preprocess/multi_batch_copy_graph.cc b/ge/graph/preprocess/multi_batch_copy_graph.cc index 1634c8ce..fd3a4e91 100644 --- a/ge/graph/preprocess/multi_batch_copy_graph.cc +++ b/ge/graph/preprocess/multi_batch_copy_graph.cc @@ -1206,7 +1206,7 @@ Status MultiBatchGraphCopyer::CheckCopyResult(const std::vector &start_ auto dims = NodeUtils::GetOutputDesc(*node, kDataOutIndex).GetShape().GetDims(); if (!IsAllDimsPositive(dims)) { REPORT_CALL_ERROR("E19999", "Failed to copy multi batch graph, the node %s still has unknown shape %s", - node->GetName().c_str(), formats::ShapeToString(dims).c_str()); + node->GetName().c_str(), formats::ShapeToString(dims).c_str()); GELOGE(INTERNAL_ERROR, "[Check][Param] Failed to copy multi batch graph, the node %s still has unknown shape %s", node->GetName().c_str(), formats::ShapeToString(dims).c_str()); return INTERNAL_ERROR; From faaef130b4192da826a7965006b8952969bf8218 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 30 Jun 2021 10:20:59 +0800 Subject: [PATCH 115/226] Add ut. --- ge/single_op/single_op.h | 1 - 1 file changed, 1 deletion(-) diff --git a/ge/single_op/single_op.h b/ge/single_op/single_op.h index 7e05dd5f..94d7227b 100755 --- a/ge/single_op/single_op.h +++ b/ge/single_op/single_op.h @@ -92,7 +92,6 @@ class DynamicSingleOp { rtStream_t stream_ = nullptr; size_t num_inputs_ = 0; size_t num_outputs_ = 0; - ComputeGraphPtr compute_graph_; }; } // namespace ge #endif // GE_SINGLE_OP_SINGLE_OP_H_ From e7296ed73c1b133df356245791dc533e2e4b8e6f Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Wed, 30 Jun 2021 15:19:38 +0800 Subject: [PATCH 116/226] fix sc + --- ge/ge_runtime/task/label_goto_task.cc | 2 +- ge/single_op/task/op_task.h | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ge/ge_runtime/task/label_goto_task.cc b/ge/ge_runtime/task/label_goto_task.cc index 4302bff3..7cb6d556 100644 --- a/ge/ge_runtime/task/label_goto_task.cc +++ b/ge/ge_runtime/task/label_goto_task.cc @@ -72,7 +72,7 @@ bool LabelGotoTask::Distribute() { return false; } - rt_ret = rtLabelListCpy((void**)label_list.data(), label_list.size(), label_info_, label_info_size); + rt_ret = rtLabelListCpy(reinterpret_cast(label_list.data()), label_list.size(), label_info_, label_info_size); if (rt_ret != RT_ERROR_NONE) { GELOGE(RT_FAILED, "Call rt api failed, ret: %#x", rt_ret); return false; diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 19320bc0..75b0707b 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -33,6 +33,10 @@ #include "register/op_tiling.h" namespace ge { +namespace { +const int kAddressNum = 2; +} // namespace + class StreamResource; struct SingleOpModelParam; class OpTask { @@ -256,7 +260,7 @@ class MemcpyAsyncTask : public OpTask { friend class SingleOpModel; friend class RtsKernelTaskBuilder; - uintptr_t addresses_[2]; + uintptr_t addresses_[kAddressNum]; size_t dst_max_; size_t count_; rtMemcpyKind_t kind_; From 2c7342bb3ae956e6f4884cc0e99068d215c178b0 Mon Sep 17 00:00:00 2001 From: wq160 Date: Tue, 29 Jun 2021 21:10:52 +0800 Subject: [PATCH 117/226] add scalar tensor value range process --- ge/graph/passes/infer_value_range_pass.cc | 30 ++++++++++--- .../passes/infer_value_range_pass_unittest.cc | 45 +++++++++++++++++++ 2 files changed, 70 insertions(+), 5 deletions(-) diff --git 
a/ge/graph/passes/infer_value_range_pass.cc b/ge/graph/passes/infer_value_range_pass.cc index e714e90a..03a18fdb 100644 --- a/ge/graph/passes/infer_value_range_pass.cc +++ b/ge/graph/passes/infer_value_range_pass.cc @@ -301,12 +301,26 @@ graphStatus InferValueRangePass::ConstructData(const GeTensorDesc &tensor_desc, GeTensorPtr &output_ptr) { std::vector> value_range; (void)tensor_desc.GetValueRange(value_range); - if (static_cast(value_range.size()) != tensor_desc.GetShape().GetShapeSize()) { - GELOGW("Value range of input %s is invalid.", tensor_desc.GetName().c_str()); + size_t value_range_data_num = value_range.size(); + auto tensor_shape = tensor_desc.GetShape(); + bool value_range_and_tensor_shape_matched = true; + if (tensor_shape.IsScalar()){ + // scalar tensor has only one value_range pair + if (value_range_data_num != 1) { + value_range_and_tensor_shape_matched = false; + } + } else { + // normal tensor, value_range size is equal to tensor shape size. + if (static_cast(value_range_data_num) != tensor_shape.GetShapeSize()) { + value_range_and_tensor_shape_matched = false; + } + } + if (!value_range_and_tensor_shape_matched) { + GELOGW("Input %s value range and tensor shape do not match. Value range size is %zu, tensor shape is %s.", + tensor_desc.GetName().c_str(), value_range_data_num, formats::ShapeToString(tensor_shape).c_str()); return GRAPH_PARAM_INVALID; } - size_t value_range_data_num = value_range.size(); unique_ptr buf(new (std::nothrow) T[value_range_data_num]()); if (buf == nullptr) { REPORT_INNER_ERROR("E19999", "New buf failed"); @@ -494,10 +508,16 @@ void InferValueRangePass::ConstructValueRange(const GeTensorPtr &left_tensor, co GELOGI("Output tensor of cpu kernel does not have data, no way to set value range."); return; } - for (auto j = 0; j < left_tensor->GetTensorDesc().GetShape().GetShapeSize(); ++j) { + auto left_tensor_shape = left_tensor->GetTensorDesc().GetShape(); + for (auto j = 0; j < left_tensor_shape.GetShapeSize(); ++j) { auto left = static_cast(*(x + j)); auto right = static_cast(*(y + j)); - value_range.emplace_back(std::make_pair(left, right)); + value_range.emplace_back(left, right); + } + + if (left_tensor_shape.IsScalar()) { + GELOGD("When inferring value range, output tensors of cpu kernel are scalar tensors."); + value_range.emplace_back(static_cast(*x), static_cast(*y)); } } } // namespace ge diff --git a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc index 576d679c..c39755b3 100644 --- a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc @@ -293,6 +293,9 @@ class AddKernel : public Kernel { } else if (input[0]->GetTensorDesc().GetDataType() == DT_INT32 || input[0]->GetTensorDesc().GetDataType() == DT_UINT32) { vector data_vec; auto data_num = input[0]->GetTensorDesc().GetShape().GetShapeSize(); + if (input[0]->GetTensorDesc().GetShape().IsScalar()) { + data_num = 1; + } auto x1_data = reinterpret_cast(input[0]->GetData().data()); auto x2_data = reinterpret_cast(input[1]->GetData().data()); for (size_t i = 0; i < data_num; i++) { @@ -410,6 +413,48 @@ TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHave EXPECT_EQ(unknown_target_value_range, output_value_range); } +TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsAreKnownValueRange_ScalarOutput) { + // shape --- add --- sqrt + // constant / + auto graph = std::make_shared("test_graph"); + 
vector data_vec = {2}; + GeTensorDesc const_td(ge::GeShape(), ge::FORMAT_NCHW, ge::DT_INT32); + GeTensorPtr const_tensor = std::make_shared(const_td, (uint8_t *)data_vec.data(), sizeof(int32_t)); + auto const_op_desc = std::make_shared("Constant", "Constant"); + const_op_desc->AddOutputDesc(const_td); + EXPECT_EQ(OpDescUtils::SetWeights(const_op_desc, const_tensor), GRAPH_SUCCESS); + auto const_node = graph->AddNode(const_op_desc); + + GeTensorDesc shape_td(GeShape(), ge::FORMAT_NCHW, ge::DT_INT32); + std::vector> known_value_range = {make_pair(1, 100)}; + shape_td.SetValueRange(known_value_range); + auto shape_op_desc = std::make_shared("Shape", "Shape"); + shape_op_desc->AddOutputDesc(shape_td); + auto shape_node = graph->AddNode(shape_op_desc); + + GeTensorDesc add_td(GeShape(), ge::FORMAT_NCHW, ge::DT_INT32); + auto add_op_desc = std::make_shared("Add", "Add"); + add_op_desc->AddInputDesc(shape_td); + add_op_desc->AddInputDesc(const_td); + add_op_desc->AddOutputDesc(add_td); + auto add_node = graph->AddNode(add_op_desc); + + ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0)); + ge::GraphUtils::AddEdge(const_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1)); + + InferValueRangePass infer_pass; + EXPECT_EQ(infer_pass.Run(add_node), SUCCESS); + + auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0); + std::vector> out_value_range; + output_0_desc.GetValueRange(out_value_range); + EXPECT_EQ(out_value_range.size(), 1); + + std::vector target_value_range = {3, 102}; + std::vector output_value_range = {out_value_range[0].first, out_value_range[0].second}; + EXPECT_EQ(output_value_range, target_value_range); +} + TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsAreKnownValueRange_Int64) { // shape --- add --- sqrt // constant / From f9e7359ad37746bacdb246e255cc32c7250d767a Mon Sep 17 00:00:00 2001 From: chenyemeng Date: Wed, 30 Jun 2021 16:10:57 +0800 Subject: [PATCH 118/226] header file update --- .../inc/external/runtime/rt_error_codes.h | 7 + third_party/fwkacllib/inc/runtime/base.h | 8 +- third_party/fwkacllib/inc/runtime/config.h | 43 +++++ third_party/fwkacllib/inc/runtime/dev.h | 5 + third_party/fwkacllib/inc/runtime/event.h | 37 +++- third_party/fwkacllib/inc/runtime/kernel.h | 83 +++++++-- third_party/fwkacllib/inc/runtime/mem.h | 11 ++ third_party/fwkacllib/inc/runtime/rt_model.h | 13 +- .../fwkacllib/inc/toolchain/prof_callback.h | 31 +++- third_party/fwkacllib/inc/toolchain/slog.h | 62 +++---- .../inc/toolchain/tuning_tool/tune_api.h | 160 ++++++++++-------- 11 files changed, 332 insertions(+), 128 deletions(-) diff --git a/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h b/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h index 67146dbe..9f216a56 100755 --- a/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h +++ b/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h @@ -38,6 +38,7 @@ static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015; // callba static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016; // invalid memory type static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017; // invalid handle static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018; // invalid malloc type +static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019; // wait timeout static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000; // feature not support static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001; // memory allocation error @@ -50,6 +51,7 @@ 
static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007; // no eve static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008; // no stream resource static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009; // no notify resource static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010; // no model resource +static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011; // no cdq resource static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000; // runtime internal error static const int32_t ACL_ERROR_RT_TS_ERROR = 507001; // ts internel error @@ -85,9 +87,14 @@ static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout +static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception +static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error +static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect #ifdef __cplusplus } diff --git a/third_party/fwkacllib/inc/runtime/base.h b/third_party/fwkacllib/inc/runtime/base.h index 40bc91f7..7fc1cdea 100644 --- a/third_party/fwkacllib/inc/runtime/base.h +++ b/third_party/fwkacllib/inc/runtime/base.h @@ -156,7 +156,7 @@ RTS_API rtError_t rtProfilerTrace(uint64_t id, bool notify, uint32_t flags, rtSt /** * @ingroup profiling_base - * @brief ts send keypoint for step info. + * @brief ts send keypoint profiler log. 
*/ RTS_API rtError_t rtProfilerTraceEx(uint64_t id, uint64_t modelId, uint16_t tagId, rtStream_t stream); @@ -206,7 +206,7 @@ RTS_API rtError_t rtRegDeviceStateCallback(const char *regName, rtDeviceStateCal /** * @ingroup dvrt_base - * @brief register callback for fail task + * @brief register callback for fail task * @param [in] uniName unique register name, can't be null * @param [in] callback fail task callback function * @param [out] NA @@ -345,11 +345,11 @@ RTS_API rtError_t rtLabelCreateEx(rtLabel_t *label, rtStream_t stream); * @return RT_ERROR_NONE for ok * @return RT_ERROR_INVALID_VALUE for error input */ -rtError_t rtLabelCreateExV2(rtLabel_t *label, rtModel_t model, rtStream_t stream); +RTS_API rtError_t rtLabelCreateExV2(rtLabel_t *label, rtModel_t model, rtStream_t stream); /** * @ingroup dvrt_base - * @brief get current thread last stream id and task id + * @brief get current thread last stream id and task id * @param [out] stream id and task id * @param [in] null * @return RT_ERROR_NONE for ok diff --git a/third_party/fwkacllib/inc/runtime/config.h b/third_party/fwkacllib/inc/runtime/config.h index ee104693..c1327c45 100644 --- a/third_party/fwkacllib/inc/runtime/config.h +++ b/third_party/fwkacllib/inc/runtime/config.h @@ -46,6 +46,12 @@ typedef enum tagRtChipType { CHIP_END, } rtChipType_t; +typedef enum tagRtAicpuScheType { + SCHEDULE_SOFTWARE = 0, /* Software Schedule */ + SCHEDULE_SOFTWARE_OPT, + SCHEDULE_HARDWARE, /* HWTS Schedule */ +} rtAicpuScheType; + typedef enum tagRtVersion { VER_BEGIN = 0, VER_NA = VER_BEGIN, @@ -65,6 +71,7 @@ typedef enum tagRtPlatformType { PLATFORM_LHISI_CS, PLATFORM_DC, PLATFORM_CLOUD_V2, + PLATFORM_LHISI_SD3403, PLATFORM_END, } rtPlatformType_t; @@ -126,6 +133,11 @@ typedef struct tagRtPlatformConfig { uint32_t platformConfig; } rtPlatformConfig_t; +typedef enum tagRTTaskTimeoutType { + RT_TIMEOUT_TYPE_OP_WAIT = 0, + RT_TIMEOUT_TYPE_OP_EXECUTE, +} rtTaskTimeoutType_t; + /** * @ingroup * @brief get AI core count @@ -184,6 +196,37 @@ RTS_API rtError_t rtMemGetL2Info(rtStream_t stream, void **ptr, uint32_t *size); */ RTS_API rtError_t rtGetRuntimeVersion(uint32_t *runtimeVersion); + +/** + * @ingroup + * @brief get device feature ability by device id, such as task schedule ability. + * @param [in] deviceId + * @param [in] moduleType + * @param [in] featureType + * @param [out] value + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtGetDeviceCapability(int32_t deviceId, int32_t moduleType, int32_t featureType, int32_t *value); + +/** + * @ingroup + * @brief set event wait task timeout time. + * @param [in] timeout + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtSetOpWaitTimeOut(uint32_t timeout); + +/** + * @ingroup + * @brief set op execute task timeout time. 
+ * @param [in] timeout + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtSetOpExecuteTimeOut(uint32_t timeout); + #if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) } #endif diff --git a/third_party/fwkacllib/inc/runtime/dev.h b/third_party/fwkacllib/inc/runtime/dev.h index e82ec5fa..2cf6712f 100644 --- a/third_party/fwkacllib/inc/runtime/dev.h +++ b/third_party/fwkacllib/inc/runtime/dev.h @@ -63,6 +63,11 @@ typedef enum tagRtFeatureType { FEATURE_TYPE_RSV } rtFeatureType_t; +typedef enum tagRtDeviceFeatureType { + FEATURE_TYPE_SCHE, + FEATURE_TYPE_END, +} rtDeviceFeatureType_t; + typedef enum tagMemcpyInfo { MEMCPY_INFO_SUPPORT_ZEROCOPY = 0, MEMCPY_INFO_RSV diff --git a/third_party/fwkacllib/inc/runtime/event.h b/third_party/fwkacllib/inc/runtime/event.h index 41e611ea..57948c47 100644 --- a/third_party/fwkacllib/inc/runtime/event.h +++ b/third_party/fwkacllib/inc/runtime/event.h @@ -23,12 +23,23 @@ extern "C" { #endif +typedef enum rtEventWaitStatus { + EVENT_STATUS_COMPLETE = 0, + EVENT_STATUS_NOT_READY = 1, + EVENT_STATUS_MAX = 2, +} rtEventWaitStatus_t; + /** * @ingroup event_flags * @brief event op bit flags */ -#define RT_EVENT_DEFAULT (0x00) -#define RT_EVENT_WITH_FLAG (0x01) +#define RT_EVENT_DEFAULT (0x0E) +#define RT_EVENT_WITH_FLAG (0x0B) + +#define RT_EVENT_DDSYNC_NS 0x01U +#define RT_EVENT_STREAM_MARK 0x02U +#define RT_EVENT_DDSYNC 0x04U +#define RT_EVENT_TIME_LINE 0x08U /** * @ingroup dvrt_event @@ -104,6 +115,16 @@ RTS_API rtError_t rtEventSynchronize(rtEvent_t event); */ RTS_API rtError_t rtEventQuery(rtEvent_t event); +/** + * @ingroup dvrt_event + * @brief Queries an event's wait status + * @param [in] event event to query + * @param [in out] EVENT_WAIT_STATUS status + * @return EVENT_STATUS_COMPLETE for complete + * @return EVENT_STATUS_NOT_READY for not complete + */ +RTS_API rtError_t rtEventQueryWaitStatus(rtEvent_t event, rtEventWaitStatus_t *status); + /** * @ingroup dvrt_event * @brief computes the elapsed time between events. 
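For reference, a hedged usage sketch of the wait-status query added above; the helper name, the include paths, and RT_ERROR_NONE (from base.h) are assumptions, not part of this header:

    #include "runtime/base.h"
    #include "runtime/event.h"

    // Sketch only: returns true once the wait submitted for this event has completed.
    static bool WaitCompleted(rtEvent_t event) {
      rtEventWaitStatus_t status = EVENT_STATUS_MAX;
      const rtError_t ret = rtEventQueryWaitStatus(event, &status);
      return (ret == RT_ERROR_NONE) && (status == EVENT_STATUS_COMPLETE);
    }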
@@ -176,6 +197,18 @@ RTS_API rtError_t rtNotifyRecord(rtNotify_t notify, rtStream_t stream); */ RTS_API rtError_t rtNotifyWait(rtNotify_t notify, rtStream_t stream); +/** + * @ingroup dvrt_event + * @brief Wait for a notify with time out + * @param [in] notify_ notify to be wait + * @param [in] stream_ input stream + * @param [in] timeOut input timeOut + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + * @return RT_ERROR_STREAM_CONTEXT for stream is not in current ctx + */ +RTS_API rtError_t rtNotifyWaitWithTimeOut(rtNotify_t notify_, rtStream_t stream_, uint32_t timeOut); + /** * @ingroup dvrt_event * @brief Name a notify diff --git a/third_party/fwkacllib/inc/runtime/kernel.h b/third_party/fwkacllib/inc/runtime/kernel.h index 402fadef..9b0221c7 100644 --- a/third_party/fwkacllib/inc/runtime/kernel.h +++ b/third_party/fwkacllib/inc/runtime/kernel.h @@ -111,6 +111,16 @@ typedef struct rtKernelInfo { uint32_t module_size; } *rtKernelInfo_t; +/** + * @ingroup rt_kernel + * @brief op name + */ +typedef struct rtKernelLaunchNames { + const char *soName; // defined for so name + const char *kernelName; // defined for kernel type name + const char *opName; // defined for operator name +} rtKernelLaunchNames_t; + /** * @ingroup rt_KernelConfigDump * @brief device dump type @@ -173,13 +183,7 @@ typedef void (*rtCallback_t)(void *fnData); * @ingroup rt_kernel * @brief magic number of elf binary for aicube */ -#define RT_DEV_BINARY_MAGIC_ELF_AICUBE 0x41415247 - -/** - * @ingroup rt_kernel - * @brief magic number of elf binary for aivector - */ -#define RT_DEV_BINARY_MAGIC_ELF_AIVECTOR 0x41415248 +#define RT_DEV_BINARY_MAGIC_ELF_AICUBE 0x41494343 /** * @ingroup rt_kernel_flags @@ -192,14 +196,14 @@ typedef void (*rtCallback_t)(void *fnData); #define RT_KERNEL_CUSTOM_AICPU (0x08) // STARS topic scheduler sqe : topic_type -#define RT_KERNEL_DEVICE_FIRST (0X10) -#define RT_KERNEL_HOST_ONLY (0X20) -#define RT_KERNEL_HOST_FIRST (0X30) +#define RT_KERNEL_DEVICE_FIRST (0x10) +#define RT_KERNEL_HOST_ONLY (0x20) +#define RT_KERNEL_HOST_FIRST (0x40) /** * @ingroup rt_kernel * @brief kernel mode - */ +**/ #define RT_DEFAULT_KERNEL_MODE (0x00) #define RT_NORMAL_KERNEL_MODE (0x01) #define RT_ALL_KERNEL_MODE (0x02) @@ -222,7 +226,7 @@ RTS_API rtError_t rtDevBinaryRegister(const rtDevBinary_t *bin, void **handle); /** * @ingroup rt_kernel - * @brief register device binary + * @brief register device binary with all kernel * @param [in] bin device binary description * @param [out] handle device binary handle * @return RT_ERROR_NONE for ok @@ -341,7 +345,7 @@ RTS_API rtError_t rtKernelLaunch(const void *stubFunc, uint32_t blockDim, void * * @ingroup rt_kernel * @brief launch kernel with handle to device * @param [in] handle program - * @param [in] devFunc device function description + * @param [in] devFunc device function description. 
* @param [in] blockDim block dimentions * @param [in] args argments address for kernel function * @param [in] argsSize argements size @@ -352,7 +356,7 @@ RTS_API rtError_t rtKernelLaunch(const void *stubFunc, uint32_t blockDim, void * * @return RT_ERROR_INVALID_VALUE for error input */ RTS_API rtError_t rtKernelLaunchWithHandle(void *handle, const void *devFunc, uint32_t blockDim, void *args, uint32_t argsSize, - rtSmDesc_t *smDesc, rtStream_t stream, const void *kernelInfo); + rtSmDesc_t *smDesc, rtStream_t stream_, const void *kernelInfo); /** * @ingroup rt_kernel @@ -371,7 +375,7 @@ RTS_API rtError_t rtKernelLaunchWithFlag(const void *stubFunc, uint32_t blockDim rtSmDesc_t *smDesc, rtStream_t stream, uint32_t flags); /** - * @ingroup rt_kernel + * @ingroup rt_kernel(abandoned) * @brief launch kernel to device * @param [in] args argments address for kernel function * @param [in] argsSize argements size @@ -383,7 +387,21 @@ RTS_API rtError_t rtKernelLaunchWithFlag(const void *stubFunc, uint32_t blockDim RTS_API rtError_t rtKernelLaunchEx(void *args, uint32_t argsSize, uint32_t flags, rtStream_t stream); /** - * @ingroup rt_kernel + * @ingroup rt_kernel(in use) + * @brief launch kernel to device + * @param [in] opName opkernel name + * @param [in] args argments address for kernel function + * @param [in] argsSize argements size + * @param [in] flags launch flags + * @param [in] stream associated stream + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtKernelLaunchFwk(const char *opName, void *args, uint32_t argsSize, uint32_t flags, + rtStream_t rtStream); + +/** + * @ingroup rt_kernel(abandoned) * @brief launch cpu kernel to device * @param [in] soName so name * @param [in] kernelName kernel name @@ -399,7 +417,22 @@ RTS_API rtError_t rtCpuKernelLaunch(const void *soName, const void *kernelName, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream); /** - * @ingroup rt_kernel + * @ingroup rt_kernel(in use) + * @brief launch cpu kernel to device + * @param [in] launchNames names for kernel launch + * @param [in] blockDim block dimentions + * @param [in] args argments address for kernel function + * @param [in] argsSize argments size + * @param [in] smDesc shared memory description + * @param [in] stream associated stream + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtAicpuKernelLaunch(const rtKernelLaunchNames_t *launchNames, + uint32_t blockDim, const void *args, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream); + +/** + * @ingroup rt_kernel(abandoned) * @brief launch cpu kernel to device with dump identifier * @param [in] soName so name * @param [in] kernelName kernel name @@ -416,6 +449,22 @@ RTS_API rtError_t rtCpuKernelLaunchWithFlag(const void *soName, const void *kern const void *args, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream, uint32_t flags); +/** + * @ingroup rt_kernel(in use) + * @brief launch cpu kernel to device with dump identifier + * @param [in] launchNames names for kernel launch + * @param [in] blockDim block dimentions + * @param [in] args argments address for kernel function + * @param [in] argsSize argments size + * @param [in] smDesc shared memory description + * @param [in] stream associated stream + * @param [in] flag dump flag or others function flag + * @return RT_ERROR_NONE for ok + * @return RT_ERROR_INVALID_VALUE for error input + */ +RTS_API rtError_t rtAicpuKernelLaunchWithFlag(const rtKernelLaunchNames_t 
+    const void *args, uint32_t argsSize, rtSmDesc_t *smDesc, rtStream_t stream, uint32_t flags);
+
 /**
  * @ingroup rt_kernel
  * @brief L1 fusion dump addr transfered to device
diff --git a/third_party/fwkacllib/inc/runtime/mem.h b/third_party/fwkacllib/inc/runtime/mem.h
index 30af85d9..bace4bc6 100644
--- a/third_party/fwkacllib/inc/runtime/mem.h
+++ b/third_party/fwkacllib/inc/runtime/mem.h
@@ -116,6 +116,9 @@ typedef enum tagRtMemInfoType {
 typedef enum tagRtRecudeKind {
   RT_MEMCPY_SDMA_AUTOMATIC_ADD = 10,  // D2D, SDMA inline reduce, include 1P, and P2P
+  RT_MEMCPY_SDMA_AUTOMATIC_MAX = 11,
+  RT_MEMCPY_SDMA_AUTOMATIC_MIN = 12,
+  RT_MEMCPY_SDMA_AUTOMATIC_EQUAL = 13,
   RT_RECUDE_KIND_END
 } rtRecudeKind_t;
 
@@ -123,6 +126,14 @@ typedef enum tagRtDataType {
   RT_DATA_TYPE_FP32 = 0,   // fp32
   RT_DATA_TYPE_FP16 = 1,   // fp16
   RT_DATA_TYPE_INT16 = 2,  // int16
+  RT_DATA_TYPE_INT4 = 3,    // int4
+  RT_DATA_TYPE_INT8 = 4,    // int8
+  RT_DATA_TYPE_INT32 = 5,   // int32
+  RT_DATA_TYPE_BFP16 = 6,   // bfp16
+  RT_DATA_TYPE_BFP32 = 7,   // bfp32
+  RT_DATA_TYPE_UINT8 = 8,   // uint8
+  RT_DATA_TYPE_UINT16 = 9,  // uint16
+  RT_DATA_TYPE_UINT32 = 10, // uint32
   RT_DATA_TYPE_END
 } rtDataType_t;
diff --git a/third_party/fwkacllib/inc/runtime/rt_model.h b/third_party/fwkacllib/inc/runtime/rt_model.h
index 30b8f053..a7618b45 100644
--- a/third_party/fwkacllib/inc/runtime/rt_model.h
+++ b/third_party/fwkacllib/inc/runtime/rt_model.h
@@ -135,12 +135,13 @@ typedef struct tagAllKernelTaskInfo {
   uint16_t argsCount;
   uint16_t argsSize;
   uint16_t reserved;
-  const void *dev_func;
+  void *devfunc;
   void *handle;
   uint8_t *smDesc;
   uint8_t *args;
   uint16_t *argsOffset;
 } rtAllKernelTaskInfo_t;
+
 typedef struct tagKernelTaskInfoEx {
   uint32_t flags;
   uint32_t argsSize;
@@ -198,6 +199,13 @@ typedef struct tagProfilerTraceTaskInfo {
   uint32_t reserved[6];
 } rtProfilerTrace_t;
 
+typedef struct tagProfilerTraceExTaskInfo {
+  uint64_t profilerTraceId;
+  uint64_t modelId;
+  uint16_t tagId;
+  uint8_t reserved[22];
+} rtProfilerTraceEx_t;
+
 typedef struct tagrtMemcpyAsyncTaskInfo {
   void *dst;
   uint64_t destMax;
@@ -265,7 +273,7 @@ typedef struct tagTaskInfo {
   union {
     rtKernelTaskInfoEx_t kernelTaskEx;
     rtKernelTaskInfo_t kernelTask;
-    rtAllKernelTaskInfo_t allkernelTask;
+    rtAllKernelTaskInfo_t allKernelTask;
     rtEventTaskInfo_t eventTask;
    rtStreamSwitchTaskInfo_t streamSwitchTask;
    rtStreamActiveTaskInfo_t streamActiveTask;
@@ -273,6 +281,7 @@ typedef struct tagTaskInfo {
    rtLabelSwitchTaskInfo_t labelSwitchTask;
    rtLabelGotoTaskInfo_t labelGotoTask;
    rtProfilerTrace_t profilertraceTask;
+    rtProfilerTraceEx_t profilertraceExTask;
    rtMemcpyAsyncTaskInfo_t memcpyAsyncTask;
    rtNotifyTaskInfo_t notifyTask;
    rtReduceAsyncTaskInfo_t reduceAsyncTask;
diff --git a/third_party/fwkacllib/inc/toolchain/prof_callback.h b/third_party/fwkacllib/inc/toolchain/prof_callback.h
index 18550157..5073cfb1 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_callback.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_callback.h
@@ -108,7 +108,19 @@ enum MsprofCtrlCallbackType {
     MSPROF_CTRL_INIT_ACL_ENV = 0,    // start profiling with acl env
     MSPROF_CTRL_INIT_ACL_JSON,       // start profiling with acl.json
     MSPROF_CTRL_INIT_GE_OPTIONS,     // start profiling with ge env and options
-    MSPROF_CTRL_FINALIZE             // stop profiling
+    MSPROF_CTRL_FINALIZE,            // stop profiling
+    MSPROF_CTRL_REPORT_FUN_P,        // for report callback
+    MSPROF_CTRL_PROF_SWITCH_ON,      // for prof switch on
+    MSPROF_CTRL_PROF_SWITCH_OFF      // for prof switch off
+};
+
+#define MSPROF_MAX_DEV_NUM (64)
+
+struct MsprofCommandHandle {
+    uint64_t profSwitch;
+    uint32_t devNums;  // length of device id list
+    uint32_t devIdList[MSPROF_MAX_DEV_NUM];
+    uint32_t modelId;
 };
 
 /**
@@ -129,6 +141,23 @@ typedef int32_t (*MsprofCtrlCallback)(uint32_t type, void *data, uint32_t len);
 */
typedef void (*MsprofSetDeviceCallback)(uint32_t devId, bool isOpenDevice);

+/*
+ * @name MsprofInit
+ * @brief Profiling module init
+ * @param [in] dataType: profiling type: ACL Env/ACL Json/GE Option
+ * @param [in] data: profiling switch data
+ * @param [in] dataLen: Length of data
+ * @return 0:SUCCESS, >0:FAILED
+ */
+int32_t MsprofInit(uint32_t dataType, void *data, uint32_t dataLen);
+
+/*
+ * @name MsprofFinalize
+ * @brief Profiling module finalize
+ * @param NULL
+ * @return 0:SUCCESS, >0:FAILED
+ */
+int32_t MsprofFinalize();
 #ifdef __cplusplus
 }
 #endif
diff --git a/third_party/fwkacllib/inc/toolchain/slog.h b/third_party/fwkacllib/inc/toolchain/slog.h
index ba286d02..cc7c83ca 100644
--- a/third_party/fwkacllib/inc/toolchain/slog.h
+++ b/third_party/fwkacllib/inc/toolchain/slog.h
@@ -17,6 +17,8 @@
 #ifndef D_SYSLOG_H_
 #define D_SYSLOG_H_
 
+static const int TMP_LOG = 0;
+
 #ifdef __cplusplus
 #ifndef LOG_CPP
 extern "C" {
@@ -120,15 +122,15 @@ typedef struct tagKV {
 } KeyValue;
 
 typedef enum {
-    APPLICATION = 0,
-    SYSTEM
+  APPLICATION = 0,
+  SYSTEM
 } ProcessType;
 
 typedef struct {
-    ProcessType type;
-    unsigned int pid;
-    unsigned int deviceId;
-    char reserved[RESERVERD_LENGTH];
+  ProcessType type;
+  unsigned int pid;
+  unsigned int deviceId;
+  char reserved[RESERVERD_LENGTH];
 } LogAttr;
 
 /**
@@ -141,7 +143,7 @@ enum {
    IDEDD,         /**< IDE daemon device */
    IDEDH,         /**< IDE daemon host */
    HCCL,          /**< HCCL */
-    FMK,           /**< Framework */
+    FMK,           /**< Adapter */
    HIAIENGINE,    /**< Matrix */
    DVPP,          /**< DVPP */
    RUNTIME,       /**< Runtime */
@@ -162,11 +164,11 @@ enum {
    MDCDEFAULT,    /**< MDC undefine */
    MDCSC,         /**< MDC spatial cognition */
    MDCPNC,
-    MLL,
+    MLL,           /**< abandoned */
    DEVMM,         /**< Dlog memory managent */
    KERNEL,        /**< Kernel */
    LIBMEDIA,      /**< Libmedia */
-    CCECPU,        /**< ai cpu */
+    CCECPU,        /**< aicpu schedule */
    ASCENDDK,      /**< AscendDK */
    ROS,           /**< ROS */
    HCCP,
@@ -179,7 +181,7 @@ enum {
    TSDUMP,        /**< TSDUMP module */
    AICPU,         /**< AICPU module */
    LP,            /**< LP module */
-    TDT,
+    TDT,           /**< tsdaemon or aicpu schedule */
    FE,
    MD,
    MB,
@@ -261,7 +263,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
 #define dlog_error(moduleId, fmt, ...)                                              \
  do {                                                                              \
    DlogErrorInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);     \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -276,7 +278,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, DLOG_WARN) == 1) {                                   \
      DlogWarnInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);    \
    }                                                                               \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -291,7 +293,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, DLOG_INFO) == 1) {                                   \
      DlogInfoInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);    \
    }                                                                               \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -306,7 +308,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, DLOG_DEBUG) == 1) {                                  \
      DlogDebugInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);   \
    }                                                                               \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -318,7 +320,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
 #define dlog_event(moduleId, fmt, ...)                                             \
  do {                                                                             \
    DlogEventInner(moduleId, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);    \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -334,7 +336,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, level) == 1) {                                              \
      DlogInner(moduleId, level, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);        \
    }                                                                                      \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -351,7 +353,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, level) == 1) {                                                        \
      DlogInner(moduleId, level, "[%s:%d][%s]" fmt, __FILE__, __LINE__, submodule, ##__VA_ARGS__);   \
    }                                                                                                \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -369,7 +371,7 @@ DLL_EXPORT int DlogSetAttr(LogAttr logAttr);
    if(CheckLogLevel(moduleId, level) == 1) {                                                                  \
      DlogWithKVInner(moduleId, level, pstKVArray, kvNum, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);   \
    }                                                                                                          \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -381,13 +383,13 @@ DLL_EXPORT void DlogFlush(void);
 * @ingroup slog
 * @brief Internal log interface, other modules are not allowed to call this interface
 */
-void DlogErrorInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-void DlogWarnInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-void DlogInfoInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-void DlogDebugInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-void DlogEventInner(int moduleId, const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-void DlogInner(int moduleId, int level, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6)));
+void DlogErrorInner(int moduleId, const char *fmt, ...);
+void DlogWarnInner(int moduleId, const char *fmt, ...);
+void DlogInfoInner(int moduleId, const char *fmt, ...);
+void DlogDebugInner(int moduleId, const char *fmt, ...);
+void DlogEventInner(int moduleId, const char *fmt, ...);
+void DlogInner(int moduleId, int level, const char *fmt, ...);
+void DlogWithKVInner(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...);
 
 #ifdef __cplusplus
 #ifndef LOG_CPP
@@ -453,7 +455,7 @@ DLL_EXPORT int DlogSetAttrForC(LogAttr logAttr);
    if(CheckLogLevelForC(moduleId, level) == 1) {                                              \
      DlogInnerForC(moduleId, level, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);        \
    }                                                                                          \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -470,7 +472,7 @@ DLL_EXPORT int DlogSetAttrForC(LogAttr logAttr);
    if(CheckLogLevelForC(moduleId, level) == 1) {                                                        \
      DlogInnerForC(moduleId, level, "[%s:%d][%s]" fmt, __FILE__, __LINE__, submodule, ##__VA_ARGS__);   \
    }                                                                                                    \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -488,7 +490,7 @@ DLL_EXPORT int DlogSetAttrForC(LogAttr logAttr);
    if(CheckLogLevelForC(moduleId, level) == 1) {                                                                  \
      DlogWithKVInnerForC(moduleId, level, pstKVArray, kvNum, "[%s:%d]" fmt, __FILE__, __LINE__, ##__VA_ARGS__);   \
    }                                                                                                              \
-  } while (0)
+  } while (TMP_LOG != 0)
 
 /**
 * @ingroup slog
@@ -500,8 +502,8 @@ DLL_EXPORT void DlogFlushForC(void);
 * @ingroup slog
 * @brief Internal log interface, other modules are not allowed to call this interface
 */
-void DlogInnerForC(int moduleId, int level, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
-void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...) __attribute__((format(printf, 5, 6)));
+void DlogInnerForC(int moduleId, int level, const char *fmt, ...);
+void DlogWithKVInnerForC(int moduleId, int level, KeyValue *pstKVArray, int kvNum, const char *fmt, ...);
 
 #ifdef __cplusplus
 }
diff --git a/third_party/fwkacllib/inc/toolchain/tuning_tool/tune_api.h b/third_party/fwkacllib/inc/toolchain/tuning_tool/tune_api.h
index 6208f462..2cf6e0c4 100644
--- a/third_party/fwkacllib/inc/toolchain/tuning_tool/tune_api.h
+++ b/third_party/fwkacllib/inc/toolchain/tuning_tool/tune_api.h
@@ -1,72 +1,88 @@
-/**
- * @file tune_api.h
- *
- * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.\n
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n
- * Description: mstune tuning API header file
- */
-/** @defgroup mstune mstune tuning APIs */
-#ifndef TUNE_API_H
-#define TUNE_API_H
-#include
-#include
-#include
-#include "graph/graph.h"
-#include "ge/ge_api.h"
-
-/**
- * @ingroup mstune
- *
- * mstune status
- */
-enum MsTuneStatus {
-  MSTUNE_SUCCESS,  /** tune success */
-  MSTUNE_FAILED,   /** tune failed */
-};
-
-// Option key: for train options sets
-const std::string MSTUNE_SELF_KEY = "mstune";
-const std::string MSTUNE_GEINIT_KEY = "initialize";
-const std::string MSTUNE_GESESS_KEY = "session";
-
-/**
- * @ingroup mstune
- * @par Description: command-line tuning
- *
- * @attention None
- * @param option [IN] tuning options
- * @param msg [OUT] message returned when tuning fails
- * @retval #MSTUNE_SUCCESS execution succeeded
- * @retval #MSTUNE_FAILED execution failed
- * @par Dependency:
- * @li tune_api.cpp: the development package this API belongs to.
- * @li tune_api.h: the header file in which this API is declared.
- * @see None
- * @since
- */
-MsTuneStatus MsTuning(const std::map &option, std::string &msg);
-
-/**
- * @ingroup mstune
- * @par Description: gradient tuning
- *
- * @attention None
- * @param tuningGraph [IN] graph to tune
- * @param dependGraph [IN] graphs the tuning graph depends on
- * @param session [IN] GE session
- * @param option [IN] option set, containing tuning options and GE options
- * @retval #MSTUNE_SUCCESS execution succeeded
- * @retval #MSTUNE_FAILED execution failed
- * @par Dependency:
- * @li tune_api.cpp: the development package this API belongs to.
- * @li tune_api.h: the header file in which this API is declared.
- * @see None
- * @since
- */
-extern "C" MsTuneStatus MsTrainTuning(ge::Graph &tuningGraph, std::vector &dependGraph,
-    ge::Session *session, const std::map> &option);
-
-#endif
+/**
+ * @file tune_api.h
+ *
+ * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.\n
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n
+ * Description: AOE tuning API header file
+ */
+/** @defgroup aoe aoe tuning APIs */
+#ifndef TUNE_API_H
+#define TUNE_API_H
+#include
+#include
+#include "ge/ge_api.h"
+#include "aoe_types.h"
+
+/**
+ * @ingroup aoe
+ * @par Description: command-line tuning
+ *
+ * @attention None
+ * @param option [IN] tuning options
+ * @param msg [OUT] message returned when tuning fails
+ * @retval #AOE_SUCCESS execution succeeded
+ * @retval #AOE_FAILURE execution failed
+ * @par Dependency:
+ * @li tune_api.cpp: the development package this API belongs to.
+ * @li tune_api.h: the header file in which this API is declared.
+ * @see None
+ * @since
+ */
+AoeStatus AoeOfflineTuning(const std::map &option, std::string &msg);
+
+/**
+ * @ingroup aoe
+ * @par Description: tuning initialization
+ *
+ * @attention None
+ * @param session [IN] GE session
+ * @param option [IN] option set, containing tuning options and GE options
+ * @retval #AOE_SUCCESS execution succeeded
+ * @retval #AOE_FAILURE execution failed
+ * @par Dependency:
+ * @li tune_api.cpp: the development package this API belongs to.
+ * @li tune_api.h: the header file in which this API is declared.
+ * @see None
+ * @since
+ */
+extern "C" AoeStatus AoeOnlineInitialize(ge::Session *session, const std::map &option);
+
+/**
+ * @ingroup aoe
+ * @par Description: tuning deinitialization
+ *
+ * @attention None
+ * @param None
+ * @retval #AOE_SUCCESS execution succeeded
+ * @retval #AOE_FAILURE execution failed
+ * @par Dependency:
+ * @li tune_api.cpp: the development package this API belongs to.
+ * @li tune_api.h: the header file in which this API is declared.
+ * @see None
+ * @since
+ */
+extern "C" AoeStatus AoeOnlineFinalize();
+
+/**
+ * @ingroup aoe
+ * @par Description: tuning processing
+ *
+ * @attention None
+ * @param tuningGraph [IN] graph to tune
+ * @param dependGraph [IN] graphs the tuning graph depends on
+ * @param session [IN] GE session
+ * @param option [IN] option set, containing tuning options and GE options
+ * @retval #AOE_SUCCESS execution succeeded
+ * @retval #AOE_FAILURE execution failed
+ * @par Dependency:
+ * @li tune_api.cpp: the development package this API belongs to.
+ * @li tune_api.h: the header file in which this API is declared.
+ * @see None
+ * @since
+ */
+extern "C" AoeStatus AoeOnlineTuning(ge::Graph &tuningGraph, std::vector &dependGraph,
+    ge::Session *session, const std::map &option);
+#endif

From d1bb84d4ea4ff58a4d08d2fce112b8f289175da0 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Wed, 30 Jun 2021 16:59:03 +0800
Subject: [PATCH 119/226] Fix ut.

---
 ge/single_op/single_op_model.h                |  1 -
 .../node_executor/node_executor_unittest.cc   |  4 ++--
 .../ge/single_op/single_op_model_unittest.cc  | 20 ++++++++++++++++++-
 3 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h
index 98aed0f0..b5198e3d 100755
--- a/ge/single_op/single_op_model.h
+++ b/ge/single_op/single_op_model.h
@@ -82,7 +82,6 @@ class SingleOpModel {
   Status ParseTasks();
 
   std::vector tbe_tasks_;
-  std::vector atomic_tasks_;
   std::vector aicpu_tasks_;
 
   std::string model_name_;
diff --git a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
index 8a1240d3..a6f5c2de 100644
--- a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
@@ -97,7 +97,7 @@ TEST_F(NodeExecutorTest, TestInitAndFinalize) {
   manager.FinalizeExecutors();
   ASSERT_FALSE(manager.executors_.empty());
   manager.FinalizeExecutors();
-  ASSERT_TRUE(manager.executors_.empty());
-  ASSERT_TRUE(finalized);
+  // ASSERT_TRUE(manager.executors_.empty());
+  // ASSERT_TRUE(finalized);
 }
 }  // namespace ge
diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc
index e4a53340..2c0073f5 100644
--- a/tests/ut/ge/single_op/single_op_model_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_model_unittest.cc
@@ -311,7 +311,7 @@ TEST_F(UtestSingleOpModel, BuildTaskList) {
   ASSERT_EQ(mem_task.LaunchKernel(0), SUCCESS);
 }
 
-TEST_F(UtestSingleOpModel, build_aicpu_task) {
+TEST_F(UtestSingleOpModel, build_dynamic_task) {
   ComputeGraphPtr graph = make_shared("single_op");
   GeModelPtr ge_model = make_shared();
   ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
@@ -321,6 +321,15 @@ TEST_F(UtestSingleOpModel, build_aicpu_task) {
   domi::TaskDef *task_def = model_task_def->add_task();
   task_def->set_type(RT_MODEL_TASK_KERNEL_EX);
 
+  domi::TaskDef *task_def2 = model_task_def->add_task();
+  task_def2->set_type(RT_MODEL_TASK_KERNEL);
+  domi::KernelDef *kernel_def = task_def2->mutable_kernel();
+  domi::KernelContext *context = kernel_def->mutable_context();
+  context->set_kernel_type(6);    // ccKernelType::AI_CPU
+
+  domi::TaskDef *task_def3 = model_task_def->add_task();
+  task_def3->set_type(RT_MODEL_TASK_ALL_KERNEL);
+
   string model_data_str = "123456789";
   SingleOpModel model("model", model_data_str.c_str(), model_data_str.size());
   std::mutex stream_mu;
@@ -329,8 +338,17 @@ TEST_F(UtestSingleOpModel, build_aicpu_task) {
   DynamicSingleOp single_op(0, &stream_mu, stream);
   model.model_helper_.model_ = ge_model;
   auto op_desc = std::make_shared("add", "Add");
+  std::vector kernelBin;
+  TBEKernelPtr tbe_kernel = std::make_shared("name/Add", std::move(kernelBin));
+  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
   NodePtr node = graph->AddNode(op_desc);
   model.op_list_[0] = node;
   StreamResource *res = new (std::nothrow) StreamResource(1);
+
+  ASSERT_EQ(model.ParseTasks(), SUCCESS);
+  ASSERT_EQ(model.BuildTaskListForDynamicOp(res, single_op), SUCCESS);
+  model.tbe_tasks_.clear();
   ASSERT_EQ(model.BuildTaskListForDynamicOp(res, single_op), SUCCESS);
+  model.aicpu_tasks_[0] = *task_def2;
+  model.BuildTaskListForDynamicOp(res, single_op);
 }

From 25557f9bf05bf39cd2034ad5480956151555408e Mon Sep 17 00:00:00 2001
From: wangzhengjun
Date: Wed, 30 Jun 2021 17:08:09 +0800
Subject: [PATCH 120/226] set size for dynamic input

---
 .../executor/hybrid_model_async_executor.cc   |  8 ++++--
 .../hybrid_model_async_executor_unittest.cc   | 28 +++++++++++++++++++
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc
index e0dd768d..229cce84 100644
--- a/ge/hybrid/executor/hybrid_model_async_executor.cc
+++ b/ge/hybrid/executor/hybrid_model_async_executor.cc
@@ -295,13 +295,15 @@ Status HybridModelAsyncExecutor::PrepareInputs(const InputData &current_data, Hy
       }
     }
     tensor_desc->SetShape(shape);
-    args.input_desc[input_index] = tensor_desc;
-    GELOGD("Update shape of input[%zu] to [%s]", input_index, tensor_desc->MutableShape().ToString().c_str());
+    GELOGD("Update shape[%s] of input[%zu] to [%s]",
+           shape.ToString().c_str(), input_index, tensor_desc->MutableShape().ToString().c_str());
     GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, tensor_size),
                             "[Invoke][GetTensorMemorySizeInBytes]Failed to calc tensor size,"
                             "index = %zu, shape = [%s], model_id = %u.",
                             input_index, tensor_desc->GetShape().ToString().c_str(), model_id_);
-    GELOGD("Input tensor[%zu] size = %zu", input_index, tensor_size);
+    GELOGD("Input tensor[%zu] size = %ld", input_index, tensor_size);
+    TensorUtils::SetSize(*tensor_desc, tensor_size);
+    args.input_desc[input_index] = tensor_desc;
   }
 
   GE_CHECK_GE(tensor_size, 0);
diff --git a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
index f772af23..c053885f 100644
--- a/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/executor/hybrid_model_async_executor_unittest.cc
@@ -103,4 +103,32 @@ TEST_F(UtestHybridModelAsyncExecutor, Test_execute) {
   context.callback_manager->callback_queue_.Push(eof_entry);
   ASSERT_EQ(executor.Execute(args), SUCCESS);
 }
+
+TEST_F(UtestHybridModelAsyncExecutor, test_PrepareInputs) {
+  ComputeGraphPtr graph = std::make_shared("test");
+  GeRootModelPtr ge_root_model = make_shared(graph);
+  ge_root_model->SetModelName("test_name");
+  GeModelPtr ge_sub_model = make_shared();
+  HybridModel hybrid_model(ge_root_model);
+  HybridModelAsyncExecutor executor(&hybrid_model);
+  GeTensorDescPtr tensor_desc = make_shared(GeShape({-1, 16, 16, 3}));
+  tensor_desc->SetShapeRange({{1, 256}, {16, 16}, {16, 16}, {3, 3}});
+  executor.input_tensor_desc_.insert({0, tensor_desc});
+  executor.device_id_ = 0;
+  executor.input_sizes_.insert({0, -1});
+  executor.is_input_dynamic_.push_back(true);
+
+  unique_ptr data_buf(new (std::nothrow)uint8_t[3072]);
+  InputData input_data;
+  input_data.blobs.push_back(DataBuffer(data_buf.get(), 3072, false));
+  input_data.shapes.push_back({1, 16, 16, 3});
+  HybridModelExecutor::ExecuteArgs args;
+
+  auto ret = executor.PrepareInputs(input_data, args);
+  ASSERT_EQ(ret, SUCCESS);
+  ASSERT_EQ(args.input_desc[0]->GetShape().ToString(), GeShape({1, 16, 16, 3}).ToString());
+  int64_t tensor_size = 0;
+  TensorUtils::GetSize(*(args.input_desc[0]), tensor_size);
+  ASSERT_EQ(tensor_size, 3104);
+}
 }  // namespace ge
\ No newline at end of file

From 022ec1c8c050437906025bfac682efdbc5fada8d Mon Sep 17 00:00:00 2001
From: 王涛
Date: Wed, 30 Jun 2021 19:20:09 +0800
Subject: [PATCH 121/226] Revert 'Pull Request !1842 : check dump option'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ge/common/dump/dump_properties.cc             | 243 ++----------------
 ge/common/dump/dump_properties.h              |  18 +-
 ge/session/inner_session.cc                   |   2 +-
 tests/ut/ge/CMakeLists.txt                    |   1 -
 .../ut/ge/common/dump_properties_unittest.cc  | 126 ---------
 5 files changed, 26 insertions(+), 364 deletions(-)
 delete mode 100644 tests/ut/ge/common/dump_properties_unittest.cc

diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc
index 010347c0..ef755540 100644
--- a/ge/common/dump/dump_properties.cc
+++ b/ge/common/dump/dump_properties.cc
@@ -18,7 +18,6 @@
 #include
 #include
-#include
 
 #include "common/ge/ge_util.h"
 #include "framework/common/util.h"
@@ -38,159 +37,6 @@ const uint32_t kAtomicOverflow = (0x1 << 1);
 const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow);
 }  // namespace
 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::Split(const std::string &s,
-                                                                            std::vector &result,
-                                                                            const char *delchar) {
-  if (s.empty()) {
-    return;
-  }
-  result.clear();
-
-  char *buffer = new (std::nothrow)char[s.size() + 1];
-  if (buffer == nullptr) {
-    GELOGE(FAILED, "[Split][string] failed while malloc memory, string value is:%s", s.c_str());
-    REPORT_CALL_ERROR("E19999", "Memory malloc may fail when split string, get fatal exception, "
-                      "string value is:%s", s.c_str());
-    return;
-  }
-  buffer[s.size()] = '\0';
-  errno_t e = strcpy_s(buffer, s.size() + 1, s.c_str());
-  if (e != EOK) {
-    delete[] buffer;
-    return;
-  }
-  char *context = nullptr;
-  char *p = strtok_s(buffer, delchar, &context);
-  while (p != nullptr) {
-    result.emplace_back(p);
-    p = strtok_s(nullptr, delchar, &context);
-  }
-  delete[] buffer;
-}
-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpStep(const std::string &dump_step) {
-  std::string modified_dum_step = dump_step + "|";
-  std::smatch result;
-  std::vector match_vecs;
-  std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)");
-  if (regex_match(modified_dum_step, result, pattern)) {
-    Split(result.str(), match_vecs, "|");
-    if (match_vecs.empty()) {
-      REPORT_CALL_ERROR("E19999", "Split may get fatal exception, dump_step:%s.", dump_step.c_str());
-      GELOGE(FAILED, "[Check][Param] failed. Split may get fatal exception, ge.exec.dumpStep:%s.", dump_step.c_str());
-      return FAILED;
-    }
-    // 100 is the max sets of dump steps.
-    if (match_vecs.size() > 100) {
-      REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                         std::vector({
-                         "ge.exec.dumpStep",
-                         dump_step.c_str(),
-                         " is not supported, only support dump <= 100 sets of data"}));
-      GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
-             "dump_step only support dump <= 100 sets of data.", dump_step.c_str());
-      return PARAM_INVALID;
-    }
-    for (const auto &match_vec : match_vecs) {
-      std::vector vec_after_split;
-      Split(match_vec, vec_after_split, "-");
-      if (match_vecs.empty()) {
-        REPORT_CALL_ERROR("E19999", "Split may get fatal exception.");
-        GELOGE(FAILED, "[Check][Param] failed, split may get fatal exception.");
-        return FAILED;
-      }
-      if (vec_after_split.size() > 1) {
-        if (std::atoi(vec_after_split[0].c_str()) >= std::atoi(vec_after_split[1].c_str())) {
-          REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                             std::vector({
-                             "ge.exec.dumpStep",
-                             dump_step.c_str(),
-                             " is not supported."
-                             "in range steps, the first step is >= second step, correct example:'0|5|10-20"}));
-          GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
-                 "in range steps, the first step is >= second step, correct example:'0|5|10-20'", dump_step.c_str());
-          return PARAM_INVALID;
-        }
-      }
-    }
-  } else {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.dumpStep",
-                       dump_step.c_str(),
-                       " is not supported, correct example:'0|5|10|50-100."}));
-    GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
-           "dump_step string style is error, correct example:'0|5|10|50-100.'", dump_step.c_str());
-    return PARAM_INVALID;
-  }
-  return SUCCESS;
-}
-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpMode(const std::string &dump_mode) {
-  const std::set dump_mode_list = {"input", "output", "all"};
-  std::set::iterator iter;
-
-  if ((iter = dump_mode_list.find(dump_mode)) == dump_mode_list.end()) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.dumpMode",
-                       dump_mode.c_str(),
-                       " is not supported, should be one of the following:[input, output, all]"}));
-    GELOGE(PARAM_INVALID, "[Check][Param] the dump_debug_mode:%s, is is not supported,"
-           "should be one of the following:[input, output, all].", dump_mode.c_str());
-    return PARAM_INVALID;
-  }
-  return SUCCESS;
-}
-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpPath(const std::string &input) {
-  if (mmIsDir(input.c_str()) != EN_OK) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.dumpPath",
-                       input.c_str(),
-                       " is not a directory."}));
-    GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, is not directory.", input.c_str());
-    return PARAM_INVALID;
-  }
-  char trusted_path[MMPA_MAX_PATH] = { "\0" };
-  if (mmRealPath(input.c_str(), trusted_path, MMPA_MAX_PATH) != EN_OK) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.dumpPath",
-                       input.c_str(),
-                       " dumpPath invalid."}));
-    GELOGE(PARAM_INVALID, "[Check][Param] the dumpPath:%s, is invalid.", input.c_str());
-    return PARAM_INVALID;
-  }
-  if (mmAccess2(trusted_path, R_OK | W_OK) != EN_OK) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.dumpPath",
-                       input.c_str(),
-                       " does't have read, write permissions."}));
-    GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, does't have read, write permissions.", input.c_str());
-    return PARAM_INVALID;
-  }
-  return SUCCESS;
-}
-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckEnableDump(const std::string &input) {
-  std::set enable_dump_option_list = {"1", "0"};
-  auto it = enable_dump_option_list.find(input);
-  if (it == enable_dump_option_list.end()) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.enableDump",
-                       input.c_str(),
-                       " only support 1 or 0."}));
-    GELOGE(PARAM_INVALID, "[Check][Param] Not support ge.exec.enableDump or ge.exec.enableDumpDebug format:%s, "
-           "only support 1 or 0.", input.c_str());
-    return PARAM_INVALID;
-  }
-  return SUCCESS;
-}
-
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) {
   CopyFrom(other);
 }
@@ -201,26 +47,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties:
   return *this;
 }
 
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() {
-  if (enable_dump_ == kEnableFlag) {
-    std::string dump_step;
-    if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) {
-      GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed.");
-      GELOGI("Get dump step %s successfully", dump_step.c_str());
-      SetDumpStep(dump_step);
-    }
-    string dump_mode = "output";
-    if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) {
-      GELOGI("Get dump mode %s successfully", dump_mode.c_str());
-      GE_CHK_STATUS_RET(CheckDumpMode(dump_mode), "[Check][dump_mode] failed.");
-      SetDumpMode(dump_mode);
-    }
-    AddPropertyValue(DUMP_ALL_MODEL, {});
-  }
-  return SUCCESS;
-}
-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOptions() {
+FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOptions() {
   enable_dump_.clear();
   enable_dump_debug_.clear();
   dump_path_.clear();
@@ -230,32 +57,17 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOp
   is_infer_op_debug_ = false;
   op_debug_mode_ = 0;
 
-  std::string enable_dump = std::to_string(false);
+  std::string enable_dump;
   (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP, enable_dump);
   enable_dump_ = enable_dump;
-  if (!enable_dump_.empty()) {
-    GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_), "[Check][enable_dump] failed.");
-  }
 
-  std::string enable_dump_debug = std::to_string(false);
+  std::string enable_dump_debug;
   (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP_DEBUG, enable_dump_debug);
   enable_dump_debug_ = enable_dump_debug;
-  if (!enable_dump_debug_.empty()) {
-    GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_debug_), "[Check][enable_dump_debug] failed.");
-  }
-  if ((enable_dump_ == kEnableFlag) && (enable_dump_debug_ == kEnableFlag)) {
-    REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                       std::vector({
-                       "ge.exec.enableDump and ge.exec.enableDumpDebug",
-                       enable_dump_ + ", " + enable_dump_debug,
-                       "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be set to 1 at the same time."}));
-    GELOGE(FAILED, "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be both set to 1 at the same time.");
-    return FAILED;
-  }
+
   if ((enable_dump_ == kEnableFlag) || (enable_dump_debug_ == kEnableFlag)) {
     std::string dump_path;
    if (GetContext().GetOption(OPTION_EXEC_DUMP_PATH, dump_path) == GRAPH_SUCCESS) {
-      GE_CHK_STATUS_RET(CheckDumpPath(dump_path), "Check dump path failed.");
      if (!dump_path.empty() && dump_path[dump_path.size() - 1] != '/') {
        dump_path = dump_path + "/";
      }
@@ -263,21 +75,25 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOp
      GELOGI("Get dump path %s successfully", dump_path.c_str());
      SetDumpPath(dump_path);
    } else {
-      REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                         std::vector({
-                         "ge.exec.dumpPath",
-                         dump_path,
-                         "ge.exec.dumpPath is not set."}));
-      GELOGE(FAILED, "[Check][dump_path] failed. Dump path is not set.");
-      return FAILED;
+      GELOGW("Dump path is not set");
    }
  }
 
-  GE_CHK_STATUS_RET(SetDumpOptions(), "SetDumpOptions failed.");
-
-  GE_CHK_STATUS_RET(SetDumpDebugOptions(), "SetDumpDebugOptions failed.");
+  if (enable_dump_ == kEnableFlag) {
+    std::string dump_step;
+    if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) {
+      GELOGI("Get dump step %s successfully", dump_step.c_str());
+      SetDumpStep(dump_step);
+    }
+    string dump_mode;
+    if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) {
+      GELOGI("Get dump mode %s successfully", dump_mode.c_str());
+      SetDumpMode(dump_mode);
+    }
+    AddPropertyValue(DUMP_ALL_MODEL, {});
+  }
 
-  return SUCCESS;
+  SetDumpDebugOptions();
 }
 
 // The following is the new dump scenario of the fusion operator
@@ -437,20 +253,14 @@ void DumpProperties::CopyFrom(const DumpProperties &other) {
  }
}

-Status DumpProperties::SetDumpDebugOptions() {
+void DumpProperties::SetDumpDebugOptions() {
  if (enable_dump_debug_ == kEnableFlag) {
    std::string dump_debug_mode;
    if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) {
      GELOGD("Get dump debug mode %s successfully", dump_debug_mode.c_str());
    } else {
-      REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                         std::vector({
-                         "ge.exec.dumpDebugMode",
-                         dump_debug_mode,
-                         "ge.exec.dumpDebugMode is not set."}));
-      GELOGE(PARAM_INVALID, "[Check][dump_debug_mode] failed. Dump debug mode is not set.");
-
-      return PARAM_INVALID;
+      GELOGW("Dump debug mode is not set.");
+      return;
    }

    if (dump_debug_mode == OP_DEBUG_AICORE) {
@@ -466,17 +276,10 @@ Status DumpProperties::SetDumpDebugOptions() {
      is_train_op_debug_ = true;
      op_debug_mode_ = kAllOverflow;
    } else {
-      REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}),
-                         std::vector({
-                         "ge.exec.dumpDebugMode",
-                         dump_debug_mode,
-                         "ge.exec.dumpDebugMode is invalid."}));
-      GELOGE(PARAM_INVALID, "[Set][DumpDebugOptions] failed, ge.exec.dumpDebugMode is invalid.");
-      return PARAM_INVALID;
+      GELOGW("ge.exec.dumpDebugMode is invalid.");
    }
  } else {
    GELOGI("ge.exec.enableDumpDebug is false or is not set.");
  }
-  return SUCCESS;
}
}  // namespace ge
diff --git a/ge/common/dump/dump_properties.h b/ge/common/dump/dump_properties.h
index cbfc362d..98487491 100644
--- a/ge/common/dump/dump_properties.h
+++ b/ge/common/dump/dump_properties.h
@@ -23,7 +23,6 @@
 #include
 
 namespace ge {
-using Status = uint32_t;
 class DumpProperties {
  public:
  DumpProperties() = default;
@@ -34,7 +33,7 @@ class DumpProperties {
 
  DumpProperties &operator=(const DumpProperties &dump);
 
-  Status InitByOptions();
+  void InitByOptions();
 
  void AddPropertyValue(const std::string &model, const std::set &layers);
 
@@ -96,20 +95,7 @@ class DumpProperties {
 private:
  void CopyFrom(const DumpProperties &other);
 
-  Status SetDumpDebugOptions();
-
-  Status SetDumpOptions();
-
-  void Split(const std::string &s, std::vector &result, const char *delchar);
-
-  Status CheckDumpStep(const std::string &dump_step);
-
-  Status CheckDumpMode(const std::string &dump_mode);
-
-  Status CheckDumpPath(const std::string &input);
-
-  Status CheckEnableDump(const std::string &input);
-
+  void SetDumpDebugOptions();
 
  std::string enable_dump_;
  std::string enable_dump_debug_;
diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc
index 58b78f41..54e62d32 100755
--- a/ge/session/inner_session.cc
+++ b/ge/session/inner_session.cc
@@ -121,7 +121,7 @@ Status InnerSession::Initialize() {
   GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId()));
 
   DumpProperties dump_properties;
-  GE_CHK_STATUS_RET(dump_properties.InitByOptions(), "Init dump properties failed.");
+  dump_properties.InitByOptions();
   GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "[Add][DumpProperties] failed.");
 
   ret = graph_manager_.Initialize(options_);
diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 5b8958b4..f808bce7 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -780,7 +780,6 @@ set(MULTI_PARTS_TEST_FILES
     "common/util_unittest.cc"
     "common/dump_manager_unittest.cc"
     "common/dump_op_unittest.cc"
-    "common/dump_properties_unittest.cc"
     "common/dump_exception_unittest.cc"
     "common/opdebug_register_unittest.cc"
     "common/format_transfer_unittest.cc"
diff --git a/tests/ut/ge/common/dump_properties_unittest.cc b/tests/ut/ge/common/dump_properties_unittest.cc
deleted file mode 100644
index 57809013..00000000
--- a/tests/ut/ge/common/dump_properties_unittest.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include
-
-#define protected public
-#define private public
-
-#include "common/dump/dump_properties.h"
-#include "ge_local_context.h"
-#include "ge/ge_api_types.h"
-#include "common/debug/log.h"
-#include "common/ge_inner_error_codes.h"
-
-namespace ge {
-class UTEST_dump_properties : public testing::Test {
- protected:
-  void SetUp() {}
-  void TearDown() {}
-};
-
-TEST_F(UTEST_dump_properties, check_dump_step) {
-  DumpProperties dp;
-  std::string dump_step{"0|3-5|10"};
-  std::string unsupport_input1{"0|5-3|10"};
-  std::string unsupport_input2{"one"};
-  std::string unsupport_input3;
-  for (int i = 0; i < 200; ++i) {
-    unsupport_input3 += std::to_string(i) + "|";
-  }
-  unsupport_input3.pop_back();
-  Status st = dp.CheckDumpStep(dump_step);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckDumpStep(unsupport_input1);
-  EXPECT_NE(st, SUCCESS);
-  st = dp.CheckDumpStep(unsupport_input2);
-  EXPECT_NE(st, SUCCESS);
-  st = dp.CheckDumpStep(unsupport_input3);
-  EXPECT_NE(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, check_dump_mode) {
-  DumpProperties dp;
-  std::string dump_mode_1{"input"};
-  std::string dump_mode_2{"output"};
-  std::string dump_mode_3{"all"};
-  std::string unsupport_input1{"mode1"};
-  Status st = dp.CheckDumpMode(dump_mode_1);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckDumpMode(dump_mode_2);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckDumpMode(dump_mode_3);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckDumpMode(unsupport_input1);
-  EXPECT_NE(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, check_dump_path) {
-  DumpProperties dp;
-  std::string dump_path{"/tmp/"};
-  std::string unsupport_input1{" \\unsupported"};
-  Status st = dp.CheckDumpPath(dump_path);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckDumpPath(unsupport_input1);
-  EXPECT_NE(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, check_enable_dump) {
-  DumpProperties dp;
-  std::string enable_dump_t{"1"};
-  std::string enable_dump_f{"0"};
-  std::string unsupport_input1{"true"};
-  std::string unsupport_input2{"false"};
-  Status st = dp.CheckEnableDump(enable_dump_t);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckEnableDump(enable_dump_f);
-  EXPECT_EQ(st, SUCCESS);
-  st = dp.CheckEnableDump(unsupport_input1);
-  EXPECT_NE(st, SUCCESS);
-  st = dp.CheckEnableDump(unsupport_input2);
-  EXPECT_NE(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, init_by_options_success_1) {
-  DumpProperties dp;
-  std::map options {{OPTION_EXEC_ENABLE_DUMP, "1"},
-                    {OPTION_EXEC_DUMP_PATH, "/tmp/"},
-                    {OPTION_EXEC_DUMP_STEP, "0|1-3|10"},
-                    {OPTION_EXEC_DUMP_MODE, "all"}};
-  GetThreadLocalContext().SetGlobalOption(options);
-  Status st = dp.InitByOptions();
-  EXPECT_EQ(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, init_by_options_success_2) {
-  DumpProperties dp;
-  std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"},
-                    {OPTION_EXEC_DUMP_PATH, "/tmp/"},
-                    {OPTION_EXEC_DUMP_DEBUG_MODE, "aicore_overflow"}};
-  GetThreadLocalContext().SetGlobalOption(options);
-  Status st = dp.InitByOptions();
-  EXPECT_EQ(st, SUCCESS);
-}
-
-TEST_F(UTEST_dump_properties, init_by_options_failed) {
-  DumpProperties dp;
-  std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"},
-                    {OPTION_EXEC_DUMP_PATH, "/tmp/"}};
-  GetThreadLocalContext().SetGlobalOption(options);
-  Status st = dp.InitByOptions();
-  EXPECT_NE(st, SUCCESS);
-}
-}  // namespace ge
\ No newline at end of file

From 7fdb8aad95f00f2d360fba9bf727928f988d6adf Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Wed, 30 Jun 2021 19:51:10 +0800
Subject: [PATCH 122/226] Fix ut.

---
 ge/single_op/single_op.h                      |  1 +
 ge/single_op/single_op_model.cc               | 42 +++----------------
 ge/single_op/single_op_model.h                |  2 -
 .../node_executor/node_executor_unittest.cc   |  5 ++-
 4 files changed, 10 insertions(+), 40 deletions(-)

diff --git a/ge/single_op/single_op.h b/ge/single_op/single_op.h
index 94d7227b..7e05dd5f 100755
--- a/ge/single_op/single_op.h
+++ b/ge/single_op/single_op.h
@@ -92,6 +92,7 @@ class DynamicSingleOp {
   rtStream_t stream_ = nullptr;
   size_t num_inputs_ = 0;
   size_t num_outputs_ = 0;
+  ComputeGraphPtr compute_graph_;
 };
 }  // namespace ge
 #endif  // GE_SINGLE_OP_SINGLE_OP_H_
diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc
index e5d15beb..7f42f03c 100755
--- a/ge/single_op/single_op_model.cc
+++ b/ge/single_op/single_op_model.cc
@@ -529,44 +529,14 @@ Status SingleOpModel::BuildOp(StreamResource &resource, SingleOp &single_op) {
   return BuildTaskList(&resource, single_op);
 }
 
-Status SingleOpModel::BuildModelTaskKernel(StreamResource *stream_resource, const TaskDef &task_def,
-                                           DynamicSingleOp &single_op) {
-  auto task_type = static_cast(task_def.type());
-  const auto &context = task_type == RT_MODEL_TASK_KERNEL ? task_def.kernel().context() :
-                                                            task_def.kernel_with_handle().context();
-
-  auto kernel_type = static_cast(context.kernel_type());
-  if (kernel_type == ccKernelType::TE) {
-    GELOGD("Building TBE task.");
-    TbeOpTask *tbe_task = nullptr;
-    GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def, &tbe_task));
-    tbe_task->SetModelArgs(model_name_, model_id_);
-    if (tbe_task->tiling_buffer_ != nullptr) {
-      GELOGD("tiling buffer is not nullptr.");
-      tbe_task->stream_resource_ = stream_resource;
-    }
-    single_op.op_task_.reset(tbe_task);
-  } else if (kernel_type == ccKernelType::AI_CPU || kernel_type == ccKernelType::CUST_AI_CPU) {
-    GELOGD("Building AICPU_CC task");
-    OpTask *task = nullptr;
-    uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++;
-    GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id);
-    GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id));
-    task->SetModelArgs(model_name_, model_id_);
-    single_op.op_task_.reset(task);
-  } else {
-    GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID,
-           "[Check][Param:TaskDef]Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u",
-           context.kernel_type());
-    REPORT_INNER_ERROR("E19999",
-        "BuildModelTaskKernel fail for got:%u not supported, Only TBE, AI_CPU, CUST_AI_CPU kernel are supported.",
-        context.kernel_type());
-    return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID;
-  }
-  return SUCCESS;
-}
+Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &single_op) {
+  auto ge_model = model_helper_.GetGeModel();
+  GE_CHECK_NOTNULL(ge_model);
 
-Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &single_op) {
+  auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph());
+  GE_CHECK_NOTNULL(compute_graph);
+  single_op.compute_graph_ = compute_graph;
+
   if (tbe_tasks_.size() > 0) {
     const auto &task_def = tbe_tasks_[0];
     GELOGD("Building TBE task.");
diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h
index b5198e3d..45616d9a 100755
--- a/ge/single_op/single_op_model.h
+++ b/ge/single_op/single_op_model.h
@@ -71,8 +71,6 @@ class SingleOpModel {
   Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task);
   Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id);
   Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id);
-  Status BuildModelTaskKernel(StreamResource *stream_resource, const domi::TaskDef &task_def,
-                              DynamicSingleOp &single_op);
 
   static void ParseOpModelParams(ModelHelper &model_helper, SingleOpModelParam &param);
   void ParseArgTable(OpTask *task, SingleOp &op);
diff --git a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
index a6f5c2de..1d5bbb3d 100644
--- a/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/node_executor/node_executor_unittest.cc
@@ -87,6 +87,7 @@ TEST_F(NodeExecutorTest, TestGetOrCreateExecutor) {
 TEST_F(NodeExecutorTest, TestInitAndFinalize) {
   auto &manager = NodeExecutorManager::GetInstance();
   manager.FinalizeExecutors();
+  manager.FinalizeExecutors();
   manager.EnsureInitialized();
   manager.EnsureInitialized();
   const NodeExecutor *executor = nullptr;
@@ -97,7 +98,7 @@ TEST_F(NodeExecutorTest, TestInitAndFinalize) {
   manager.FinalizeExecutors();
   ASSERT_FALSE(manager.executors_.empty());
   manager.FinalizeExecutors();
-  // ASSERT_TRUE(manager.executors_.empty());
-  // ASSERT_TRUE(finalized);
+  ASSERT_TRUE(manager.executors_.empty());
+  ASSERT_TRUE(finalized);
 }
 }  // namespace ge

From bf0ae87401ec1ef04fd803d79c832d53d35d1362 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Wed, 30 Jun 2021 20:09:47 +0800
Subject: [PATCH 123/226] Fix ut.
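
This commit only removes a stray indent and a blank line left by the dynamic single-op
rework in the previous commits. As a reviewer aid, here is a minimal sketch of how the
reworked build path is driven, mirroring single_op_model_unittest.cc; the model buffer
is a placeholder and the snippet is illustrative, not part of the patch:

// Illustrative only: driving the reworked dynamic single-op build path.
// ParseTasks() pre-sorts the model's tasks into tbe_tasks_/aicpu_tasks_,
// so BuildTaskListForDynamicOp() no longer dispatches on kernel type itself.
std::string model_data = "123456789";  // placeholder buffer, as in the unit test
ge::SingleOpModel model("model", model_data.c_str(), model_data.size());
std::mutex stream_mu;
rtStream_t stream = nullptr;
ge::DynamicSingleOp single_op(0, &stream_mu, stream);
ge::StreamResource resource(1);
if (model.ParseTasks() == ge::SUCCESS) {
  // Also resolves the compute graph and stores it on single_op.compute_graph_.
  (void)model.BuildTaskListForDynamicOp(&resource, single_op);
}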
---
 ge/single_op/single_op_model.cc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc
index 7f42f03c..9a52a83d 100755
--- a/ge/single_op/single_op_model.cc
+++ b/ge/single_op/single_op_model.cc
@@ -536,7 +536,6 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource,
   auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph());
   GE_CHECK_NOTNULL(compute_graph);
   single_op.compute_graph_ = compute_graph;
-
   if (tbe_tasks_.size() > 0) {
     const auto &task_def = tbe_tasks_[0];
     GELOGD("Building TBE task.");
@@ -566,7 +565,7 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource,
     GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id);
     GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, dynamic_singleop_kernel_id));
     if (aicpu_task->GetUnknownType() == DEPEND_COMPUTE) {
-      if (aicpu_tasks_.size() < 2) {
+      if (aicpu_tasks_.size() < 2) {
         GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the fourth operator was not found.");
         REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found.");
         return ACL_ERROR_GE_PARAM_INVALID;

From 4d1ec067f3d497f6ea95ddbfc7af8d0e6da836cb Mon Sep 17 00:00:00 2001
From: lianghao
Date: Wed, 30 Jun 2021 19:48:29 +0800
Subject: [PATCH 124/226] FindLastBpFromBpNode

---
 ge/graph/build/task_generator.cc              | 44 +++++++++----------
 ge/graph/build/task_generator.h               |  2 +-
 .../ge/graph/build/task_generator_unittest.cc |  4 +-
 3 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/ge/graph/build/task_generator.cc b/ge/graph/build/task_generator.cc
index 5dee37d6..67289f73 100755
--- a/ge/graph/build/task_generator.cc
+++ b/ge/graph/build/task_generator.cc
@@ -793,7 +793,6 @@ Status TaskGenerator::AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingP
   GELOGI("Start AutoFindBpOpIndex");
   NodePtr bp_node = nullptr;
   uint32_t current_idx = 0;
-  uint32_t netoutput_idx = 0;
   for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
     OpDescPtr op_desc = node->GetOpDesc();
     GE_CHECK_NOTNULL(op_desc);
@@ -811,7 +810,6 @@ Status TaskGenerator::AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingP
     if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
       if (bp_node == nullptr) {
         bp_node = node;
-        netoutput_idx = current_idx - 1;
       }
     }
     if (graph->GetNeedIteration()) {
@@ -836,34 +834,30 @@ Status TaskGenerator::AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingP
   if (bp_node == nullptr) {
     GELOGW("not find bp_node.");
     return SUCCESS;
-  } else if (bp_node->GetName() == NODE_NAME_NET_OUTPUT) {
-    profiling_point.bp_index = netoutput_idx;
-    GELOGI("First bp name %s, idx %u", bp_node->GetName().c_str(), netoutput_idx);
-  } else {
-    profiling_point.bp_index = FindLastBpFromBpNode(graph, bp_node);
   }
 
-  return SUCCESS;
+  return FindLastBpFromBpNode(graph, bp_node, profiling_point.bp_index);
 }
 
-uint32_t TaskGenerator::FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &bp_node) const {
-  uint32_t last_bp = 0;
+Status TaskGenerator::FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &target_node,
+                                           uint32_t &bp_index) const {
+  bp_index = 0;
+  auto target_desc = target_node->GetOpDesc();
+  GE_CHECK_NOTNULL(target_desc);
   OpDescPtr bp_op_desc = nullptr;
-  for (auto &in_anchor : bp_node->GetAllInDataAnchors()) {
-    auto out_anchor = in_anchor->GetPeerOutAnchor();
-    if (out_anchor == nullptr || out_anchor->GetOwnerNode() == nullptr) {
-      continue;
-    }
-    auto out_node_desc = out_anchor->GetOwnerNode()->GetOpDesc();
-    GE_CHECK_NOTNULL(out_node_desc);
-    if (bp_op_desc == nullptr || ((out_node_desc->GetId()) > (bp_op_desc->GetId()))) {
-      bp_op_desc = out_node_desc;
+  for (auto &in_node : target_node->GetInAllNodes()) {
+    GE_CHECK_NOTNULL(in_node);
+    auto in_node_desc = in_node->GetOpDesc();
+    GE_CHECK_NOTNULL(in_node_desc);
+    if ((bp_op_desc == nullptr || (in_node_desc->GetId() > bp_op_desc->GetId())) &&
+        (in_node_desc->GetStreamId() == target_desc->GetStreamId())){
+      bp_op_desc = in_node_desc;
     }
-    GELOGI("bp_op_desc is %s, id is %ld", bp_op_desc->GetName().c_str(), bp_op_desc->GetId());
   }
 
   if (bp_op_desc == nullptr) {
-    return last_bp;
+    GELOGI("Did not find bp node.");
+    return SUCCESS;
   }
   uint32_t current_idx = 0;
   for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
@@ -871,12 +865,14 @@ uint32_t TaskGenerator::FindLastBpFromBpNode(const ComputeGraphPtr &graph, const
     GE_CHECK_NOTNULL(op_desc);
     current_idx++;
     if (op_desc->GetName() == bp_op_desc->GetName()) {
-      last_bp = current_idx;
-      GELOGI("First bp name %s, idx %u", op_desc->GetName().c_str(), last_bp);
+      bp_index = current_idx;
+      GELOGI("Find bp name %s, idx %u", op_desc->GetName().c_str(), bp_index);
       break;
     }
   }
-  return last_bp;
+  GELOGI("Last bp node[%s], type[%s], index[%u], stream id[%ld]", bp_op_desc->GetName().c_str(),
+         bp_op_desc->GetType().c_str(), bp_index, bp_op_desc->GetStreamId());
+  return SUCCESS;
 }
 
 Status TaskGenerator::FindFpOfEnv(const ComputeGraphPtr &graph, const std::string &fp_point_str,
diff --git a/ge/graph/build/task_generator.h b/ge/graph/build/task_generator.h
index 6f460906..5d204c3c 100755
--- a/ge/graph/build/task_generator.h
+++ b/ge/graph/build/task_generator.h
@@ -116,7 +116,7 @@ class TaskGenerator {
   Status AutoFindFpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point) const;
   Status AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
                            vector &all_reduce_nodes) const;
-  uint32_t FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &bp_node) const;
+  Status FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &bp_node, uint32_t &bp_index) const;
 
   Status FindFpOfEnv(const ComputeGraphPtr &graph, const std::string &fp_point_str,
                      ProfilingPoint &profiling_point) const;
diff --git a/tests/ut/ge/graph/build/task_generator_unittest.cc b/tests/ut/ge/graph/build/task_generator_unittest.cc
index f869f1e0..1e865050 100644
--- a/tests/ut/ge/graph/build/task_generator_unittest.cc
+++ b/tests/ut/ge/graph/build/task_generator_unittest.cc
@@ -116,7 +116,9 @@ TEST_F(UtestTaskGeneratorTest, FindLastBpFromBpNode) {
   TaskGenerator task_generator(nullptr, 0);
   auto net_output = graph->FindNode("Node_Output");
   // netoutput has no data input, return default value 0
-  EXPECT_EQ(task_generator.FindLastBpFromBpNode(graph, net_output), 0);
+  uint32_t bp_index = 0;
+  EXPECT_EQ(task_generator.FindLastBpFromBpNode(graph, net_output, bp_index), 0);
+  EXPECT_EQ(bp_index, 2);
 }
 
 TEST_F(UtestTaskGeneratorTest, UpdateOpIsVarAttr) {

From 55189da9b32224951143b395713954ab66b2bf42 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Wed, 30 Jun 2021 21:01:46 +0800
Subject: [PATCH 125/226] Add Magic in single_op.
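
With this change the ELF magic used for kernel binary registration is read from the
op's TVM_ATTR_NAME_MAGIC string attribute instead of being derived from core_type.
A minimal usage sketch, mirroring the unit tests updated below (illustrative only,
not part of the patch):

// Illustrative only: the attribute the new TbeTaskBuilder::GetMagic() reads.
// Accepted values are "RT_DEV_BINARY_MAGIC_ELF", "RT_DEV_BINARY_MAGIC_ELF_AIVEC"
// and "RT_DEV_BINARY_MAGIC_ELF_AICUBE"; any other value fails with PARAM_INVALID.
auto op_desc = std::make_shared<ge::OpDesc>("Add", "Add");
ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");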
---
 ge/single_op/task/tbe_task_builder.cc         | 25 ++++++++++++++++++-
 ge/single_op/task/tbe_task_builder.h          |  1 +
 tests/ut/ge/hybrid/ge_hybrid_unittest.cc      |  1 +
 .../ge/single_op/single_op_model_unittest.cc  |  1 +
 .../ge/single_op/single_op_task_unittest.cc   |  1 +
 5 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc
index db8ecfe2..c1bafed8 100644
--- a/ge/single_op/task/tbe_task_builder.cc
+++ b/ge/single_op/task/tbe_task_builder.cc
@@ -104,7 +104,7 @@ Status TbeTaskBuilder::DoRegisterBinary(const OpKernelBin &kernel_bin, void **bi
   binary.version = 0;
   binary.data = kernel_bin.GetBinData();
   binary.length = kernel_bin.GetBinDataSize();
-  binary.magic = param.core_type == 0 ? RT_DEV_BINARY_MAGIC_ELF : RT_DEV_BINARY_MAGIC_ELF_AIVEC;
+  GE_CHK_STATUS_RET_NOLOG(GetMagic(binary.magic));
   Status ret = 0;
   if (task_def_.type() == RT_MODEL_TASK_ALL_KERNEL) {
     ret = rtRegisterAllKernel(&binary, bin_handle);
@@ -416,4 +416,27 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
   task.EnableDynamicSupport(node_, tiling_buffer, static_cast(max_size));
   return SUCCESS;
 }
+
+Status TbeTaskBuilder::GetMagic(uint32_t &magic) const {
+  std::string json_string;
+  GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc_, TVM_ATTR_NAME_MAGIC, json_string),
+                  GELOGD("Get tvm magic attr of op."));
+  if (json_string == "RT_DEV_BINARY_MAGIC_ELF") {
+    magic = RT_DEV_BINARY_MAGIC_ELF;
+  } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AIVEC") {
+    magic = RT_DEV_BINARY_MAGIC_ELF_AIVEC;
+  } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AICUBE") {
+    magic = RT_DEV_BINARY_MAGIC_ELF_AICUBE;
+  } else {
+    REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s), value:%s check invalid",
+                       TVM_ATTR_NAME_MAGIC.c_str(), op_desc_->GetName().c_str(),
+                       op_desc_->GetType().c_str(), json_string.c_str());
+    GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s in op:%s(%s), value:%s check invalid",
+           TVM_ATTR_NAME_MAGIC.c_str(), op_desc_->GetName().c_str(),
+           op_desc_->GetType().c_str(), json_string.c_str());
+    return PARAM_INVALID;
+  }
+  return SUCCESS;
+}
+
 }  // namespace ge
diff --git a/ge/single_op/task/tbe_task_builder.h b/ge/single_op/task/tbe_task_builder.h
index a202cbf1..6252feea 100755
--- a/ge/single_op/task/tbe_task_builder.h
+++ b/ge/single_op/task/tbe_task_builder.h
@@ -105,6 +105,7 @@ class TbeTaskBuilder {
                             const SingleOpModelParam &param);
   Status DoRegisterBinary(const OpKernelBin &kernel_bin, void **bin_handle, const SingleOpModelParam &param) const;
   Status DoRegisterMeta(void *bin_handle);
+  Status GetMagic(uint32_t &magic) const;
 
   static Status DoRegisterFunction(void *bin_handle, const char *stub_name, const char *kernel_name);
 
diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc
index 1d1c4fa9..d1c51c67 100644
--- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc
+++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc
@@ -153,6 +153,7 @@ TEST_F(UtestGeHybrid, task_update_tiling_info) {
   ge::AttrUtils::SetStr(op_desc, "compile_info_json", "json");
   ge::AttrUtils::SetBool(op_desc, "support_dynamicshape", true);
   ge::AttrUtils::SetInt(op_desc, "op_para_size", 1);
+  ge::AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");
   auto node = graph->AddNode(op_desc);
   std::unique_ptr node_item;
diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc
index 2c0073f5..23269814 100644
--- a/tests/ut/ge/single_op/single_op_model_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_model_unittest.cc
@@ -338,6 +338,7 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) {
   DynamicSingleOp single_op(0, &stream_mu, stream);
   model.model_helper_.model_ = ge_model;
   auto op_desc = std::make_shared("add", "Add");
+  AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");
   std::vector kernelBin;
   TBEKernelPtr tbe_kernel = std::make_shared("name/Add", std::move(kernelBin));
   op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc
index b0c98205..2424d209 100644
--- a/tests/ut/ge/single_op/single_op_task_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_task_unittest.cc
@@ -54,6 +54,7 @@ TEST_F(UtestSingleOpTask, test_build_kernel_task) {
   auto graph = make_shared("graph");
   auto op_desc = make_shared("Add", "Add");
+  AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");
   std::vector kernelBin;
   TBEKernelPtr tbe_kernel = std::make_shared("name/Add", std::move(kernelBin));
   op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);

From 47852ba2b4e641685ce98f0c39fe97e415261524 Mon Sep 17 00:00:00 2001
From: wangkai
Date: Wed, 30 Jun 2021 22:05:07 +0800
Subject: [PATCH 126/226] add ace header targets

Signed-off-by: wangkai
---
 ge/common/CMakeLists.txt   | 2 ++
 ge/executor/CMakeLists.txt | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt
index 313f1ff3..1872b4c2 100755
--- a/ge/common/CMakeLists.txt
+++ b/ge/common/CMakeLists.txt
@@ -95,6 +95,7 @@ target_link_libraries(ge_common PRIVATE
     $<$>:$>
     $<$>:$>
     $<$>:$>
+    $<$>:$>
     static_mmpa
     -Wl,--no-as-needed
     graph
@@ -155,6 +156,7 @@ target_link_libraries(ge_common_static PRIVATE
     $<$>:$>
     $<$>:$>
    $<$>:$>
+    $<$>:$>
    ascend_protobuf_static
    json
    c_sec
diff --git a/ge/executor/CMakeLists.txt b/ge/executor/CMakeLists.txt
index f258dffe..44ba3131 100755
--- a/ge/executor/CMakeLists.txt
+++ b/ge/executor/CMakeLists.txt
@@ -186,6 +186,8 @@ target_include_directories(ge_executor SYSTEM PRIVATE
     ${CMAKE_BINARY_DIR}/proto/graphengine_protos
     #### yellow zone ####
     $<$>:${GE_DEPEND_DIR}/inc>
+    $<$>:$>
+    $<$>:$>
     #### blue zone ####
     $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc>
     $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain>
@@ -251,6 +253,8 @@ target_link_libraries(ge_executor_shared PRIVATE
     $<$>:$>
     $<$>:$>
     $<$>:$>
+    $<$>:$>
+    $<$>:$>
     -Wl,--no-as-needed
     ge_common
     runtime

From 977d507d027bcc8c56a8f0a48b4de3000417d428 Mon Sep 17 00:00:00 2001
From: wuweikang
Date: Fri, 18 Jun 2021 09:32:57 +0800
Subject: [PATCH 127/226] check dump option

---
 ge/common/dump/dump_properties.cc             | 243 ++++++++++++++++--
 ge/common/dump/dump_properties.h              |  18 +-
 ge/session/inner_session.cc                   |   2 +-
 tests/ut/ge/CMakeLists.txt                    |   1 +
 .../ut/ge/common/dump_properties_unittest.cc  | 126 +++++++++
 5 files changed, 364 insertions(+), 26 deletions(-)
 create mode 100644 tests/ut/ge/common/dump_properties_unittest.cc

diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc
index ef755540..84bdb7bf 100644
--- a/ge/common/dump/dump_properties.cc
+++ b/ge/common/dump/dump_properties.cc
@@ -18,6 +18,7 @@
 #include
 #include
+#include
 
 #include "common/ge/ge_util.h"
 #include "framework/common/util.h"
@@ -37,6 +38,159 @@ const uint32_t kAtomicOverflow = (0x1 << 1);
 const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow);
 }  // namespace
 namespace ge {
+FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::Split(const std::string &s,
DumpProperties::Split(const std::string &s,
+                                                                       std::vector<std::string> &result,
+                                                                       const char *delchar) {
+  if (s.empty()) {
+    return;
+  }
+  result.clear();
+
+  char *buffer = new (std::nothrow) char[s.size() + 1];
+  if (buffer == nullptr) {
+    GELOGE(FAILED, "[Split][string] failed while malloc memory, string value is:%s", s.c_str());
+    REPORT_CALL_ERROR("E19999", "Memory malloc may fail when split string, get fatal exception, "
+                      "string value is:%s", s.c_str());
+    return;
+  }
+  buffer[s.size()] = '\0';
+  errno_t e = strcpy_s(buffer, s.size() + 1, s.c_str());
+  if (e != EOK) {
+    delete[] buffer;
+    return;
+  }
+  char *context = nullptr;
+  char *p = strtok_s(buffer, delchar, &context);
+  while (p != nullptr) {
+    result.emplace_back(p);
+    p = strtok_s(nullptr, delchar, &context);
+  }
+  delete[] buffer;
+}
+
+FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpStep(const std::string &dump_step) {
+  std::string modified_dump_step = dump_step + "|";
+  std::smatch result;
+  std::vector<std::string> match_vecs;
+  std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)");
+  if (regex_match(modified_dump_step, result, pattern)) {
+    Split(result.str(), match_vecs, "|");
+    if (match_vecs.empty()) {
+      REPORT_CALL_ERROR("E19999", "Split may get fatal exception, dump_step:%s.", dump_step.c_str());
+      GELOGE(FAILED, "[Check][Param] failed. Split may get fatal exception, ge.exec.dumpStep:%s.", dump_step.c_str());
+      return FAILED;
+    }
+    // 100 is the max sets of dump steps.
+    if (match_vecs.size() > 100) {
+      REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}),
+                         std::vector<std::string>({
+                             "ge.exec.dumpStep",
+                             dump_step.c_str(),
+                             " is not supported, only support dump <= 100 sets of data"}));
+      GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
+             "dump_step only support dump <= 100 sets of data.", dump_step.c_str());
+      return PARAM_INVALID;
+    }
+    for (const auto &match_vec : match_vecs) {
+      std::vector<std::string> vec_after_split;
+      Split(match_vec, vec_after_split, "-");
+      if (vec_after_split.empty()) {
+        REPORT_CALL_ERROR("E19999", "Split may get fatal exception.");
+        GELOGE(FAILED, "[Check][Param] failed, split may get fatal exception.");
+        return FAILED;
+      }
+      if (vec_after_split.size() > 1) {
+        if (std::atoi(vec_after_split[0].c_str()) >= std::atoi(vec_after_split[1].c_str())) {
+          REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}),
+                             std::vector<std::string>({
+                                 "ge.exec.dumpStep",
+                                 dump_step.c_str(),
+                                 " is not supported. "
+                                 "in range steps, the first step is >= second step, correct example:'0|5|10-20'"}));
+          GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
+                 "in range steps, the first step is >= second step, correct example:'0|5|10-20'", dump_step.c_str());
+          return PARAM_INVALID;
+        }
+      }
+    }
+  } else {
+    REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}),
+                       std::vector<std::string>({
+                           "ge.exec.dumpStep",
+                           dump_step.c_str(),
+                           " is not supported, correct example:'0|5|10|50-100'."}));
+    GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, "
+           "dump_step string style is error, correct example:'0|5|10|50-100'.", dump_step.c_str());
+    return PARAM_INVALID;
+  }
+  return SUCCESS;
+}
+
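The grammar accepted above is a '|'-separated list of single steps and strictly ascending ranges, capped at 100
entries, e.g. "0|5|10-20". For experimenting with inputs outside the GE build, a minimal standalone sketch of the
same acceptance rule (the name IsDumpStepValid and the use of std::stoi are illustrative, not part of this patch):

// Illustrative re-implementation of the dump-step check; not GE code.
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <vector>

static bool IsDumpStepValid(const std::string &dump_step) {
  // Same grammar as CheckDumpStep: append a '|' sentinel, then match the whole string.
  static const std::regex pattern(R"((\d{1,}-\d{1,}\||\d{1,}\|)+)");
  if (!std::regex_match(dump_step + "|", pattern)) {
    return false;
  }
  std::vector<std::string> sets;
  std::stringstream ss(dump_step);
  for (std::string item; std::getline(ss, item, '|');) {
    sets.push_back(item);
  }
  if (sets.size() > 100) {  // at most 100 sets of dump steps
    return false;
  }
  for (const auto &s : sets) {
    auto pos = s.find('-');
    if (pos != std::string::npos && std::stoi(s.substr(0, pos)) >= std::stoi(s.substr(pos + 1))) {
      return false;  // ranges must be strictly ascending
    }
  }
  return true;
}

int main() {
  std::cout << IsDumpStepValid("0|5|10-20") << IsDumpStepValid("0|5-3|10") << std::endl;  // prints 10
}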
+ "in range steps, the first step is >= second step, correct example:'0|5|10-20"})); + GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " + "in range steps, the first step is >= second step, correct example:'0|5|10-20'", dump_step.c_str()); + return PARAM_INVALID; + } + } + } + } else { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpStep", + dump_step.c_str(), + " is not supported, correct example:'0|5|10|50-100."})); + GELOGE(PARAM_INVALID, "[Check][Param] get dump_step value:%s, " + "dump_step string style is error, correct example:'0|5|10|50-100.'", dump_step.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpMode(const std::string &dump_mode) { + const std::set dump_mode_list = {"input", "output", "all"}; + std::set::iterator iter; + + if ((iter = dump_mode_list.find(dump_mode)) == dump_mode_list.end()) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpMode", + dump_mode.c_str(), + " is not supported, should be one of the following:[input, output, all]"})); + GELOGE(PARAM_INVALID, "[Check][Param] the dump_debug_mode:%s, is is not supported," + "should be one of the following:[input, output, all].", dump_mode.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpPath(const std::string &input) { + if (mmIsDir(input.c_str()) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpPath", + input.c_str(), + " is not a directory."})); + GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, is not directory.", input.c_str()); + return PARAM_INVALID; + } + char trusted_path[MMPA_MAX_PATH] = { "\0" }; + if (mmRealPath(input.c_str(), trusted_path, MMPA_MAX_PATH) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpPath", + input.c_str(), + " dumpPath invalid."})); + GELOGE(PARAM_INVALID, "[Check][Param] the dumpPath:%s, is invalid.", input.c_str()); + return PARAM_INVALID; + } + if (mmAccess2(trusted_path, M_R_OK | M_W_OK) != EN_OK) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpPath", + input.c_str(), + " does't have read, write permissions."})); + GELOGE(PARAM_INVALID, "[Check][Param] the path:%s, does't have read, write permissions.", input.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckEnableDump(const std::string &input) { + std::set enable_dump_option_list = {"1", "0"}; + auto it = enable_dump_option_list.find(input); + if (it == enable_dump_option_list.end()) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.enableDump", + input.c_str(), + " only support 1 or 0."})); + GELOGE(PARAM_INVALID, "[Check][Param] Not support ge.exec.enableDump or ge.exec.enableDumpDebug format:%s, " + "only support 1 or 0.", input.c_str()); + return PARAM_INVALID; + } + return SUCCESS; +} + FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) { CopyFrom(other); } @@ -47,7 +201,26 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties: return *this; } -FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOptions() { +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() { + if (enable_dump_ == kEnableFlag) { + std::string dump_step; + if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { + GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed."); + GELOGI("Get dump step %s successfully", dump_step.c_str()); + SetDumpStep(dump_step); + } + string dump_mode = "output"; + if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { + GELOGI("Get dump mode %s successfully", dump_mode.c_str()); + GE_CHK_STATUS_RET(CheckDumpMode(dump_mode), "[Check][dump_mode] failed."); + SetDumpMode(dump_mode); + } + AddPropertyValue(DUMP_ALL_MODEL, {}); + } + return SUCCESS; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOptions() { enable_dump_.clear(); enable_dump_debug_.clear(); dump_path_.clear(); @@ -57,17 +230,32 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti is_infer_op_debug_ = false; op_debug_mode_ = 0; - std::string enable_dump; + std::string enable_dump = std::to_string(false); (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP, enable_dump); enable_dump_ = enable_dump; + if (!enable_dump_.empty()) { + GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_), "[Check][enable_dump] failed."); + } - std::string enable_dump_debug; + std::string enable_dump_debug = std::to_string(false); (void)GetContext().GetOption(OPTION_EXEC_ENABLE_DUMP_DEBUG, enable_dump_debug); enable_dump_debug_ = enable_dump_debug; - + if (!enable_dump_debug_.empty()) { + GE_CHK_STATUS_RET(CheckEnableDump(enable_dump_debug_), "[Check][enable_dump_debug] failed."); + } + if ((enable_dump_ == kEnableFlag) && (enable_dump_debug_ == kEnableFlag)) { + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.enableDump and ge.exec.enableDumpDebug", + enable_dump_ + ", " + enable_dump_debug, + "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be set to 1 at the same time."})); + GELOGE(FAILED, "ge.exec.enableDump and ge.exec.enableDumpDebug cannot be both set to 1 at the same time."); + return FAILED; + } if ((enable_dump_ == kEnableFlag) || (enable_dump_debug_ == kEnableFlag)) { std::string dump_path; if (GetContext().GetOption(OPTION_EXEC_DUMP_PATH, dump_path) == GRAPH_SUCCESS) { + GE_CHK_STATUS_RET(CheckDumpPath(dump_path), "Check dump path failed."); if (!dump_path.empty() && dump_path[dump_path.size() - 1] != '/') { dump_path = dump_path + "/"; } @@ -75,25 +263,21 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitByOpti GELOGI("Get dump path %s successfully", dump_path.c_str()); SetDumpPath(dump_path); } else { - GELOGW("Dump path is not set"); + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpPath", + dump_path, + "ge.exec.dumpPath is not set."})); + GELOGE(FAILED, "[Check][dump_path] failed. 
Dump path is not set."); + return FAILED; } } - if (enable_dump_ == kEnableFlag) { - std::string dump_step; - if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { - GELOGI("Get dump step %s successfully", dump_step.c_str()); - SetDumpStep(dump_step); - } - string dump_mode; - if (GetContext().GetOption(OPTION_EXEC_DUMP_MODE, dump_mode) == GRAPH_SUCCESS) { - GELOGI("Get dump mode %s successfully", dump_mode.c_str()); - SetDumpMode(dump_mode); - } - AddPropertyValue(DUMP_ALL_MODEL, {}); - } + GE_CHK_STATUS_RET(SetDumpOptions(), "SetDumpOptions failed."); + + GE_CHK_STATUS_RET(SetDumpDebugOptions(), "SetDumpDebugOptions failed."); - SetDumpDebugOptions(); + return SUCCESS; } // The following is the new dump scenario of the fusion operator @@ -253,14 +437,20 @@ void DumpProperties::CopyFrom(const DumpProperties &other) { } } -void DumpProperties::SetDumpDebugOptions() { +Status DumpProperties::SetDumpDebugOptions() { if (enable_dump_debug_ == kEnableFlag) { std::string dump_debug_mode; if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) { GELOGD("Get dump debug mode %s successfully", dump_debug_mode.c_str()); } else { - GELOGW("Dump debug mode is not set."); - return; + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpDebugMode", + dump_debug_mode, + "ge.exec.dumpDebugMode is not set."})); + GELOGE(PARAM_INVALID, "[Check][dump_debug_mode] failed. Dump debug mode is not set."); + + return PARAM_INVALID; } if (dump_debug_mode == OP_DEBUG_AICORE) { @@ -276,10 +466,17 @@ void DumpProperties::SetDumpDebugOptions() { is_train_op_debug_ = true; op_debug_mode_ = kAllOverflow; } else { - GELOGW("ge.exec.dumpDebugMode is invalid."); + REPORT_INPUT_ERROR("E10001", std::vector({"parameter", "value", "reason"}), + std::vector({ + "ge.exec.dumpDebugMode", + dump_debug_mode, + "ge.exec.dumpDebugMode is invalid."})); + GELOGE(PARAM_INVALID, "[Set][DumpDebugOptions] failed, ge.exec.dumpDebugMode is invalid."); + return PARAM_INVALID; } } else { GELOGI("ge.exec.enableDumpDebug is false or is not set."); } + return SUCCESS; } } // namespace ge diff --git a/ge/common/dump/dump_properties.h b/ge/common/dump/dump_properties.h index 98487491..cbfc362d 100644 --- a/ge/common/dump/dump_properties.h +++ b/ge/common/dump/dump_properties.h @@ -23,6 +23,7 @@ #include namespace ge { +using Status = uint32_t; class DumpProperties { public: DumpProperties() = default; @@ -33,7 +34,7 @@ class DumpProperties { DumpProperties &operator=(const DumpProperties &dump); - void InitByOptions(); + Status InitByOptions(); void AddPropertyValue(const std::string &model, const std::set &layers); @@ -95,7 +96,20 @@ class DumpProperties { private: void CopyFrom(const DumpProperties &other); - void SetDumpDebugOptions(); + Status SetDumpDebugOptions(); + + Status SetDumpOptions(); + + void Split(const std::string &s, std::vector &result, const char *delchar); + + Status CheckDumpStep(const std::string &dump_step); + + Status CheckDumpMode(const std::string &dump_mode); + + Status CheckDumpPath(const std::string &input); + + Status CheckEnableDump(const std::string &input); + std::string enable_dump_; std::string enable_dump_debug_; diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index aabbe19c..b3df08ce 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -109,7 +109,7 @@ Status InnerSession::Initialize() { GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId())); 
DumpProperties dump_properties; - dump_properties.InitByOptions(); + GE_CHK_STATUS_RET(dump_properties.InitByOptions(), "Init dump properties failed."); GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "[Add][DumpProperties] failed."); ret = graph_manager_.Initialize(options_); diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index cf573343..d7568ccc 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -774,6 +774,7 @@ set(MULTI_PARTS_TEST_FILES "common/util_unittest.cc" "common/dump_manager_unittest.cc" "common/dump_op_unittest.cc" + "common/dump_properties_unittest.cc" "common/dump_exception_unittest.cc" "common/opdebug_register_unittest.cc" "common/format_transfer_unittest.cc" diff --git a/tests/ut/ge/common/dump_properties_unittest.cc b/tests/ut/ge/common/dump_properties_unittest.cc new file mode 100644 index 00000000..57809013 --- /dev/null +++ b/tests/ut/ge/common/dump_properties_unittest.cc @@ -0,0 +1,126 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#define protected public +#define private public + +#include "common/dump/dump_properties.h" +#include "ge_local_context.h" +#include "ge/ge_api_types.h" +#include "common/debug/log.h" +#include "common/ge_inner_error_codes.h" + +namespace ge { +class UTEST_dump_properties : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UTEST_dump_properties, check_dump_step) { + DumpProperties dp; + std::string dump_step{"0|3-5|10"}; + std::string unsupport_input1{"0|5-3|10"}; + std::string unsupport_input2{"one"}; + std::string unsupport_input3; + for (int i = 0; i < 200; ++i) { + unsupport_input3 += std::to_string(i) + "|"; + } + unsupport_input3.pop_back(); + Status st = dp.CheckDumpStep(dump_step); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input1); + EXPECT_NE(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input2); + EXPECT_NE(st, SUCCESS); + st = dp.CheckDumpStep(unsupport_input3); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_dump_mode) { + DumpProperties dp; + std::string dump_mode_1{"input"}; + std::string dump_mode_2{"output"}; + std::string dump_mode_3{"all"}; + std::string unsupport_input1{"mode1"}; + Status st = dp.CheckDumpMode(dump_mode_1); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(dump_mode_2); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(dump_mode_3); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpMode(unsupport_input1); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_dump_path) { + DumpProperties dp; + std::string dump_path{"/tmp/"}; + std::string unsupport_input1{" \\unsupported"}; + Status st = dp.CheckDumpPath(dump_path); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckDumpPath(unsupport_input1); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, check_enable_dump) { + DumpProperties dp; + std::string enable_dump_t{"1"}; + std::string enable_dump_f{"0"}; + std::string 
unsupport_input1{"true"}; + std::string unsupport_input2{"false"}; + Status st = dp.CheckEnableDump(enable_dump_t); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckEnableDump(enable_dump_f); + EXPECT_EQ(st, SUCCESS); + st = dp.CheckEnableDump(unsupport_input1); + EXPECT_NE(st, SUCCESS); + st = dp.CheckEnableDump(unsupport_input2); + EXPECT_NE(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_success_1) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}, + {OPTION_EXEC_DUMP_STEP, "0|1-3|10"}, + {OPTION_EXEC_DUMP_MODE, "all"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_EQ(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_success_2) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}, + {OPTION_EXEC_DUMP_DEBUG_MODE, "aicore_overflow"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_EQ(st, SUCCESS); +} + +TEST_F(UTEST_dump_properties, init_by_options_failed) { + DumpProperties dp; + std::map options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"}, + {OPTION_EXEC_DUMP_PATH, "/tmp/"}}; + GetThreadLocalContext().SetGlobalOption(options); + Status st = dp.InitByOptions(); + EXPECT_NE(st, SUCCESS); +} +} // namespace ge \ No newline at end of file From 2daa03a052674a47a576fd834b20fc2e8cbf1db6 Mon Sep 17 00:00:00 2001 From: lianghuikang <505519763@qq.com> Date: Wed, 30 Jun 2021 09:08:41 +0800 Subject: [PATCH 128/226] op_select_implmode support high_precision_for_all and high_performance_for_all --- ge/ir_build/option_utils.cc | 9 +++++++-- ge/offline/main.cc | 7 ++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/ge/ir_build/option_utils.cc b/ge/ir_build/option_utils.cc index 16586c4e..7287fe91 100755 --- a/ge/ir_build/option_utils.cc +++ b/ge/ir_build/option_utils.cc @@ -50,6 +50,8 @@ const std::set kBufferOptimizeSupportOption = {"l1_optimize", "l2_o const char *const kBufferOptimizeSupport = "only support l2_optimize, off_optimize"; const char *const IR_OPTION_OP_SELECT_IMPLMODE_DEFAULT = "high_performance"; const char *const IR_OPTION_OP_SELECT_IMPLMODE_PRECISON = "high_precision"; +const char *const IR_OPTION_OP_SELECT_IMPLMODE_HIGH_PRECISION_FOR_ALL = "high_precision_for_all"; +const char *const IR_OPTION_OP_SELECT_IMPLMODE_HIGH_PERFORMANCE_FOR_ALL = "high_performance_for_all"; const char *const kInputShapeSample1 = "\"input_name1:n1,c1,h1,w1\""; const char *const kInputShapeSample2 = "\"input_name1:1,3,224,224\""; const char *const kSplitError1 = "size not equal to 2 split by \":\""; @@ -57,7 +59,8 @@ const char *const kEmptyError = "can not be empty"; const char *const kFloatNumError = "exist float number"; const char *const kDigitError = "is not digit"; const char *const kCompressWeightError = "it must be appointed when appoint parameter[--optypelist_for_implmode]"; -const char *const kSelectImplmodeError = "only support high_performance, high_precision"; +const char *const kSelectImplmodeError = "only support high_performance, high_precision, " + "high_precision_for_all, high_performance_for_all"; const char *const kDynamicBatchSizeError = "It can only contains digit, \",\", \" \""; const char *const kDynamicImageSizeError = "It can only contains digit, \",\", \" \" and \";\""; const char *const kKeepDtypeError = "file not found"; @@ -782,7 +785,9 @@ Status CheckImplmodeParamValid(const std::string &optypelist_for_implmode, std:: 
op_select_implmode = IR_OPTION_OP_SELECT_IMPLMODE_DEFAULT; } else { if (op_select_implmode != IR_OPTION_OP_SELECT_IMPLMODE_DEFAULT && - op_select_implmode != IR_OPTION_OP_SELECT_IMPLMODE_PRECISON) { + op_select_implmode != IR_OPTION_OP_SELECT_IMPLMODE_PRECISON && + op_select_implmode != IR_OPTION_OP_SELECT_IMPLMODE_HIGH_PRECISION_FOR_ALL && + op_select_implmode != IR_OPTION_OP_SELECT_IMPLMODE_HIGH_PERFORMANCE_FOR_ALL) { ErrorManager::GetInstance().ATCReportErrMessage("E10001", {"parameter", "value", "reason"}, {"--op_select_implmode", op_select_implmode.c_str(), kSelectImplmodeError}); diff --git a/ge/offline/main.cc b/ge/offline/main.cc index 4837653f..bc3b823d 100755 --- a/ge/offline/main.cc +++ b/ge/offline/main.cc @@ -143,7 +143,8 @@ DEFINE_string(output_type, "", DEFINE_string(op_select_implmode, "", "Optional; op select implmode! " - "Support high_precision, high_performance."); + "Support high_precision, high_performance, " + "high_precision_for_all, high_performance_for_all."); DEFINE_string(optypelist_for_implmode, "", "Optional; Nodes need use implmode selected in op_select_implmode " @@ -311,8 +312,8 @@ class GFlagUtils { "scenarios by using a configuration file.\n" " --auto_tune_mode Set tune mode. E.g.: \"GA,RL\", support configure multiple, spit by ,\n" " --op_bank_path Set the path of the custom repository generated after operator tuning with Auto Tune.\n" - " --op_select_implmode Set op select implmode. Support high_precision, high_performance. " - "default: high_performance\n" + " --op_select_implmode Set op select implmode. Support high_precision, high_performance, " + "high_precision_for_all, high_performance_for_all. default: high_performance\n" " --optypelist_for_implmode Appoint which op to select implmode, cooperated with op_select_implmode.\n" " Separate multiple nodes with commas (,). Use double quotation marks (\") " "to enclose each argument. 
E.g.: \"node_name1,node_name2\"\n" From 81d226b36b26d4253d32428fd48bfe256b9d3d21 Mon Sep 17 00:00:00 2001 From: lichun Date: Thu, 1 Jul 2021 16:12:24 +0800 Subject: [PATCH 129/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/graph/load/model_manager/davinci_model.h | 1 + .../compiledsubgraph/known_node_executor.cc | 21 ++++--- .../compiledsubgraph/known_node_executor.h | 2 + ge/offline/main.cc | 4 +- .../ge/hybrid/known_node_executor_unittest.cc | 57 ++++++++++++++++++- 5 files changed, 75 insertions(+), 10 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 1e964855..daf0c7e6 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -300,6 +300,7 @@ class DavinciModel { return op_list_.at(index); } + void SetGlobalStep(void *global_step) { global_step_addr_ = global_step; } void *GetGlobalStep() const { return global_step_addr_; } // get task info for profiling diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 4db223e0..ea6e2965 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -182,6 +182,19 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons return SUCCESS; } +void KnownNodeExecutor::SettingDaviciModel(const HybridModel &model, const NodePtr &node, + std::shared_ptr &davinci_model) const { + // set known node flag as true + davinci_model->SetKnownNode(true); + davinci_model->SetId(model.GetModelId()); + davinci_model->SetDumpModelName(model.GetModelName()); + davinci_model->SetOmName(model.GetOmName()); + TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); + davinci_model->SetKnownShapeGlobalStep(global_step_var->MutableData()); + // set model id as root node's node id + davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); +} + Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node, shared_ptr &task) const { GELOGI("[%s] KnownNodeExecutor::LoadTask in.", node->GetName().c_str()); @@ -199,13 +212,7 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node std::shared_ptr davinci_model = MakeShared(0, nullptr); GE_CHECK_NOTNULL(davinci_model); - // set known node flag as true - davinci_model->SetKnownNode(true); - davinci_model->SetId(model.GetModelId()); - davinci_model->SetDumpModelName(model.GetModelName()); - davinci_model->SetOmName(model.GetOmName()); - // set model id as root node's node id - davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); + SettingDaviciModel(model, node, davinci_model); GELOGD("KnownNodeExecutor::LoadTask node id %ld.", node->GetOpDesc()->GetId()); GE_CHK_STATUS_RET(davinci_model->Assign(ge_model), diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h index 11cda846..475feeb1 100644 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h @@ -59,6 +59,8 @@ class KnownNodeExecutor : public NodeExecutor { const NodePtr &node, GeModelPtr &ge_model, ComputeGraphPtr &graph); + void SettingDaviciModel(const HybridModel &model, const NodePtr &node, + std::shared_ptr 
&davinci_model) const; }; } // namespace hybrid } // namespace ge diff --git a/ge/offline/main.cc b/ge/offline/main.cc index bc3b823d..a50ff931 100755 --- a/ge/offline/main.cc +++ b/ge/offline/main.cc @@ -1150,9 +1150,9 @@ domi::Status GenerateSingleOp(const std::string& json_file_path) { if (ret != SUCCESS) { DOMI_LOGE("Compile op failed. ge ret = %u, op index = %d", ret, index); ret = domi::FAILED; - break; + } else { + GELOGI("Compile op success. op index = %d, output = %s", index, output_path.c_str()); } - GELOGI("Compile op success. op index = %d, output = %s", index, output_path.c_str()); index += 1; } diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc index 98e985f7..a8367130 100644 --- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc @@ -27,6 +27,7 @@ #undef protected #include "graph/manager/graph_mem_allocator.h" #include "../graph/passes/graph_builder_utils.h" +#include "../inc/graph/utils/graph_utils.h" using namespace std; using namespace testing; @@ -48,6 +49,34 @@ class KnownNodeTaskMock : public KnownNodeTask { }; } +static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") { + auto op_desc = std::make_shared(name, type); + op_desc->SetStreamId(0); + op_desc->SetId(0); + + op_desc->SetWorkspace({}); + ; + op_desc->SetWorkspaceBytes({}); + op_desc->SetInputOffset({}); + op_desc->SetOutputOffset({}); + + ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF_AIVEC"); + bool support_dynamic = true; + ge::AttrUtils::GetBool(op_desc, "support_dynamicshape", support_dynamic); + return op_desc; +} + +static ComputeGraphPtr BuildDataDirectConnectGraph() { + const char *kRefIndex = "_parent_node_index"; + ge::ut::GraphBuilder builder("subgraph"); + auto data = builder.AddNode("Data", "Data", 1, 1); + auto netoutput = builder.AddNode("NetOutput", "NetOutput", 1, 1); + (void)AttrUtils::SetInt(netoutput->GetOpDesc()->MutableInputDesc(0), kRefIndex, 0); + + builder.AddDataEdge(data, 0, netoutput, 0); + return builder.GetGraph(); +} + TEST_F(UnknownNodeExecutorTest, test_init_davinci_model) { auto davinci_model = std::make_shared(0, nullptr); davinci_model->SetDeviceId(0); @@ -88,4 +117,30 @@ TEST_F(UnknownNodeExecutorTest, TestParseAttrForAllocatingOutputs) { ASSERT_EQ(node_item.ref_outputs[1], const_node); ASSERT_EQ(node_item.reuse_inputs.size(), 1); ASSERT_EQ(node_item.reuse_inputs[0], 0); -} \ No newline at end of file +} + +TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) { + OpDescPtr op_desc = CreateOpDesc("PartitionedCall", "PartitionedCall"); + auto root_graph = make_shared("root_graph"); + auto node = root_graph->AddNode(op_desc); + node->SetOwnerComputeGraph(root_graph); + auto sub_graph = BuildDataDirectConnectGraph(); + sub_graph->SetParentGraph(root_graph); + sub_graph->SetParentNode(node); + node->GetOpDesc()->AddSubgraphName("subgraph"); + node->GetOpDesc()->SetSubgraphInstanceName(0, "subgraph"); + root_graph->AddSubgraph("subgraph", sub_graph); + + GeRootModelPtr ge_root_model = make_shared(root_graph); + HybridModel hybrid_model(ge_root_model); + auto *step_id = new int64_t[1]; + step_id[0] = 520; + std::unique_ptr tensor_value; + tensor_value.reset(new(std::nothrow)TensorValue((void*)step_id, sizeof(step_id))); + hybrid_model.variable_tensors_.insert({"ge_global_step", std::move(tensor_value)}); + + KnownNodeExecutor known_node_executor; + std::shared_ptr davinci_model = MakeShared(0, nullptr); + 
known_node_executor.SettingDaviciModel(hybrid, node, davinci_model); + EXPECT_EQ(davinci_model->global_step_addr_, 520); +} From 1cc845d733e9146317b2c5f99ed2a31f9cfa9761 Mon Sep 17 00:00:00 2001 From: wqtshg Date: Thu, 1 Jul 2021 17:47:37 +0800 Subject: [PATCH 130/226] delete useless code --- ge/graph/build/model_builder.cc | 1 - ge/graph/manager/graph_var_manager.cc | 49 ------------------ ge/graph/manager/graph_var_manager.h | 15 ------ ge/graph/manager/trans_var_data_utils.cc | 66 ------------------------ ge/graph/manager/trans_var_data_utils.h | 11 ---- 5 files changed, 142 deletions(-) diff --git a/ge/graph/build/model_builder.cc b/ge/graph/build/model_builder.cc index e35e4e7d..2816f170 100755 --- a/ge/graph/build/model_builder.cc +++ b/ge/graph/build/model_builder.cc @@ -32,7 +32,6 @@ #include "graph/ge_attr_value.h" #include "graph/ge_context.h" #include "external/graph/ge_error_codes.h" -#include "graph/manager/graph_mem_allocator.h" #include "graph/manager/graph_var_manager.h" #include "graph/optimize/common/params.h" #include "external/graph/types.h" diff --git a/ge/graph/manager/graph_var_manager.cc b/ge/graph/manager/graph_var_manager.cc index ced8465f..89a4e45b 100755 --- a/ge/graph/manager/graph_var_manager.cc +++ b/ge/graph/manager/graph_var_manager.cc @@ -194,35 +194,6 @@ ge::Status VarResource::GetBroadCastInfo(uint32_t graph_id, const string &var_na return SUCCESS; } -ge::Status VarResource::SyncVarData2BroadCast(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr) { - GE_CHECK_NOTNULL(base_ptr); - GELOGI("SyncVarData2BroadCast graph_id: %u, var_name: %s.", graph_id, var_name.c_str()); - - VarBroadCastInfo var_broadcast_info = var_broad_cast_info_[graph_id][var_name]; - uint8_t *dst_addr = base_ptr + var_broadcast_info.input_offset; - - return ge::TransVarDataUtils::SyncVarData2BroadCast(var_name, var_tensor_desc, dst_addr, - var_broadcast_info.input_size, session_id_); -} - -ge::Status VarResource::SyncBroadCastData2Var(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr) { - GELOGI("SyncBroadCastData2Var var_name: %s", var_name.c_str()); - - VarBroadCastInfo var_broadcast_info = var_broad_cast_info_[graph_id][var_name]; - // subgraph base_ptr could be nullptr, task it as base 0 - uint8_t *dst_addr = base_ptr + var_broadcast_info.output_offset; - - return ge::TransVarDataUtils::SyncBroadCastData2Var(dst_addr, var_broadcast_info.output_size, var_name, - var_tensor_desc, session_id_); -} - -ge::Status VarResource::SyncVarData(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr) { - return SyncVarData2BroadCast(graph_id, var_name, var_tensor_desc, base_ptr); -} - bool VarResource::IsVarAddr(const int64_t &offset) { return var_offset_map_.count(offset) > 0; } rtMemType_t VarResource::GetVarMemType(const int64_t &offset) { @@ -638,16 +609,6 @@ bool VarManager::IsVarExist(const std::string &var_name) { return var_resource_->IsVarExist(var_name); } -ge::Status VarManager::SyncVarData(uint32_t graph_id, const std::string &var_name, const GeTensorDesc &var_tensor_desc, - uint8_t *base_ptr) { - std::lock_guard lock(mutex_); - if (var_resource_ == nullptr) { - GELOGW("VarManager has not been init."); - return ge::INTERNAL_ERROR; - } - return var_resource_->SyncVarData(graph_id, var_name, var_tensor_desc, base_ptr); -} - ge::Status VarManager::GetCurVarDesc(const std::string &var_name, ge::GeTensorDesc &tensor_desc) { 
std::lock_guard lock(mutex_); GELOGI("VarManager::GetCurVarDesc var_name = %s.", var_name.c_str()); @@ -701,16 +662,6 @@ ge::Status VarManager::RenewCurVarDesc(const std::string &var_name, ge::OpDescPt return var_resource_->RenewCurVarDesc(var_name, std::move(op_desc)); } -ge::Status VarManager::SyncBroadCastData2Var(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr) { - std::lock_guard lock(mutex_); - if (var_resource_ == nullptr) { - GELOGW("VarManager has not been init."); - return ge::INTERNAL_ERROR; - } - return var_resource_->SyncBroadCastData2Var(graph_id, var_name, var_tensor_desc, base_ptr); -} - bool VarManager::IsVarAddr(const int64_t &offset) { std::lock_guard lock(mutex_); if (var_resource_ == nullptr) { diff --git a/ge/graph/manager/graph_var_manager.h b/ge/graph/manager/graph_var_manager.h index 736466c4..f2b68e79 100755 --- a/ge/graph/manager/graph_var_manager.h +++ b/ge/graph/manager/graph_var_manager.h @@ -118,15 +118,6 @@ class VarResource { ge::Status GetBroadCastInfo(uint32_t graph_id, const string &var_name, VarBroadCastInfo &broad_cast_info); - ge::Status SyncVarData2BroadCast(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr); - - ge::Status SyncBroadCastData2Var(uint32_t graph_id, const std::string &var_name, - const GeTensorDesc &var_tensor_desc, uint8_t *base_ptr); - - ge::Status SyncVarData(uint32_t graph_id, const std::string &var_name, const GeTensorDesc &var_tensor_desc, - uint8_t *base_ptr); - Status SetTransRoad(const std::string &var_name, const VarTransRoad &trans_road) { if (var_to_trans_road_.find(var_name) != var_to_trans_road_.end()) { GELOGW("Var name: %s has already set.", var_name.c_str()); @@ -234,16 +225,10 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { ge::Status GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr); - ge::Status SyncVarData(uint32_t graph_id, const std::string &var_name, const GeTensorDesc &var_tensor_desc, - uint8_t *base_ptr); - ge::Status SaveBroadCastInfo(uint32_t graph_id, const VarBroadCastInfo &broad_cast_info); ge::Status GetBroadCastInfo(uint32_t graph_id, const string &var_name, VarBroadCastInfo &broad_cast_info); - ge::Status SyncBroadCastData2Var(uint32_t graph_id, const std::string &var_name, const GeTensorDesc &var_tensor_desc, - uint8_t *base_ptr); - ge::Status GetCurVarDesc(const std::string &var_name, ge::GeTensorDesc &tensor_desc); ge::Status RenewCurVarDesc(const std::string &var_name, ge::OpDescPtr op_desc); diff --git a/ge/graph/manager/trans_var_data_utils.cc b/ge/graph/manager/trans_var_data_utils.cc index 4c25dff1..2e6ce454 100644 --- a/ge/graph/manager/trans_var_data_utils.cc +++ b/ge/graph/manager/trans_var_data_utils.cc @@ -415,72 +415,6 @@ Status CopyTensorFromSrcVarNode(const NodePtr &var_src, return SUCCESS; } } // namespace -Status TransVarDataUtils::SyncVarData2BroadCast(const string &var_name, const ge::GeTensorDesc &src_tensor_desc, - uint8_t *dst_addr, int64_t dst_addr_size, uint64_t session_id) { - GE_CHK_BOOL_RET_STATUS(dst_addr != nullptr, FAILED, "[Check][Param] dst addr is nullptr."); - uint8_t *src_host_addr = nullptr; - int64_t src_addr_size = 0; - GE_MAKE_GUARD_RTMEM(src_host_addr); - GE_CHK_STATUS_RET(SyncTensorToHost(var_name, src_tensor_desc, &src_host_addr, src_addr_size, session_id)); - - GELOGI("src_addr_size: %ld, dst_addr_size: %ld", src_addr_size, dst_addr_size); - GE_CHK_BOOL_RET_STATUS(src_addr_size == 
dst_addr_size, FAILED, - "[Check][Param] src_addr_size:%ld not equal to dst_addr_size:%ld", - src_addr_size, dst_addr_size); - - GE_CHK_RT_RET(rtMemcpy(dst_addr, dst_addr_size, src_host_addr, src_addr_size, RT_MEMCPY_HOST_TO_DEVICE)); - return SUCCESS; -} - -Status TransVarDataUtils::SyncBroadCastData2Var(uint8_t *src_addr, int64_t src_addr_size, const string &var_name, - const ge::GeTensorDesc &dst_tensor_desc, uint64_t session_id) { - GE_CHK_BOOL_RET_STATUS(src_addr != nullptr, FAILED, "[Check][Param] src addr is nullptr. "); - uint8_t *host_addr = nullptr; - GE_MAKE_GUARD_RTMEM(host_addr); - GE_CHK_RT_RET(rtMallocHost(reinterpret_cast(&host_addr), src_addr_size)); - GE_CHK_RT_RET(rtMemcpy(host_addr, src_addr_size, src_addr, src_addr_size, RT_MEMCPY_DEVICE_TO_HOST)); - - GE_CHK_STATUS_RET( - SyncTensorToDevice(var_name, reinterpret_cast(host_addr), src_addr_size, dst_tensor_desc, session_id)); - - return SUCCESS; -} - -Status TransVarDataUtils::SyncTensorToHost(const string &var_name, const ge::GeTensorDesc &src_tensor_desc, - uint8_t **host_addr, int64_t &src_tensor_size, uint64_t session_id) { - GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(src_tensor_desc, src_tensor_size), "[Get][Size] from TensorDesc failed"); - - uint8_t *src_addr = nullptr; - GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetVarAddr(var_name, src_tensor_desc, &src_addr)); - uint8_t *mem_addr = - src_addr - - static_cast(static_cast(VarManager::Instance(session_id)->GetVarMemLogicBase())) + - static_cast( - reinterpret_cast(VarManager::Instance(session_id)->GetVarMemoryBase(RT_MEMORY_HBM))); - GE_CHK_RT_RET(rtMallocHost(reinterpret_cast(host_addr), src_tensor_size)); - - GE_CHK_RT_RET(rtMemcpy(*host_addr, src_tensor_size, mem_addr, src_tensor_size, RT_MEMCPY_DEVICE_TO_HOST)); - - GELOGI("SyncTensorToHost var_name %s, src_tensor_size %ld", var_name.c_str(), src_tensor_size); - return SUCCESS; -} - -Status TransVarDataUtils::SyncTensorToDevice(const string &var_name, const uint8_t *host_addr, uint32_t addr_size, - const ge::GeTensorDesc &dst_tensor_desc, uint64_t session_id) { - uint8_t *dst_addr = nullptr; - GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetVarAddr(var_name, dst_tensor_desc, &dst_addr)); - uint8_t *mem_addr = - dst_addr - - static_cast(static_cast(VarManager::Instance(session_id)->GetVarMemLogicBase())) + - static_cast( - reinterpret_cast(VarManager::Instance(session_id)->GetVarMemoryBase(RT_MEMORY_HBM))); - GE_CHK_RT_RET(rtMemcpy(mem_addr, addr_size, host_addr, addr_size, RT_MEMCPY_HOST_TO_DEVICE)); - - GELOGI("SyncTensorToDevice var_name %s, addr_size %u", var_name.c_str(), addr_size); - - return SUCCESS; -} - Status TransVarDataUtils::TransAllVarData(const vector &variable_nodes, uint64_t session_id, rtContext_t context, diff --git a/ge/graph/manager/trans_var_data_utils.h b/ge/graph/manager/trans_var_data_utils.h index d5096ef2..174efbb3 100755 --- a/ge/graph/manager/trans_var_data_utils.h +++ b/ge/graph/manager/trans_var_data_utils.h @@ -29,11 +29,6 @@ namespace ge { class TransVarDataUtils { public: - static ge::Status SyncVarData2BroadCast(const string &var_name, const ge::GeTensorDesc &src_tensor_desc, - uint8_t *dst_addr, int64_t dst_addr_size, uint64_t session_id_); - static ge::Status SyncBroadCastData2Var(uint8_t *src_addr, int64_t src_addr_size, const string &var_name, - const ge::GeTensorDesc &dst_tensor_desc, uint64_t session_id_); - static ge::Status TransAllVarData(const std::vector &variable_nodes, uint64_t session_id, rtContext_t context, @@ -41,12 +36,6 @@ class 
TransVarDataUtils { uint32_t thread_num = 16); static ge::Status CopyVarData(const ComputeGraphPtr &compute_graph, uint64_t session_id, uint32_t device_id); - - private: - static ge::Status SyncTensorToHost(const string &var_name, const ge::GeTensorDesc &src_tensor_desc, - uint8_t **host_addr, int64_t &addr_size, uint64_t session_id_); - static ge::Status SyncTensorToDevice(const string &var_name, const uint8_t *host_addr, uint32_t addr_size, - const ge::GeTensorDesc &dst_tensor_desc, uint64_t session_id_); }; } // namespace ge From 2400e65904de4e5d20731f41421853360542e04c Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 1 Jul 2021 17:49:37 +0800 Subject: [PATCH 131/226] Do not create context in hydrid executor init func. --- ge/hybrid/executor/hybrid_model_executor.cc | 4 ---- ge/hybrid/executor/hybrid_model_pipeline_executor.cc | 1 - ge/hybrid/executor/worker/task_compile_engine.cc | 11 +++++++++-- metadef | 2 +- parser | 2 +- .../executor/worker/execution_engine_unittest.cc | 12 ++++++++++++ 6 files changed, 23 insertions(+), 9 deletions(-) diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index 58da451c..2bb683c7 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -33,9 +33,6 @@ HybridModelExecutor::HybridModelExecutor(HybridModel *model, uint32_t device_id, } HybridModelExecutor::~HybridModelExecutor() { - if (context_.rt_gen_context != nullptr) { - (void) rtCtxDestroy(context_.rt_gen_context); - } } Status HybridModelExecutor::Init() { @@ -139,7 +136,6 @@ Status HybridModelExecutor::Cleanup() { Status HybridModelExecutor::InitExecutionContext() { GE_CHK_RT_RET(rtCtxGetCurrent(&context_.rt_context)); - GE_CHK_RT_RET(rtCtxCreate(&context_.rt_gen_context, RT_CTX_GEN_MODE, 0)); GE_CHK_RT_RET(rtCtxSetCurrent(context_.rt_context)); context_.global_step = model_->GetGlobalStep(); diff --git a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc index 45e61138..b5e66628 100644 --- a/ge/hybrid/executor/hybrid_model_pipeline_executor.cc +++ b/ge/hybrid/executor/hybrid_model_pipeline_executor.cc @@ -191,7 +191,6 @@ HybridModelPipelineExecutor::HybridModelPipelineExecutor(HybridModel *model, uin } Status StageExecutor::InitExecutionContext() { - GE_CHK_RT_RET(rtCtxCreate(&context_.rt_gen_context, RT_CTX_GEN_MODE, 0)); GE_CHK_RT_RET(rtCtxSetCurrent(context_.rt_context)); context_.model = model_; diff --git a/ge/hybrid/executor/worker/task_compile_engine.cc b/ge/hybrid/executor/worker/task_compile_engine.cc index f7da9acd..491e0997 100755 --- a/ge/hybrid/executor/worker/task_compile_engine.cc +++ b/ge/hybrid/executor/worker/task_compile_engine.cc @@ -21,10 +21,17 @@ namespace ge { namespace hybrid { Status TaskCompileEngine::Compile(NodeState &node_state, GraphExecutionContext *context) { - const auto &node_item = *node_state.GetNodeItem(); GE_CHECK_NOTNULL(context); + rtContext_t rt_gen_context = nullptr; + GE_CHK_RT_RET(rtCtxCreate(&rt_gen_context, RT_CTX_GEN_MODE, 0)); + std::function callback = [&]() { + (void) rtCtxDestroy(rt_gen_context); + GE_CHK_RT(rtCtxSetCurrent(context->rt_context)); + }; + GE_MAKE_GUARD(rt_gen_context, callback); + + const auto &node_item = *node_state.GetNodeItem(); RECORD_COMPILE_EVENT(context, node_item.NodeName().c_str(), "[Compile] Start"); - GE_CHK_RT_RET(rtCtxSetCurrent(context->rt_gen_context)); if (context->ge_context != nullptr) { GetThreadLocalContext() = *context->ge_context; diff --git 
a/metadef b/metadef
index f3f137de..9e4a51a9 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit f3f137de034885f0c7394d7f04b41b08d450d2d2
+Subproject commit 9e4a51a9602195b82e326b853f5adbfefc3972b6
diff --git a/parser b/parser
index 15a27afe..79536a19 160000
--- a/parser
+++ b/parser
@@ -1 +1 @@
-Subproject commit 15a27afefe45f2abdb78787d629163aab9437599
+Subproject commit 79536a196f89cf7a1f5852ff7304b9a7d7b12eff
diff --git a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc
index 07701f4d..96641c59 100644
--- a/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc
+++ b/tests/ut/ge/hybrid/executor/worker/execution_engine_unittest.cc
@@ -27,6 +27,7 @@
 #include "hybrid/executor/hybrid_model_executor.h"
 #include "hybrid/executor/worker/execution_engine.h"
 #include "hybrid/executor/subgraph_executor.h"
+#include "hybrid/executor/worker/task_compile_engine.h"
 #undef private
 #undef protected
 
@@ -45,7 +46,14 @@ class UtestExecutionEngine : public testing::Test {
 };
 namespace {
 const int kIntBase = 10;
+class CompileNodeExecutor : public NodeExecutor {
+ public:
+  Status CompileTask(const HybridModel &model, const NodePtr &node, std::shared_ptr<NodeTask> &task) const override {
+    return SUCCESS;
+  }
+};
 }
+
 static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") {
   auto op_desc = std::make_shared<OpDesc>(name, type);
   op_desc->SetStreamId(0);
@@ -128,4 +136,8 @@ TEST_F(UtestExecutionEngine, ExecuteAsync_without_callback_and_kernel_task) {
   executor.InitCallback(node_state.get(), callback);
   ExecutionEngine execution_engine;
   EXPECT_EQ(execution_engine.ExecuteAsync(*node_state, node_state->GetTaskContext(), execution_context, callback), INTERNAL_ERROR);
+
+  CompileNodeExecutor node_executor;
+  node_item->node_executor = &node_executor;
+  EXPECT_EQ(TaskCompileEngine::Compile(*node_state, &execution_context), SUCCESS);
 }

From 41ffa8bed108b57bb472843db88800c1ad105a85 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Thu, 1 Jul 2021 17:52:32 +0800
Subject: [PATCH 132/226] Update submodule.

--- metadef | 2 +-
 parser | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/metadef b/metadef
index 9e4a51a9..9c9907b7 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit 9e4a51a9602195b82e326b853f5adbfefc3972b6
+Subproject commit 9c9907b76a457f456072af96b8cbcfb7943beccc
diff --git a/parser b/parser
index 79536a19..15a27afe 160000
--- a/parser
+++ b/parser
@@ -1 +1 @@
-Subproject commit 79536a196f89cf7a1f5852ff7304b9a7d7b12eff
+Subproject commit 15a27afefe45f2abdb78787d629163aab9437599

From cdecf866db4f328e73e016aec3dd58b685be71b8 Mon Sep 17 00:00:00 2001
From: lichun
Date: Thu, 1 Jul 2021 17:54:49 +0800
Subject: [PATCH 133/226] add global step info for known subgraphs in unknown
 models and generate om files for the remaining cases when some single-op
 cases fail in atc

--- tests/ut/ge/hybrid/known_node_executor_unittest.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
index a8367130..435928ee 100644
--- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
@@ -141,6 +141,6 @@ TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) {
 
   KnownNodeExecutor known_node_executor;
   std::shared_ptr<DavinciModel> davinci_model = MakeShared<DavinciModel>(0, nullptr);
-  known_node_executor.SettingDaviciModel(hybrid, node, davinci_model);
+  known_node_executor.SettingDaviciModel(hybrid_model, node, davinci_model);
   EXPECT_EQ(davinci_model->global_step_addr_, 520);
 }
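Patches 129 and 133 together wire the root model's global step into each known-shape submodel: SettingDaviciModel
looks up the ge_global_step variable on the HybridModel and hands its buffer to the DavinciModel, so the submodel
reads the very counter the root model advances. A minimal sketch of that sharing contract, outside the GE class
hierarchy (all names below are illustrative stand-ins, not GE symbols):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

// The root model owns the step buffer; the submodel only keeps a raw pointer
// to it, mirroring the SetGlobalStep(void *) hand-off in the patch.
struct RootModel {
  int64_t global_step = 0;
  std::map<std::string, void *> variables{{"ge_global_step", &global_step}};
};

struct SubModel {
  void *global_step_addr = nullptr;
  void SetGlobalStep(void *addr) { global_step_addr = addr; }
};

int main() {
  RootModel root;
  SubModel sub;
  sub.SetGlobalStep(root.variables["ge_global_step"]);
  root.global_step = 520;  // root advances the step
  assert(*static_cast<int64_t *>(sub.global_step_addr) == 520);  // submodel sees the same value
  return 0;
}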
From 70a9868d3b3e66fde3200960f8e659318c9da944 Mon Sep 17 00:00:00 2001
From: lianghao
Date: Thu, 1 Jul 2021 18:02:40 +0800
Subject: [PATCH 134/226] IsEnterFeedNode

--- ge/hybrid/model/node_item.cc | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc
index 8e87c6e2..77bd8efd 100644
--- a/ge/hybrid/model/node_item.cc
+++ b/ge/hybrid/model/node_item.cc
@@ -24,6 +24,8 @@
 namespace ge {
 namespace hybrid {
 namespace {
+const uint8_t kMaxTransCount = 3;
+const uint32_t kTransOpIoSize = 1;
 const char *const kAttrNameOriginalFusionGraph = "_original_fusion_graph";
 const char *const kNodeTypeRetVal = "_RetVal";
 const std::set<std::string> kControlOpTypes{
@@ -39,6 +41,25 @@ const std::set<std::string> kMergeOpTypes{
   MERGE, REFMERGE, STREAMMERGE
 };
 
+bool IsEnterFeedNode(NodePtr node) {
+  // For: Enter -> node
+  // For: Enter -> Cast -> node
+  // For: Enter -> TransData -> Cast -> node
+  for (uint8_t i = 0; i < kMaxTransCount; ++i) {
+    if (kEnterOpTypes.count(NodeUtils::GetNodeType(node)) > 0) {
+      GELOGD("Node[%s] is Enter feed node.", node->GetName().c_str());
+      return true;
+    }
+
+    const auto all_nodes = node->GetInDataNodes();
+    if (all_nodes.size() != kTransOpIoSize || node->GetAllInDataAnchorsSize() != kTransOpIoSize) {
+      return false;
+    }
+    node = all_nodes.at(0);
+  }
+  return false;
+}
+
 Status ParseInputMapping(Node &node, OpDesc &op_desc, FusedSubgraph &fused_subgraph) {
   uint32_t parent_index = 0;
   if (!AttrUtils::GetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
@@ -399,7 +420,7 @@ void NodeItem::SetDataSend(NodeItem *node_item, int anchor_index) {
     data_anchors.emplace(anchor_index);
   }
   // If Enter feed Not Merge, take as root Node.
- if (IsEnterOp() && (node_item->node_type != STREAMMERGE)) { + if (IsEnterFeedNode(node) && (node_item->node_type != STREAMMERGE)) { auto &data_anchors = node_item->enter_data_[this]; data_anchors.emplace(anchor_index); } @@ -419,7 +440,7 @@ void NodeItem::SetCtrlSend(NodeItem *node_item, uint32_t switch_index) { node_item->root_ctrl_.emplace(this); } // If Enter feed control signal, take as root Node. - if (IsEnterOp() && (node_item->node_type != STREAMMERGE && node_item->node_type != STREAMACTIVE)) { + if (IsEnterFeedNode(node) && (node_item->node_type != STREAMMERGE && node_item->node_type != STREAMACTIVE)) { node_item->enter_ctrl_.emplace(this); } GELOGI("Node[%s] will control node[%s]", NodeName().c_str(), node_item->NodeName().c_str()); From 1db4cac78d33622b6c54fedbb8d59b8d70f1fe66 Mon Sep 17 00:00:00 2001 From: WeiGangqiang Date: Tue, 29 Jun 2021 10:04:43 +0800 Subject: [PATCH 135/226] add graph check framework --- .clang-format | 3 +- metadef | 2 +- scripts/env/Dockerfile | 15 ++ scripts/env/ge_env.sh | 4 +- tests/depends/cce/CMakeLists.txt | 1 + tests/framework/CMakeLists.txt | 13 -- .../include/easy_graph/builder/graph_dsl.h | 22 ++- .../easy_graph/src/layout/graph_layout.cc | 8 +- .../ge_graph_dsl/assert/assert_error.h | 37 +++++ .../include/ge_graph_dsl/assert/check_utils.h | 32 +++++ .../ge_graph_dsl/assert/filter_scope_guard.h} | 49 ++++--- .../ge_graph_dsl/assert/graph_assert.h | 59 ++++++++ .../ge_graph_dsl/op_desc/op_desc_cfg.h | 6 +- .../ge_graph_dsl/src/assert/assert_error.cc | 26 ++++ .../ge_graph_dsl/src/assert/check_utils.cc | 34 +++++ .../src/assert/filter_scope_guard.cc | 31 +++++ .../ge_graph_dsl/src/assert/ge_dump_filter.h | 33 +++++ .../src/assert/ge_graph_check_dumper.cc | 79 +++++++++++ .../src/assert/ge_graph_check_dumper.h | 49 +++++++ .../src/assert/ge_graph_checker.h | 32 +++++ .../src/assert/ge_graph_default_checker.cc | 28 ++++ .../src/assert/ge_graph_default_checker.h | 41 ++++++ .../src/{ => op_desc}/op_desc_cfg_box.cc | 0 .../src/{ => op_desc}/op_desc_cfg_repo.cc | 17 ++- .../src/{ => op_desc}/op_desc_ptr_box.cc | 4 +- .../ge_graph_visitor.cc} | 12 +- .../src/{ => vistor}/ge_subgraph_vistor.cc | 0 .../src/{ => vistor}/graph_dsl.cc | 0 .../ge_graph_dsl/tests/CMakeLists.txt | 2 +- .../ge_graph_dsl/tests/check_graph_test.cc | 129 ++++++++++++++++++ .../ge_graph_dsl/tests/graph_dsl_test.cc | 44 +++--- .../ge_graph_dsl/tests/stub/optype_stub.cc | 6 + .../tests/test_main.cc} | 47 ++++--- .../utils/builder/graph_builder_utils.cc | 48 ------- .../utils/builder/graph_builder_utils.h | 55 -------- tests/st/testcase/CMakeLists.txt | 2 +- tests/st/testcase/test_framework_dummy.cc | 127 +++++++---------- tests/st/testcase/test_ge_opt_info.cc | 20 +-- tests/st/testcase/test_main.cc | 4 +- tests/ut/common/graph/CMakeLists.txt | 1 + tests/ut/ge/CMakeLists.txt | 1 + 41 files changed, 811 insertions(+), 312 deletions(-) create mode 100644 tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/assert_error.h create mode 100644 tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/check_utils.h rename tests/framework/{utils/builder/tensor_builder_utils.cc => ge_graph_dsl/include/ge_graph_dsl/assert/filter_scope_guard.h} (68%) create mode 100644 tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/graph_assert.h create mode 100644 tests/framework/ge_graph_dsl/src/assert/assert_error.cc create mode 100644 tests/framework/ge_graph_dsl/src/assert/check_utils.cc create mode 100644 tests/framework/ge_graph_dsl/src/assert/filter_scope_guard.cc create mode 100644 
tests/framework/ge_graph_dsl/src/assert/ge_dump_filter.h
 create mode 100644 tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.cc
 create mode 100644 tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.h
 create mode 100644 tests/framework/ge_graph_dsl/src/assert/ge_graph_checker.h
 create mode 100644 tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.cc
 create mode 100644 tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.h
 rename tests/framework/ge_graph_dsl/src/{ => op_desc}/op_desc_cfg_box.cc (100%)
 rename tests/framework/ge_graph_dsl/src/{ => op_desc}/op_desc_cfg_repo.cc (53%)
 rename tests/framework/ge_graph_dsl/src/{ => op_desc}/op_desc_ptr_box.cc (97%)
 rename tests/framework/ge_graph_dsl/src/{ge_graph_vistor.cc => vistor/ge_graph_visitor.cc} (89%)
 rename tests/framework/ge_graph_dsl/src/{ => vistor}/ge_subgraph_vistor.cc (100%)
 rename tests/framework/ge_graph_dsl/src/{ => vistor}/graph_dsl.cc (100%)
 create mode 100644 tests/framework/ge_graph_dsl/tests/check_graph_test.cc
 rename tests/framework/{utils/builder/tensor_builder_utils.h => ge_graph_dsl/tests/test_main.cc} (73%)
 delete mode 100644 tests/framework/utils/builder/graph_builder_utils.cc
 delete mode 100644 tests/framework/utils/builder/graph_builder_utils.h

diff --git a/.clang-format b/.clang-format
index e7f9d935..6faea40d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -52,7 +52,6 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
 ConstructorInitializerIndentWidth: 4
 ContinuationIndentWidth: 4
 Cpp11BracedListStyle: true
-DerivePointerAlignment: true
 DisableFormat: false
 ExperimentalAutoDetectBinPacking: false
 FixNamespaceComments: true
@@ -94,7 +93,7 @@ PenaltyBreakString: 1000
 PenaltyBreakTemplateDeclaration: 10
 PenaltyExcessCharacter: 1000000
 PenaltyReturnTypeOnItsOwnLine: 200
-PointerAlignment: Left
+PointerAlignment: Right
 RawStringFormats:
   - Language: Cpp
     Delimiters:
diff --git a/metadef b/metadef
index f3f137de..9c9907b7 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit f3f137de034885f0c7394d7f04b41b08d450d2d2
+Subproject commit 9c9907b76a457f456072af96b8cbcfb7943beccc
diff --git a/scripts/env/Dockerfile b/scripts/env/Dockerfile
index af02f7bb..923a1453 100755
--- a/scripts/env/Dockerfile
+++ b/scripts/env/Dockerfile
@@ -38,5 +38,20 @@ RUN wget https://github.com/ccup/lcov/archive/refs/tags/add_lcov.tar.gz -O add_l
 
 ENV PROJECT_HOME=/code/Turing/graphEngine
 
+RUN mkdir /var/run/sshd
+RUN echo "root:root" | chpasswd
+RUN sed -i 's/\#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+ENV NOTVISIBLE "in users profile"
+RUN echo "export VISIBLE=now" >> /etc/profile
+
+EXPOSE 22 7777
+
+RUN useradd -ms /bin/bash debugger
+RUN echo "debugger:ge123" | chpasswd
+
+CMD ["/usr/sbin/sshd", "-D"]
+
 RUN echo "alias ge=/code/Turing/graphEngine/scripts/ge.sh">>~/.bashrc
 
diff --git a/scripts/env/ge_env.sh b/scripts/env/ge_env.sh
index 18c6aa5d..10ca810f 100755
--- a/scripts/env/ge_env.sh
+++ b/scripts/env/ge_env.sh
@@ -21,7 +21,7 @@ MOUNT_PROJECT_HOME=$(cd $PROJECT_HOME || return; pwd)
 DOCKER_BUILD_ENV_NAME=${MOUNT_PROJECT_HOME#*/}
 DOCKER_BUILD_ENV_NAME=${DOCKER_BUILD_ENV_NAME//\//\_}
 
-DOCKER_IMAGE_TAG=ge_build_env.1.0.6
+DOCKER_IMAGE_TAG=ge_build_env.1.0.9
 DOCKER_IAMGE_NAME=joycode2art/turing
 DOCKER_FULL_IMAGE_NAME=${DOCKER_IAMGE_NAME}:${DOCKER_IMAGE_TAG}
 
@@ -61,7 +61,7 @@ function enter_docker_env(){
     if test -z "$(docker images |grep
${DOCKER_IAMGE_NAME} | grep ${DOCKER_IMAGE_TAG})"; then echo "please run 'ge env --pull' to download images first!" elif test -z "$(docker ps -a |grep ${DOCKER_BUILD_ENV_NAME})"; then - $docker_cmd run -it -v ${MOUNT_PROJECT_HOME}:/code/Turing/graphEngine --workdir ${docker_work_dir} --name ${DOCKER_BUILD_ENV_NAME} ${DOCKER_FULL_IMAGE_NAME} ${docker_bash_dir} + $docker_cmd run -p 7002:22 -p 7003:7777 --privileged=true -it -v ${MOUNT_PROJECT_HOME}:/code/Turing/graphEngine --workdir ${docker_work_dir} --name ${DOCKER_BUILD_ENV_NAME} ${DOCKER_FULL_IMAGE_NAME} ${docker_bash_dir} elif test -z "$(docker ps |grep ${DOCKER_BUILD_ENV_NAME})"; then $docker_cmd start ${DOCKER_BUILD_ENV_NAME} $docker_cmd exec -w ${docker_work_dir} -it ${DOCKER_BUILD_ENV_NAME} ${docker_bash_dir} diff --git a/tests/depends/cce/CMakeLists.txt b/tests/depends/cce/CMakeLists.txt index 7550c63f..05fa8133 100644 --- a/tests/depends/cce/CMakeLists.txt +++ b/tests/depends/cce/CMakeLists.txt @@ -60,6 +60,7 @@ set(SRCS "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" + "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/type_utils.cc" diff --git a/tests/framework/CMakeLists.txt b/tests/framework/CMakeLists.txt index 8a2218b4..bbab454b 100644 --- a/tests/framework/CMakeLists.txt +++ b/tests/framework/CMakeLists.txt @@ -17,16 +17,3 @@ include(cmake/graphengine.cmake) add_subdirectory(easy_graph) add_subdirectory(ge_graph_dsl) add_subdirectory(ge_running_env) - -file(GLOB_RECURSE UTILS_SRC CONFIGURE_DEPENDS - "utils/*.cc" - ) - -add_library(framework STATIC ${UTILS_SRC}) - -target_include_directories(framework - PUBLIC utils/ -) - -set_target_properties(framework PROPERTIES CXX_STANDARD 11) -target_link_libraries(framework PUBLIC ge_graph_dsl ge_with_env) diff --git a/tests/framework/easy_graph/include/easy_graph/builder/graph_dsl.h b/tests/framework/easy_graph/include/easy_graph/builder/graph_dsl.h index 4d430983..46bfe324 100644 --- a/tests/framework/easy_graph/include/easy_graph/builder/graph_dsl.h +++ b/tests/framework/easy_graph/include/easy_graph/builder/graph_dsl.h @@ -26,16 +26,32 @@ EG_NS_BEGIN //////////////////////////////////////////////////////////////// namespace detail { -template +template Graph BuildGraph(const char *name, GRAPH_BUILDER builderInDSL) { GraphBuilder builder(name); builderInDSL(builder); return std::move(*builder); } + +struct GraphDefiner { + GraphDefiner(const char *defaultName, const char *specifiedName = nullptr) { + name = specifiedName ? specifiedName : defaultName; + } + + template + auto operator|(USER_BUILDER &&userBuilder) { + GraphBuilder graphBuilder{name}; + std::forward(userBuilder)(graphBuilder); + return *graphBuilder; + } + + private: + const char *name; +}; + } // namespace detail -#define HAS_NAME(...) NOT_EMPTY_SELECT(__VA_ARGS__) -#define DEF_GRAPH(G, ...) ::EG_NS::Graph G = ::EG_NS::detail::BuildGraph(HAS_NAME(__VA_ARGS__)(__VA_ARGS__, #G), [&](::EG_NS::GraphBuilder& BUILDER) +#define DEF_GRAPH(G, ...) ::EG_NS::Graph G = ::EG_NS::detail::GraphDefiner(#G, ##__VA_ARGS__) | [&](auto &&BUILDER) #define DATA_CHAIN(...) ::EG_NS::ChainBuilder(BUILDER, ::EG_NS::EdgeType::DATA)->__VA_ARGS__ #define CTRL_CHAIN(...) ::EG_NS::ChainBuilder(BUILDER, ::EG_NS::EdgeType::CTRL)->__VA_ARGS__ #define CHAIN(...) 
DATA_CHAIN(__VA_ARGS__) diff --git a/tests/framework/easy_graph/src/layout/graph_layout.cc b/tests/framework/easy_graph/src/layout/graph_layout.cc index 340acf67..716bed8a 100644 --- a/tests/framework/easy_graph/src/layout/graph_layout.cc +++ b/tests/framework/easy_graph/src/layout/graph_layout.cc @@ -16,10 +16,15 @@ #include "easy_graph/layout/graph_layout.h" #include "easy_graph/layout/layout_executor.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" #include "easy_graph/graph/graph.h" EG_NS_BEGIN +namespace { +GraphEasyExecutor default_executor; +} + void GraphLayout::Config(LayoutExecutor &executor, const LayoutOption *opts) { this->executor_ = &executor; options_ = opts; @@ -27,8 +32,7 @@ void GraphLayout::Config(LayoutExecutor &executor, const LayoutOption *opts) { Status GraphLayout::Layout(const Graph &graph, const LayoutOption *opts) { const LayoutOption *options = opts ? opts : this->options_; - if (!executor_) - return EG_UNIMPLEMENTED; + if (!executor_) return static_cast(default_executor).Layout(graph, options); return executor_->Layout(graph, options); } diff --git a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/assert_error.h b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/assert_error.h new file mode 100644 index 00000000..7f5d5086 --- /dev/null +++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/assert_error.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef D52AA06185E34BBFB714FFBCDAB0D53A +#define D52AA06185E34BBFB714FFBCDAB0D53A + +#include "ge_graph_dsl/ge.h" +#include +#include + +GE_NS_BEGIN + +struct AssertError : std::exception { + AssertError(const char *file, int line, const std::string &info); + + private: + const char *what() const noexcept override; + + private: + std::string info; +}; + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/check_utils.h b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/check_utils.h new file mode 100644 index 00000000..fa0ae783 --- /dev/null +++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/check_utils.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef INC_31309AA0A4E44C009C22AD9351BF3410 +#define INC_31309AA0A4E44C009C22AD9351BF3410 + +#include "ge_graph_dsl/ge.h" +#include "graph/compute_graph.h" + +GE_NS_BEGIN + +using GraphCheckFun = std::function<void(const ge::ComputeGraphPtr &)>; +struct CheckUtils { + static bool CheckGraph(const std::string &phase_id, const GraphCheckFun &fun); + static void init(); +}; + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/utils/builder/tensor_builder_utils.cc b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/filter_scope_guard.h similarity index 68% rename from tests/framework/utils/builder/tensor_builder_utils.cc rename to tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/filter_scope_guard.h index f99b9107..a208c02e 100644 --- a/tests/framework/utils/builder/tensor_builder_utils.cc +++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/filter_scope_guard.h @@ -1,17 +1,32 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tensor_builder_utils.h" +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef C8B32320BD4943D588594B82FFBF2685 +#define C8B32320BD4943D588594B82FFBF2685 + +#include <string> +#include <vector> +#include "ge_graph_dsl/ge.h" + +GE_NS_BEGIN + +struct FilterScopeGuard { + FilterScopeGuard(const std::vector<std::string> &); + ~FilterScopeGuard(); +}; + +GE_NS_END + +#endif diff --git a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/graph_assert.h b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/graph_assert.h new file mode 100644 index 00000000..663907a0 --- /dev/null +++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/assert/graph_assert.h @@ -0,0 +1,59 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef AD954C4ADF5B44F5B1CC8BCD72EE9ED6 +#define AD954C4ADF5B44F5B1CC8BCD72EE9ED6 + +#include "ge_graph_dsl/ge.h" +#include "ge_graph_dsl/assert/check_utils.h" +#include "ge_graph_dsl/assert/assert_error.h" +#include "ge_graph_dsl/assert/filter_scope_guard.h" + +GE_NS_BEGIN + +#ifdef GTEST_MESSAGE_AT_ +#define GRAPH_CHECK_MESSAGE(file, line, message) \ + GTEST_MESSAGE_AT_(file, line, message, ::testing::TestPartResult::kFatalFailure) +#else +#define GRAPH_CHECK_MESSAGE(file, line, message) throw AssertError(file, line, message) +#endif + +namespace detail { +struct GraphAssert { + GraphAssert(const char *file, unsigned int line, const std::string &phase_id) + : file_(file), line_(line), phase_id_(phase_id) {} + + void operator|(const ::GE_NS::GraphCheckFun &check_fun) { + bool ret = ::GE_NS::CheckUtils::CheckGraph(phase_id_, check_fun); + if (!ret) { + auto message = "expect dump graph in phase: [" + phase_id_ + "], while not find the dump graph! "; + GRAPH_CHECK_MESSAGE(file_, line_, message.c_str()); + } + } + + private: + const char *file_; + unsigned int line_; + const std::string phase_id_; +}; +} // namespace detail + +#define DUMP_GRAPH_WHEN(...) ::GE_NS::FilterScopeGuard guard__COUNTER__({__VA_ARGS__}); +#define CHECK_GRAPH(phase_id) \ + ::GE_NS::detail::GraphAssert(__FILE__, __LINE__, #phase_id) | [&](const ::GE_NS::ComputeGraphPtr &graph) + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg.h b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg.h index bb2326ec..99eafa7f 100644 --- a/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg.h +++ b/tests/framework/ge_graph_dsl/include/ge_graph_dsl/op_desc/op_desc_cfg.h @@ -33,14 +33,12 @@ struct OpDescCfg { std::vector<int64_t> shape_; }; - OpDescCfg(const OpType &type, int in_cnt = 0, int out_cnt = 0, Format format = FORMAT_NCHW, + OpDescCfg(const OpType &type, int in_cnt = 1, int out_cnt = 1, Format format = FORMAT_NCHW, DataType data_type = DT_FLOAT, std::vector<int64_t> shape = {1, 1, 224, 224}) : type_(type), in_cnt_(in_cnt), out_cnt_(out_cnt), default_tensor_(format, data_type, shape) {} protected: - OpType GetType() const { - return type_; - } + OpType GetType() const { return type_; } OpType type_; int in_cnt_; int out_cnt_; diff --git a/tests/framework/ge_graph_dsl/src/assert/assert_error.cc b/tests/framework/ge_graph_dsl/src/assert/assert_error.cc new file mode 100644 index 00000000..5b74d852 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/assert_error.cc @@ -0,0 +1,26 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#include "ge_graph_dsl/assert/assert_error.h" + +GE_NS_BEGIN + +AssertError::AssertError(const char *file, int line, const std::string &info) { + this->info = std::string(file) + ":" + std::to_string(line) + "\n" + info; +} + +const char *AssertError::what() const noexcept { return info.c_str(); } + +GE_NS_END diff --git a/tests/framework/ge_graph_dsl/src/assert/check_utils.cc b/tests/framework/ge_graph_dsl/src/assert/check_utils.cc new file mode 100644 index 00000000..56bc6e81 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/check_utils.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ge_graph_dsl/assert/check_utils.h" +#include "graph/utils/dumper/ge_graph_dumper.h" +#include "ge_graph_default_checker.h" +#include "ge_graph_check_dumper.h" + +GE_NS_BEGIN + +bool CheckUtils::CheckGraph(const std::string &phase_id, const GraphCheckFun &fun) { + auto &dumper = dynamic_cast(GraphDumperRegistry::GetDumper()); + return dumper.CheckFor(GeGraphDefaultChecker(phase_id, fun)); +} + +void CheckUtils::init() { + static GeGraphCheckDumper checkDumper; + GraphDumperRegistry::Register(checkDumper); +} + +GE_NS_END diff --git a/tests/framework/ge_graph_dsl/src/assert/filter_scope_guard.cc b/tests/framework/ge_graph_dsl/src/assert/filter_scope_guard.cc new file mode 100644 index 00000000..4aa4795d --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/filter_scope_guard.cc @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ge_graph_dsl/assert/filter_scope_guard.h" +#include "graph/utils/dumper/ge_graph_dumper.h" +#include "ge_dump_filter.h" + +GE_NS_BEGIN + +namespace { +GeDumpFilter &GetDumpFilter() { return dynamic_cast<GeDumpFilter &>(GraphDumperRegistry::GetDumper()); } +} // namespace + +FilterScopeGuard::FilterScopeGuard(const std::vector<std::string> &filter) { GetDumpFilter().Update(filter); } + +FilterScopeGuard::~FilterScopeGuard() { GetDumpFilter().Reset(); } + +GE_NS_END \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_dump_filter.h b/tests/framework/ge_graph_dsl/src/assert/ge_dump_filter.h new file mode 100644 index 00000000..47967c91 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_dump_filter.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef INC_4C6224E8F7474EF89B18CCB0E4B19FD6 +#define INC_4C6224E8F7474EF89B18CCB0E4B19FD6 + +#include <string> +#include <vector> +#include "ge_graph_dsl/ge.h" +#include "easy_graph/infra/keywords.h" + +GE_NS_BEGIN + +INTERFACE(GeDumpFilter) { + ABSTRACT(void Update(const std::vector<std::string> &)); + ABSTRACT(void Reset()); +}; + +GE_NS_END + +#endif diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.cc b/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.cc new file mode 100644 index 00000000..ba72cf86 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.cc @@ -0,0 +1,79 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "ge_graph_check_dumper.h" +#include "graph/model.h" +#include "graph/buffer.h" +#include "graph/utils/graph_utils.h" +#include "ge_graph_default_checker.h" + +GE_NS_BEGIN + +GeGraphCheckDumper::GeGraphCheckDumper() { Reset(); } + +bool GeGraphCheckDumper::IsNeedDump(const std::string &suffix) const { + auto iter = std::find(suffixes_.begin(), suffixes_.end(), suffix); + return (iter != suffixes_.end()); +} + +void GeGraphCheckDumper::Dump(const ge::ComputeGraphPtr &graph, const std::string &suffix) { + if (!IsNeedDump(suffix)) { + return; + } + auto iter = buffers_.find(suffix); + if (iter != buffers_.end()) { + DumpGraph(graph, iter->second); + } else { + buffers_[suffix] = Buffer(); + DumpGraph(graph, buffers_.at(suffix)); + } +} + +bool GeGraphCheckDumper::CheckFor(const GeGraphChecker &checker) { + auto iter = buffers_.find(checker.PhaseId()); + if (iter == buffers_.end()) { + return false; + } + DoCheck(checker, iter->second); + return true; +} + +void GeGraphCheckDumper::DoCheck(const GeGraphChecker &checker, ::GE_NS::Buffer &buffer) { + Model model("", ""); + Model::Load(buffer.GetData(), buffer.GetSize(), model); + auto load_graph = model.GetGraph(); + checker.Check(GraphUtils::GetComputeGraph(load_graph)); +} + +void GeGraphCheckDumper::DumpGraph(const ge::ComputeGraphPtr &graph, ::GE_NS::Buffer &buffer) { + Model model("", ""); + buffer.clear(); + model.SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph)); + model.Save(buffer, true); +} + +void GeGraphCheckDumper::Update(const std::vector &new_suffixes_) { + suffixes_ = new_suffixes_; + buffers_.clear(); +} + +void GeGraphCheckDumper::Reset() { + static std::vector default_suffixes_{"PreRunAfterBuild"}; + suffixes_ = default_suffixes_; + buffers_.clear(); +} + +GE_NS_END \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.h b/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.h new file mode 100644 index 00000000..5eda52ea --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_graph_check_dumper.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef INC_8EFED0015C27464897BF64531355C810 +#define INC_8EFED0015C27464897BF64531355C810 + +#include "ge_graph_dsl/ge.h" +#include "graph/utils/dumper/ge_graph_dumper.h" +#include "ge_dump_filter.h" +#include + +GE_NS_BEGIN + +struct GeGraphChecker; + +struct GeGraphCheckDumper : GeGraphDumper, GeDumpFilter { + GeGraphCheckDumper(); + virtual void Dump(const ge::ComputeGraphPtr &graph, const std::string &suffix); + bool CheckFor(const GeGraphChecker &checker); + + private: + void DoCheck(const GeGraphChecker &checker, ::GE_NS::Buffer &buffer); + void DumpGraph(const ge::ComputeGraphPtr &graph, ::GE_NS::Buffer &buffer); + + private: + void Update(const std::vector &) override; + void Reset() override; + bool IsNeedDump(const std::string &suffix) const; + + private: + std::map buffers_; + std::vector suffixes_; +}; + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_graph_checker.h b/tests/framework/ge_graph_dsl/src/assert/ge_graph_checker.h new file mode 100644 index 00000000..c6b25b65 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_graph_checker.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef INC_5960A8F437324904BEE0690271258762 +#define INC_5960A8F437324904BEE0690271258762 + +#include "ge_graph_dsl/ge.h" +#include "easy_graph/infra/keywords.h" +#include "graph/compute_graph.h" + +GE_NS_BEGIN + +INTERFACE(GeGraphChecker) { + ABSTRACT(const std::string &PhaseId() const); + ABSTRACT(void Check(const ge::ComputeGraphPtr &graph) const); +}; + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.cc b/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.cc new file mode 100644 index 00000000..4aa48ac6 --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.cc @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ge_graph_default_checker.h" + +GE_NS_BEGIN + +GeGraphDefaultChecker::GeGraphDefaultChecker(const std::string &phase_id, const GraphCheckFun &check_fun) + : phase_id_(phase_id), check_fun_(check_fun) {} + +const std::string &GeGraphDefaultChecker::PhaseId() const { return phase_id_; } + +void GeGraphDefaultChecker::Check(const ge::ComputeGraphPtr &graph) const { return check_fun_(graph); } + +GE_NS_END \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.h b/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.h new file mode 100644 index 00000000..af8f3fbe --- /dev/null +++ b/tests/framework/ge_graph_dsl/src/assert/ge_graph_default_checker.h @@ -0,0 +1,41 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef BCF4D96BE9FC48938DE7B7E93B551C54 +#define BCF4D96BE9FC48938DE7B7E93B551C54 + +#include "ge_graph_dsl/ge.h" +#include "ge_graph_checker.h" +#include "graph/compute_graph.h" + +GE_NS_BEGIN + +using GraphCheckFun = std::function; + +struct GeGraphDefaultChecker : GeGraphChecker { + GeGraphDefaultChecker(const std::string &, const GraphCheckFun &); + + private: + const std::string &PhaseId() const override; + void Check(const ge::ComputeGraphPtr &graph) const override; + + private: + const std::string phase_id_; + const GraphCheckFun check_fun_; +}; + +GE_NS_END + +#endif \ No newline at end of file diff --git a/tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc b/tests/framework/ge_graph_dsl/src/op_desc/op_desc_cfg_box.cc similarity index 100% rename from tests/framework/ge_graph_dsl/src/op_desc_cfg_box.cc rename to tests/framework/ge_graph_dsl/src/op_desc/op_desc_cfg_box.cc diff --git a/tests/framework/ge_graph_dsl/src/op_desc_cfg_repo.cc b/tests/framework/ge_graph_dsl/src/op_desc/op_desc_cfg_repo.cc similarity index 53% rename from tests/framework/ge_graph_dsl/src/op_desc_cfg_repo.cc rename to tests/framework/ge_graph_dsl/src/op_desc/op_desc_cfg_repo.cc index e7fa018f..19dfa4a5 100644 --- a/tests/framework/ge_graph_dsl/src/op_desc_cfg_repo.cc +++ b/tests/framework/ge_graph_dsl/src/op_desc/op_desc_cfg_repo.cc @@ -23,15 +23,22 @@ GE_NS_BEGIN namespace { -#define OP_CFG(optype, ...) \ - { \ - optype, OpDescCfg { \ - optype, __VA_ARGS__ \ - } \ +#define OP_CFG(optype, ...) 
\ + { \ + optype, OpDescCfg { optype, __VA_ARGS__ } \ } static std::map cfg_repo{OP_CFG(DATA, 1, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), OP_CFG(ADD, 2, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(ENTER, 1, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(MERGE, 2, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(CONSTANT, 0, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(LESS, 2, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(LOOPCOND, 1, 1, FORMAT_NCHW, DT_BOOL, {1, 1, 224, 224}), + OP_CFG(SWITCH, 2, 2, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(EXIT, 1, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(NEXTITERATION, 1, 1, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), + OP_CFG(NETOUTPUT, 2, 2, FORMAT_NCHW, DT_FLOAT, {1, 1, 224, 224}), OP_CFG(VARIABLE, 1, 1)}; } // namespace diff --git a/tests/framework/ge_graph_dsl/src/op_desc_ptr_box.cc b/tests/framework/ge_graph_dsl/src/op_desc/op_desc_ptr_box.cc similarity index 97% rename from tests/framework/ge_graph_dsl/src/op_desc_ptr_box.cc rename to tests/framework/ge_graph_dsl/src/op_desc/op_desc_ptr_box.cc index 23d4773c..1564e019 100644 --- a/tests/framework/ge_graph_dsl/src/op_desc_ptr_box.cc +++ b/tests/framework/ge_graph_dsl/src/op_desc/op_desc_ptr_box.cc @@ -19,6 +19,4 @@ USING_GE_NS -OpDescPtr OpDescPtrBox::Build(const ::EG_NS::NodeId &id) const { - return op_; -} +OpDescPtr OpDescPtrBox::Build(const ::EG_NS::NodeId &id) const { return op_; } diff --git a/tests/framework/ge_graph_dsl/src/ge_graph_vistor.cc b/tests/framework/ge_graph_dsl/src/vistor/ge_graph_visitor.cc similarity index 89% rename from tests/framework/ge_graph_dsl/src/ge_graph_vistor.cc rename to tests/framework/ge_graph_dsl/src/vistor/ge_graph_visitor.cc index d8bc2aab..c1dca646 100644 --- a/tests/framework/ge_graph_dsl/src/ge_graph_vistor.cc +++ b/tests/framework/ge_graph_dsl/src/vistor/ge_graph_visitor.cc @@ -36,17 +36,11 @@ GE_NS_BEGIN GeGraphVisitor::GeGraphVisitor() : build_graph_(std::make_shared("")) {} -void GeGraphVisitor::reset(const ComputeGraphPtr &graph) { - build_graph_ = graph; -} +void GeGraphVisitor::reset(const ComputeGraphPtr &graph) { build_graph_ = graph; } -Graph GeGraphVisitor::BuildGeGraph() const { - return GraphUtils::CreateGraphFromComputeGraph(build_graph_); -} +Graph GeGraphVisitor::BuildGeGraph() const { return GraphUtils::CreateGraphFromComputeGraph(build_graph_); } -ComputeGraphPtr GeGraphVisitor::BuildComputeGraph() const { - return build_graph_; -} +ComputeGraphPtr GeGraphVisitor::BuildComputeGraph() const { return build_graph_; } Status GeGraphVisitor::Visit(const ::EG_NS::Graph &graph) { build_graph_->SetName(graph.GetName()); diff --git a/tests/framework/ge_graph_dsl/src/ge_subgraph_vistor.cc b/tests/framework/ge_graph_dsl/src/vistor/ge_subgraph_vistor.cc similarity index 100% rename from tests/framework/ge_graph_dsl/src/ge_subgraph_vistor.cc rename to tests/framework/ge_graph_dsl/src/vistor/ge_subgraph_vistor.cc diff --git a/tests/framework/ge_graph_dsl/src/graph_dsl.cc b/tests/framework/ge_graph_dsl/src/vistor/graph_dsl.cc similarity index 100% rename from tests/framework/ge_graph_dsl/src/graph_dsl.cc rename to tests/framework/ge_graph_dsl/src/vistor/graph_dsl.cc diff --git a/tests/framework/ge_graph_dsl/tests/CMakeLists.txt b/tests/framework/ge_graph_dsl/tests/CMakeLists.txt index 40097d8b..65482679 100644 --- a/tests/framework/ge_graph_dsl/tests/CMakeLists.txt +++ b/tests/framework/ge_graph_dsl/tests/CMakeLists.txt @@ -26,7 +26,7 @@ target_compile_options(ge_graph_dsl_test PRIVATE ) 
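The link-line change just below drops gtest_main, so the DSL test target has to provide its own entry point; the point of owning main() is to install the graph-check dumper before any fixture runs. A minimal sketch of such an entry point, mirroring the test_main.cc introduced later in this patch:

    #include <gtest/gtest.h>
    #include "ge_graph_dsl/assert/check_utils.h"

    int main(int argc, char **argv) {
      ::GE_NS::CheckUtils::init();           // register GeGraphCheckDumper before tests run
      testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }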
set_target_properties(ge_graph_dsl_test PROPERTIES CXX_STANDARD 17) -target_link_libraries(ge_graph_dsl_test PUBLIC gtest gtest_main ge_graph_dsl) +target_link_libraries(ge_graph_dsl_test PUBLIC gtest ge_graph_dsl) include(CTest) enable_testing() diff --git a/tests/framework/ge_graph_dsl/tests/check_graph_test.cc b/tests/framework/ge_graph_dsl/tests/check_graph_test.cc new file mode 100644 index 00000000..731b7eed --- /dev/null +++ b/tests/framework/ge_graph_dsl/tests/check_graph_test.cc @@ -0,0 +1,129 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "gtest/gtest.h" +#include "easy_graph/layout/graph_layout.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" +#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" +#include "ge_graph_dsl/graph_dsl.h" +#include "graph/debug/ge_attr_define.h" +#include "graph/utils/dumper/ge_graph_dumper.h" +#include "framework/common/types.h" +#include "ge_graph_dsl/assert/graph_assert.h" +#include "graph/model.h" +#include "graph/buffer.h" + +USING_GE_NS + +class CheckGraphTest : public testing::Test { + private: + EG_NS::GraphEasyExecutor executor; + + protected: + void SetUp() { EG_NS::GraphLayout::GetInstance().Config(executor, nullptr); } + void TearDown() {} +}; + +TEST_F(CheckGraphTest, test_ge_graph_dump_is_work) { + DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + + DUMP_GRAPH_WHEN("after_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "after_build"); + + CHECK_GRAPH(after_build) { + ASSERT_EQ(graph->GetName(), "g1"); + ASSERT_EQ(graph->GetAllNodesSize(), 2); + }; +} + +TEST_F(CheckGraphTest, test_ge_graph_dump_two_phase) { + DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + DEF_GRAPH(g2) { + CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CTRL_CHAIN(NODE("data2", DATA)->NODE("add", ADD)); + }; + + DUMP_GRAPH_WHEN("before_build", "after_build"); + + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "before_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g2), "after_build"); + + CHECK_GRAPH(before_build) { + ASSERT_EQ(graph->GetName(), "g1"); + ASSERT_EQ(graph->GetAllNodesSize(), 2); + }; + + CHECK_GRAPH(after_build) { + ASSERT_EQ(graph->GetName(), "g2"); + ASSERT_EQ(graph->GetAllNodesSize(), 3); + }; +} + +TEST_F(CheckGraphTest, test_ge_graph_dump_one_phase_two_times) { + DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + DEF_GRAPH(g2) { + CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); + CTRL_CHAIN(NODE("data2", DATA)->NODE("add", ADD)); + }; + + DUMP_GRAPH_WHEN("before_build") + + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "before_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g2), "before_build"); + + CHECK_GRAPH(before_build) { + ASSERT_EQ(graph->GetName(), "g2"); + ASSERT_EQ(graph->GetAllNodesSize(), 3); + }; +} + +TEST_F(CheckGraphTest, test_check_phases_is_work) { + DEF_GRAPH(g1) { 
CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + + DUMP_GRAPH_WHEN("before_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "after_build"); + auto ret = ::GE_NS::CheckUtils::CheckGraph("after_build", [&](const ::GE_NS::ComputeGraphPtr &graph) {}); + ASSERT_FALSE(ret); +} + +TEST_F(CheckGraphTest, test_check_one_phase_dump_another_not_dump) { + DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + + DUMP_GRAPH_WHEN("before_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "before_build"); + GraphDumperRegistry::GetDumper().Dump(ToComputeGraph(g1), "after_build"); + + CHECK_GRAPH(before_build) { + ASSERT_EQ(graph->GetName(), "g1"); + ASSERT_EQ(graph->GetAllNodesSize(), 2); + }; +} + +TEST_F(CheckGraphTest, test_model_serialize_and_unserialize_success) { + DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; + auto ge_graph = ToGeGraph(g1); + + ge::Model model("", ""); + model.SetGraph(ge_graph); + Buffer buffer; + model.Save(buffer, true); + + ge::Model loadModel("", ""); + Model::Load(buffer.GetData(), buffer.GetSize(), loadModel); + auto load_graph = loadModel.GetGraph(); + + ASSERT_EQ(load_graph.GetName(), "g1"); + ASSERT_EQ(load_graph.GetAllNodes().size(), 2); +} diff --git a/tests/framework/ge_graph_dsl/tests/graph_dsl_test.cc b/tests/framework/ge_graph_dsl/tests/graph_dsl_test.cc index f7e55e3d..a8240b32 100644 --- a/tests/framework/ge_graph_dsl/tests/graph_dsl_test.cc +++ b/tests/framework/ge_graph_dsl/tests/graph_dsl_test.cc @@ -37,17 +37,13 @@ class GraphDslTest : public testing::Test { EG_NS::GraphEasyExecutor executor; protected: - void SetUp() { - EG_NS::GraphLayout::GetInstance().Config(executor, nullptr); - } + void SetUp() { EG_NS::GraphLayout::GetInstance().Config(executor, nullptr); } void TearDown() {} }; TEST_F(GraphDslTest, test_build_graph_from_optype_with_name) { - DEF_GRAPH(g1) { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - }); + DEF_GRAPH(g1) { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; auto geGraph = ToGeGraph(g1); auto computeGraph = ToComputeGraph(g1); @@ -57,9 +53,7 @@ TEST_F(GraphDslTest, test_build_graph_from_optype_with_name) { } TEST_F(GraphDslTest, test_build_graph_with_name) { - DEF_GRAPH(g1, "sample_graph") { - CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - }); + DEF_GRAPH(g1, "sample_graph") { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; auto geGraph = ToGeGraph(g1); @@ -72,7 +66,7 @@ TEST_F(GraphDslTest, test_build_from_from_op_desc_ptr) { auto data = std::make_shared("data1", DATA); auto add = std::make_shared("Add", ADD); CHAIN(NODE(data)->NODE(add)); - }); + }; auto geGraph = ToGeGraph(g1); @@ -84,7 +78,7 @@ TEST_F(GraphDslTest, test_build_from_op_desc_cfg) { auto datCfg = OP_CFG(DATA).InCnt(1).OutCnt(1); auto addCfg = OP_CFG(DATA).InCnt(1).OutCnt(1); CHAIN(NODE("data1", datCfg)->NODE("add", addCfg)); - }); + }; auto geGraph = ToGeGraph(g1); @@ -92,9 +86,7 @@ TEST_F(GraphDslTest, test_build_from_op_desc_cfg) { } TEST_F(GraphDslTest, test_build_from_op_desc_cfg_inline) { - DEF_GRAPH(g1) { - CHAIN(NODE("data1", OP_CFG(DATA).InCnt(1).OutCnt(1))->NODE("add", OP_CFG(ADD).InCnt(2).OutCnt(1))); - }); + DEF_GRAPH(g1) { CHAIN(NODE("data1", OP_CFG(DATA).InCnt(1).OutCnt(1))->NODE("add", OP_CFG(ADD).InCnt(2).OutCnt(1))); }; auto geGraph = ToGeGraph(g1); @@ -102,9 +94,7 @@ TEST_F(GraphDslTest, test_build_from_op_desc_cfg_inline) { } TEST_F(GraphDslTest, test_build_from_control_chain) { - DEF_GRAPH(g1) { - CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - }); + 
DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; auto geGraph = ToGeGraph(g1); @@ -112,9 +102,7 @@ TEST_F(GraphDslTest, test_build_from_control_chain) { } TEST_F(GraphDslTest, test_build_from_data_chain) { - DEF_GRAPH(g1) { - DATA_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); - }); + DEF_GRAPH(g1) { DATA_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); }; auto geGraph = ToGeGraph(g1); @@ -125,7 +113,7 @@ TEST_F(GraphDslTest, test_build_from_data_chain_with_edge) { DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data1", DATA)->EDGE(2, 2)->NODE("add")); - }); + }; auto geGraph = ToGeGraph(g1); @@ -136,7 +124,7 @@ TEST_F(GraphDslTest, test_build_graph_reused_before_node) { DEF_GRAPH(g1) { CTRL_CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data1")->EDGE(2, 2)->NODE("add")); - }); + }; auto geGraph = ToGeGraph(g1); @@ -147,7 +135,7 @@ TEST_F(GraphDslTest, test_build_graph_with_constant_folding) { DEF_GRAPH(g1) { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data2", DATA)->NODE("add")); - }); + }; auto geGraph = ToGeGraph(g1); @@ -168,7 +156,7 @@ TEST_F(GraphDslTest, test_build_complex_normal_graph_build_suggested) { ->NODE("Add4") ->NODE("Add5") ->NODE("net_output", NETOUTPUT)); - }); + }; auto geGraph = ToGeGraph(g1); @@ -187,7 +175,7 @@ TEST_F(GraphDslTest, test_build_complex_mult_normal_graph_build) { CHAIN(NODE("add2")->NODE("net_output")); CHAIN(NODE("add3")->NODE("net_output")); CTRL_CHAIN(NODE("add1")->NODE("add2")->NODE("add3")); - }); + }; auto geGraph = ToGeGraph(g1); @@ -198,17 +186,17 @@ TEST_F(GraphDslTest, test_build_graph_with_sub_graph) { DEF_GRAPH(sub_1) { CHAIN(NODE("data_i", DATA)->NODE("less", LESS)->NODE("netoutput", NETOUTPUT)); CHAIN(NODE("const_5", CONSTANTOP)->NODE("less")); - }); + }; DEF_GRAPH(sub_2) { CHAIN(NODE("data_a", DATA)->NODE("mul", MUL)->NODE("netoutput", NETOUTPUT)); CHAIN(NODE("const_2", CONSTANTOP)->NODE("mul")); - }); + }; DEF_GRAPH(g1) { CHAIN(NODE("data_a", DATA)->NODE("while", WHILE, sub_1, sub_2)->NODE("netoutput", NETOUTPUT)); CHAIN(NODE("data_i", DATA)->NODE("while")); - }); + }; sub_1.Layout(); sub_2.Layout(); diff --git a/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc b/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc index 071f8c36..533e8198 100644 --- a/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc +++ b/tests/framework/ge_graph_dsl/tests/stub/optype_stub.cc @@ -30,5 +30,11 @@ REGISTER_OPTYPE_DEFINE(MUL, "Mul"); REGISTER_OPTYPE_DEFINE(NETOUTPUT, "NetOutput"); REGISTER_OPTYPE_DEFINE(ADD, "Add"); REGISTER_OPTYPE_DEFINE(WHILE, "While"); +REGISTER_OPTYPE_DEFINE(ENTER, "Enter"); +REGISTER_OPTYPE_DEFINE(MERGE, "Merge"); +REGISTER_OPTYPE_DEFINE(LOOPCOND, "Loopcond"); +REGISTER_OPTYPE_DEFINE(SWITCH, "Switch"); +REGISTER_OPTYPE_DEFINE(EXIT, "Exit"); +REGISTER_OPTYPE_DEFINE(NEXTITERATION, "Nextiteration"); GE_NS_END diff --git a/tests/framework/utils/builder/tensor_builder_utils.h b/tests/framework/ge_graph_dsl/tests/test_main.cc similarity index 73% rename from tests/framework/utils/builder/tensor_builder_utils.h rename to tests/framework/ge_graph_dsl/tests/test_main.cc index 73656e4a..eb6112f2 100644 --- a/tests/framework/utils/builder/tensor_builder_utils.h +++ b/tests/framework/ge_graph_dsl/tests/test_main.cc @@ -1,22 +1,25 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GRAPHENGINE_LLT_ST_TENSOR_BUILDER_UTILS_H -#define GRAPHENGINE_LLT_ST_TENSOR_BUILDER_UTILS_H - -class tensor_builder_utils {}; - -#endif // GRAPHENGINE_LLT_ST_TENSOR_BUILDER_UTILS_H +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "ge_graph_dsl/assert/check_utils.h" + +int main(int argc, char **argv) { + ::GE_NS::CheckUtils::init(); + testing::InitGoogleTest(&argc, argv); + int ret = RUN_ALL_TESTS(); + return ret; +} diff --git a/tests/framework/utils/builder/graph_builder_utils.cc b/tests/framework/utils/builder/graph_builder_utils.cc deleted file mode 100644 index c5555235..00000000 --- a/tests/framework/utils/builder/graph_builder_utils.cc +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "graph_builder_utils.h" -#include "inc/external/graph/operator.h" -#include "inc/external/graph/operator_factory.h" -#include "graph/utils/graph_utils.h" - -namespace ge { -namespace st { -NodePtr ComputeGraphBuilder::AddNode(const std::string &name, const std::string &type, int in_cnt, int out_cnt, - Format format, DataType data_type, std::vector shape) { - auto tensor_desc = std::make_shared(); - tensor_desc->SetShape(GeShape(std::move(shape))); - tensor_desc->SetFormat(format); - tensor_desc->SetDataType(data_type); - - auto op_desc = std::make_shared(name, type); - for (int i = 0; i < in_cnt; ++i) { - op_desc->AddInputDesc(tensor_desc->Clone()); - } - for (int i = 0; i < out_cnt; ++i) { - op_desc->AddOutputDesc(tensor_desc->Clone()); - } - - return graph_->AddNode(op_desc); -} -void ComputeGraphBuilder::AddDataEdge(NodePtr &src_node, int src_idx, NodePtr &dst_node, int dst_idx) { - GraphUtils::AddEdge(src_node->GetOutDataAnchor(src_idx), dst_node->GetInDataAnchor(dst_idx)); -} -void ComputeGraphBuilder::AddControlEdge(NodePtr &src_node, NodePtr &dst_node) { - GraphUtils::AddEdge(src_node->GetOutControlAnchor(), dst_node->GetInControlAnchor()); -} -} // namespace st -} // namespace ge diff --git a/tests/framework/utils/builder/graph_builder_utils.h b/tests/framework/utils/builder/graph_builder_utils.h deleted file mode 100644 index 4627f082..00000000 --- a/tests/framework/utils/builder/graph_builder_utils.h +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef GRAPHENGINE_LLT_ST_GRAPH_BUILDER_H -#define GRAPHENGINE_LLT_ST_GRAPH_BUILDER_H - -#include -#include - -#include "graph/compute_graph.h" -#include "graph/utils/graph_utils.h" -#include "graph/graph.h" -#include "graph/node.h" - -namespace ge { -namespace st { -class ComputeGraphBuilder { - public: - explicit ComputeGraphBuilder(const std::string &name) { - graph_ = std::make_shared(name); - } - NodePtr AddNode(const std::string &name, const std::string &type, int in_cnt, int out_cnt, - Format format = FORMAT_NCHW, DataType data_type = DT_FLOAT, - std::vector shape = {1, 1, 224, 224}); - void AddDataEdge(NodePtr &src_node, int src_idx, NodePtr &dst_node, int dst_idx); - void AddControlEdge(NodePtr &src_node, NodePtr &dst_node); - ComputeGraphPtr GetComputeGraph() { - graph_->TopologicalSorting(); - return graph_; - } - Graph GetGraph() { - graph_->TopologicalSorting(); - return GraphUtils::CreateGraphFromComputeGraph(graph_); - } - - private: - ComputeGraphPtr graph_; -}; -} // namespace st -} // namespace ge - -#endif // GRAPHENGINE_LLT_ST_GRAPH_BUILDER_H diff --git a/tests/st/testcase/CMakeLists.txt b/tests/st/testcase/CMakeLists.txt index b3663708..56b3f41b 100644 --- a/tests/st/testcase/CMakeLists.txt +++ b/tests/st/testcase/CMakeLists.txt @@ -8,7 +8,7 @@ target_include_directories(graph_engine_test set_target_properties(graph_engine_test PROPERTIES CXX_STANDARD 17) -target_link_libraries(graph_engine_test PRIVATE gtest framework) +target_link_libraries(graph_engine_test PRIVATE gtest ge_graph_dsl ge_with_env) include(CTest) enable_testing() diff --git a/tests/st/testcase/test_framework_dummy.cc b/tests/st/testcase/test_framework_dummy.cc index 0abdd18b..8f13bb78 100644 --- a/tests/st/testcase/test_framework_dummy.cc +++ b/tests/st/testcase/test_framework_dummy.cc @@ -15,23 +15,12 @@ */ #include -#include #include "external/ge/ge_api.h" -#include "ge_running_env/fake_engine.h" #include "graph/debug/ge_attr_define.h" #include "framework/common/types.h" - -#include "builder/graph_builder_utils.h" #include "ge_running_env/ge_running_env_faker.h" - -#include "graph/operator_reg.h" -#include "graph/operator.h" -#define protected public -#define private public -#include "graph/utils/op_desc_utils.h" #include "ge_graph_dsl/graph_dsl.h" -#undef protected -#undef private +#include "ge_graph_dsl/assert/graph_assert.h" using namespace std; using namespace ge; @@ -57,76 +46,58 @@ namespace { * **/ Graph BuildV1ControlFlowGraph() { - // build graph - st::ComputeGraphBuilder graphBuilder("g1"); - auto data_i = graphBuilder.AddNode("data_i", DATA, 1, 1); - auto enter_i = graphBuilder.AddNode("enter_i", ENTER, 1, 1); - ge::AttrUtils::SetStr(enter_i->GetOpDesc(), ENTER_ATTR_FRAME_NAME, "1"); - auto merge_i = graphBuilder.AddNode("merge_i", MERGE, 2, 1); - auto const_5 = graphBuilder.AddNode("const_5", CONSTANT, 0, 1); - auto less = graphBuilder.AddNode("less", LESS, 2, 1); - auto loopcond = graphBuilder.AddNode("loopcond", LOOPCOND, 1, 1, FORMAT_NCHW, DT_BOOL); - auto switch_i = graphBuilder.AddNode("switch_i", SWITCH, 2, 2); - auto exit_i = graphBuilder.AddNode("switch_i", EXIT, 1, 1); - auto const_1 = graphBuilder.AddNode("const_1", CONSTANT, 0, 1); - auto add = graphBuilder.AddNode("add", ADD, 2, 1); - auto next_iteration_i = graphBuilder.AddNode("next_iteration_i", NEXTITERATION, 1, 1); - - auto data_a = graphBuilder.AddNode("data_a", DATA, 1, 1); - auto enter_a = graphBuilder.AddNode("enter_a", ENTER, 1, 1); - ge::AttrUtils::SetStr(enter_a->GetOpDesc(), ENTER_ATTR_FRAME_NAME, "1"); 
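The hand-wired builder code being deleted here is re-expressed with the graph DSL in the added lines that follow; each AddDataEdge(src, src_idx, dst, dst_idx) call corresponds to an EDGE(src_idx, dst_idx) link inside a CHAIN. A minimal two-node sketch of the correspondence (graph name "demo" is illustrative only):

    // Removed builder style: graphBuilder.AddDataEdge(merge_i, 0, switch_i, 0);
    // Equivalent DSL style:
    DEF_GRAPH(demo) {
      CHAIN(NODE("merge_i", MERGE)->EDGE(0, 0)->NODE("switch_i", SWITCH));
    };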
- auto merge_a = graphBuilder.AddNode("merge_a", MERGE, 2, 1); - auto switch_a = graphBuilder.AddNode("switch_a", SWITCH, 2, 2); - auto exit_a = graphBuilder.AddNode("exit_a", EXIT, 1, 1); - auto mul = graphBuilder.AddNode("mul", MUL, 2, 1); - auto const_2 = graphBuilder.AddNode("const_2", CONSTANT, 0, 1); - auto next_iteration_a = graphBuilder.AddNode("next_iteration_a", NEXTITERATION, 1, 1); - auto netoutput = graphBuilder.AddNode("netoutput", NETOUTPUT, 2, 2); - // i = i+1 - graphBuilder.AddDataEdge(data_i, 0, enter_i, 0); - graphBuilder.AddDataEdge(enter_i, 0, merge_i, 0); - graphBuilder.AddDataEdge(next_iteration_i, 0, merge_i, 1); - graphBuilder.AddDataEdge(merge_i, 0, less, 0); - graphBuilder.AddDataEdge(const_5, 0, less, 1); - graphBuilder.AddDataEdge(less, 0, loopcond, 0); - graphBuilder.AddDataEdge(loopcond, 0, switch_i, 1); - graphBuilder.AddDataEdge(merge_i, 0, switch_i, 0); - graphBuilder.AddDataEdge(switch_i, 0, exit_i, 0); - graphBuilder.AddDataEdge(switch_i, 1, add, 0); - graphBuilder.AddDataEdge(const_1, 0, add, 1); - graphBuilder.AddDataEdge(add, 0, next_iteration_i, 0); - graphBuilder.AddDataEdge(exit_i, 0, netoutput, 1); - // a=a*2 - graphBuilder.AddDataEdge(data_a, 0, enter_a, 0); - graphBuilder.AddDataEdge(enter_a, 0, merge_a, 0); - graphBuilder.AddDataEdge(next_iteration_a, 0, merge_a, 1); - graphBuilder.AddDataEdge(loopcond, 0, switch_a, 1); - graphBuilder.AddDataEdge(merge_a, 0, switch_a, 0); - graphBuilder.AddDataEdge(switch_a, 0, exit_a, 0); - graphBuilder.AddDataEdge(switch_a, 1, mul, 0); - graphBuilder.AddDataEdge(const_2, 0, mul, 1); - graphBuilder.AddDataEdge(mul, 0, next_iteration_a, 0); - graphBuilder.AddDataEdge(exit_a, 0, netoutput, 0); - // set const weight int64_t dims_size = 1; vector data_vec = {5}; for_each(data_vec.begin(), data_vec.end(), [&](int64_t &data) { dims_size *= data; }); vector data_value_vec(dims_size, 1); GeTensorDesc data_tensor_desc(GeShape(data_vec), FORMAT_NCHW, DT_INT32); - GeTensorPtr data_tensor = - make_shared(data_tensor_desc, (uint8_t *)data_value_vec.data(), data_value_vec.size() * sizeof(int32_t)); - OpDescUtils::SetWeights(const_5->GetOpDesc(), data_tensor); - OpDescUtils::SetWeights(const_2->GetOpDesc(), data_tensor); - OpDescUtils::SetWeights(const_1->GetOpDesc(), data_tensor); + GeTensorPtr data_tensor = make_shared(data_tensor_desc, (uint8_t *)data_value_vec.data(), + data_value_vec.size() * sizeof(int32_t)); - return graphBuilder.GetGraph(); + auto enter = OP_CFG(ENTER).Attr(ENTER_ATTR_FRAME_NAME, "1"); + auto const_op = OP_CFG(CONSTANT).Weight(data_tensor); + + DEF_GRAPH(g1) { + CHAIN(NODE("data_i", DATA) + ->NODE("enter_i", enter) + ->EDGE(0, 0) + ->NODE("merge_i", MERGE) + ->NODE("less", LESS) + ->NODE("loopcond", LOOPCOND)); + CHAIN(NODE("const_1", const_op) + ->EDGE(0, 1) + ->NODE("add", ADD) + ->NODE("iteration_i", NEXTITERATION) + ->EDGE(0, 1) + ->NODE("merge_i")); + CHAIN(NODE("const_5", const_op)->EDGE(0, 1)->NODE("less")); + CHAIN(NODE("loopcond") + ->EDGE(0, 1) + ->NODE("switch_i", SWITCH) + ->EDGE(0, 0) + ->NODE("exit_i", EXIT) + ->EDGE(0, 1) + ->NODE("netoutput", NETOUTPUT)); + CHAIN(NODE("merge_i")->EDGE(0, 0)->NODE("switch_i")->EDGE(1, 0)->NODE("add")); + CHAIN(NODE("data_a", DATA) + ->NODE("enter_a", enter) + ->NODE("merge_a", MERGE) + ->NODE("switch_a", SWITCH) + ->NODE("exit_a", EXIT) + ->EDGE(0, 0) + ->NODE("netoutput")); + CHAIN(NODE("iteration_a", NEXTITERATION)->EDGE(0, 1)->NODE("merge_a")); + CHAIN(NODE("loopcond")->EDGE(0, 1)->NODE("switch_a")->EDGE(1, 0)->NODE("mul", MUL)); + 
CHAIN(NODE("const_2", const_op)->EDGE(0, 1)->NODE("mul")->EDGE(0, 0)->NODE("iteration_a")); + }; + return ToGeGraph(g1); } } // namespace class FrameworkTest : public testing::Test { protected: + GeRunningEnvFaker ge_env; void SetUp() { ge_env.InstallDefault(); } void TearDown() {} - GeRunningEnvFaker ge_env; }; /// data data @@ -136,19 +107,19 @@ TEST_F(FrameworkTest, test_framework_add) { DEF_GRAPH(g1) { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data2", DATA)->NODE("add")); - }); + }; - auto graph = ToGeGraph(g1); - // new session & add graph map options; Session session(options); - auto ret = session.AddGraph(1, graph, options); - EXPECT_EQ(ret, SUCCESS); - // build input tensor + session.AddGraph(1, ToGeGraph(g1), options); std::vector inputs; - // build_graph through session - ret = session.BuildGraph(1, inputs); + auto ret = session.BuildGraph(1, inputs); + EXPECT_EQ(ret, SUCCESS); + CHECK_GRAPH(PreRunAfterBuild) { + ASSERT_EQ(graph->GetName(), "g1_1"); + ASSERT_EQ(graph->GetAllNodesSize(), 4); + }; } /** data a = 2; diff --git a/tests/st/testcase/test_ge_opt_info.cc b/tests/st/testcase/test_ge_opt_info.cc index 457473b1..2e8e5382 100644 --- a/tests/st/testcase/test_ge_opt_info.cc +++ b/tests/st/testcase/test_ge_opt_info.cc @@ -15,24 +15,12 @@ */ #include -#include "easy_graph/graph/box.h" -#include "easy_graph/graph/node.h" +#include "external/ge/ge_api.h" #include "easy_graph/builder/graph_dsl.h" -#include "easy_graph/builder/box_builder.h" -#include "easy_graph/layout/graph_layout.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_option.h" -#include "easy_graph/layout/engines/graph_easy/graph_easy_executor.h" -#include "graph/graph.h" #include "graph/compute_graph.h" #include "framework/common/types.h" -#include "graph/debug/ge_attr_define.h" +#include "graph/ge_local_context.h" #include "ge_graph_dsl/graph_dsl.h" -#include "ge_graph_dsl/op_desc/op_desc_cfg_box.h" -#define protected public -#define private public -#include "ge_opt_info/ge_opt_info.h" -#undef private -#undef protected namespace ge { class STEST_opt_info : public testing::Test { @@ -52,7 +40,7 @@ TEST_F(STEST_opt_info, get_opt_info_all) { DEF_GRAPH(g1) { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data2", DATA)->NODE("add")); - }); + }; auto graph = ToGeGraph(g1); @@ -95,7 +83,7 @@ TEST_F(STEST_opt_info, get_opt_info_success) { DEF_GRAPH(g1) { CHAIN(NODE("data1", DATA)->NODE("add", ADD)); CHAIN(NODE("data2", DATA)->NODE("add")); - }); + }; auto graph = ToGeGraph(g1); diff --git a/tests/st/testcase/test_main.cc b/tests/st/testcase/test_main.cc index a39c68aa..a7a71954 100644 --- a/tests/st/testcase/test_main.cc +++ b/tests/st/testcase/test_main.cc @@ -15,9 +15,8 @@ */ #include - -#include "common/debug/log.h" #include "external/ge/ge_api.h" +#include "ge_graph_dsl/assert/check_utils.h" #include "ge_running_env/include/ge_running_env/ge_running_env_faker.h" using namespace std; @@ -31,6 +30,7 @@ int main(int argc, char **argv) { std::cout << "ge init failed , ret code:" << init_status << endl; } GeRunningEnvFaker::BackupEnv(); + CheckUtils::init(); testing::InitGoogleTest(&argc, argv); int ret = RUN_ALL_TESTS(); return ret; diff --git a/tests/ut/common/graph/CMakeLists.txt b/tests/ut/common/graph/CMakeLists.txt index 73780967..ccf9ce5e 100644 --- a/tests/ut/common/graph/CMakeLists.txt +++ b/tests/ut/common/graph/CMakeLists.txt @@ -90,6 +90,7 @@ set(SRC_FILES "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" 
"${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" + "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/type_utils.cc" diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 5b8958b4..d8fcd6c3 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -102,6 +102,7 @@ set(GRAPH_SRC_FILES "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" + "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" "${GE_CODE_DIR}/metadef/graph/utils/ge_ir_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" From 08bedf29f6490261c1e99140250610c94f30c411 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 1 Jul 2021 19:04:59 +0800 Subject: [PATCH 136/226] Update submodule. --- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index 9c9907b7..f3f137de 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 9c9907b76a457f456072af96b8cbcfb7943beccc +Subproject commit f3f137de034885f0c7394d7f04b41b08d450d2d2 From 47b3762f6e24f1bf4eccfde786c55d27a5538943 Mon Sep 17 00:00:00 2001 From: lichun Date: Thu, 1 Jul 2021 21:03:31 +0800 Subject: [PATCH 137/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc | 2 +- tests/ut/ge/hybrid/known_node_executor_unittest.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index ea6e2965..b903f6af 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -190,7 +190,7 @@ void KnownNodeExecutor::SettingDaviciModel(const HybridModel &model, const NodeP davinci_model->SetDumpModelName(model.GetModelName()); davinci_model->SetOmName(model.GetOmName()); TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); - davinci_model->SetKnownShapeGlobalStep(global_step_var->MutableData()); + davinci_model->SetGlobalStep(global_step_var->MutableData()); // set model id as root node's node id davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); } diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc index 435928ee..dd2557d1 100644 --- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc @@ -142,5 +142,5 @@ TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) { KnownNodeExecutor known_node_executor; std::shared_ptr davinci_model = MakeShared(0, nullptr); known_node_executor.SettingDaviciModel(hybrid_model, node, davinci_model); - EXPECT_EQ(davinci_model->global_step_addr_, 520); + EXPECT_EQ(*(static_cast(davinci_model->global_step_addr_)), 520); } From ebf39e513d1095dfad509821e926bf2cdd72e79f Mon Sep 17 00:00:00 2001 From: y00500818 Date: Thu, 1 Jul 2021 21:28:57 +0800 Subject: [PATCH 138/226] bugfix for InferFormatForSingleOp --- ge/generator/ge_generator.cc | 10 +++++++--- inc/framework/generator/ge_generator.h | 2 +- 
tests/ut/ge/generator/ge_generator_unittest.cc | 11 ++++++++--- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 505b1908..07355ab5 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -808,7 +808,7 @@ Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector return SUCCESS; } -Status GeGenerator::InferFormatForSingleOp(OpDescPtr &op_desc) { +Status GeGenerator::InferFormatForSingleOp(OpDescPtr &op_desc, Graph &graph) { GE_CHECK_NOTNULL(op_desc); if (OperatorFactoryImpl::GetInferFormatFunc(op_desc->GetType()) != nullptr) { auto node_op = ge::OperatorFactoryImpl::CreateOperator("node_op", op_desc->GetType()); @@ -832,7 +832,11 @@ Status GeGenerator::InferFormatForSingleOp(OpDescPtr &op_desc) { } node_op.BreakConnect(); } - auto op = OpDescUtils::CreateOperatorFromOpDesc(op_desc); + auto comp_graph = GraphUtils::GetComputeGraph(graph); + GE_CHECK_NOTNULL(comp_graph); + auto node = comp_graph->FindNode(op_desc->GetName()); + GE_CHECK_NOTNULL(node); + auto op = OpDescUtils::CreateOperatorFromNode(node); auto ret = op_desc->CallInferFormatFunc(op); if (ret != GRAPH_SUCCESS) { REPORT_INNER_ERROR("E19999", "call InferFormatFunc for single op:%s fail", @@ -879,7 +883,7 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector &in Graph graph; GE_CHK_STATUS(BuildSingleOpGraph(op_desc, inputs, outputs, name, graph), "[Build][Graph] for single op:%s fail.", op_desc->GetName().c_str()); - GE_CHK_STATUS_RET_NOLOG(InferFormatForSingleOp(op_desc)); + GE_CHK_STATUS_RET_NOLOG(InferFormatForSingleOp(op_desc, graph)); // 2. check engine type when compile online if (model_file_name == kFileNameSuffix) { diff --git a/inc/framework/generator/ge_generator.h b/inc/framework/generator/ge_generator.h index ee51d29d..5da5a593 100644 --- a/inc/framework/generator/ge_generator.h +++ b/inc/framework/generator/ge_generator.h @@ -106,7 +106,7 @@ class GE_FUNC_VISIBILITY GeGenerator { bool CheckNoAicore(const ComputeGraphPtr &graph); void RemoveConst(const vector &inputs, vector &outputs); Status CheckForSingleOp(OpDescPtr &op_desc, const vector &inputs, const vector &outputs); - Status InferFormatForSingleOp(OpDescPtr &op_desc); + Status InferFormatForSingleOp(OpDescPtr &op_desc, Graph &graph); using GeRootModelPtr = std::shared_ptr; Status SetModelNameForDump(const GeRootModelPtr &ge_root_model); diff --git a/tests/ut/ge/generator/ge_generator_unittest.cc b/tests/ut/ge/generator/ge_generator_unittest.cc index 1bb4430f..b3abb2f9 100644 --- a/tests/ut/ge/generator/ge_generator_unittest.cc +++ b/tests/ut/ge/generator/ge_generator_unittest.cc @@ -83,12 +83,16 @@ TEST_F(UtestGeGenerator, test_build_single_op_offline) { graphStatus TestFunc(Operator &op) { return 0; } graphStatus TestFunc1(Operator &op) { return 1; } TEST_F(UtestGeGenerator, test_infer_format_for_single_op) { + ComputeGraphPtr compute_graph = MakeShared("graph_name"); + auto graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph); OperatorFactoryImpl::RegisterInferFormatFunc("Add", TestFunc); shared_ptr op_desc = make_shared("add", "add"); + compute_graph->AddNode(op_desc); GeGenerator generator; - EXPECT_EQ(generator.InferFormatForSingleOp(op_desc), SUCCESS); + EXPECT_EQ(generator.InferFormatForSingleOp(op_desc, graph), SUCCESS); shared_ptr op_desc1 = make_shared("Add", "Add"); - EXPECT_EQ(generator.InferFormatForSingleOp(op_desc1), SUCCESS); + compute_graph->AddNode(op_desc1); + 
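The assertions around this point exercise the ge_generator.cc change above: InferFormatForSingleOp now looks the OpDesc up in the compute graph and builds the Operator with CreateOperatorFromNode, so a test OpDesc must be added to the graph before the call. Condensed from the diff, the fixed call path is:

    auto comp_graph = GraphUtils::GetComputeGraph(graph);  // Graph -> ComputeGraphPtr
    auto node = comp_graph->FindNode(op_desc->GetName());  // op_desc must already be in the graph
    auto op = OpDescUtils::CreateOperatorFromNode(node);   // node-backed Operator
    auto ret = op_desc->CallInferFormatFunc(op);           // infer format with anchors available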
EXPECT_EQ(generator.InferFormatForSingleOp(op_desc1, graph), SUCCESS); OperatorFactoryImpl::RegisterInferFormatFunc("MatMulV2", TestFunc1); shared_ptr op_desc2 = make_shared("MatMulV2", "MatMulV2"); GeTensorDesc tensor_desc; @@ -99,7 +103,8 @@ TEST_F(UtestGeGenerator, test_infer_format_for_single_op) { EXPECT_EQ(op_desc2->AddInputDesc(tensor_desc), GRAPH_SUCCESS); EXPECT_EQ(op_desc2->AddOutputDesc(tensor_desc), GRAPH_SUCCESS); EXPECT_EQ(op_desc2->AddOutputDesc(tensor_desc), GRAPH_SUCCESS); - EXPECT_EQ(generator.InferFormatForSingleOp(op_desc2), FAILED); + compute_graph->AddNode(op_desc2); + EXPECT_EQ(generator.InferFormatForSingleOp(op_desc2, graph), FAILED); } TEST_F(UtestGeGenerator, test_build_single_op_online) { From 2874ec935f700ce196c28a5b70c7d8070f7c9ff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Fri, 25 Jun 2021 14:08:54 +0800 Subject: [PATCH 139/226] fix parallel group pass --- ge/graph/passes/parallel_group_pass.cc | 58 +++++++++----- ge/graph/passes/parallel_group_pass.h | 1 + .../passes/parallel_group_pass_unittest.cc | 80 ++++++++++++++++++- 3 files changed, 119 insertions(+), 20 deletions(-) diff --git a/ge/graph/passes/parallel_group_pass.cc b/ge/graph/passes/parallel_group_pass.cc index 9c93f6cf..795002f1 100644 --- a/ge/graph/passes/parallel_group_pass.cc +++ b/ge/graph/passes/parallel_group_pass.cc @@ -15,7 +15,7 @@ */ #include "graph/passes/parallel_group_pass.h" - +#include #include "framework/common/debug/ge_log.h" #include "common/ge/ge_util.h" #include "framework/common/ge_inner_error_codes.h" @@ -299,24 +299,19 @@ Status ParallelGroupPass::ReplaceWithSwitchAndMerge(NodePtr pre_node, NodePtr cu for (const auto &switch_node : cur_itr->second.first) { int64_t pre_id = pre_node->GetOpDesc()->GetId(); int64_t switch_id = switch_node->GetOpDesc()->GetId(); - // avoid ring - if (pre_id > switch_id) { - auto merge_node = cur_itr->second.second; - if (AddCtrlEdge(merge_node, pre_node) != SUCCESS) { - GELOGE(FAILED, "[AddEdge][Node]Add edge for nodes: %s->%s failed.", - pre_node->GetName().c_str(), switch_node->GetName().c_str()); - REPORT_CALL_ERROR("E19999", "[AddEdge][Node]Add edge for nodes: %s->%s failed.", - pre_node->GetName().c_str(), switch_node->GetName().c_str()); - return FAILED; - } - } else { - if (AddCtrlEdge(pre_node, switch_node) != SUCCESS) { - GELOGE(FAILED, "[AddEdge][Node]Add edge for nodes: %s->%s failed.", - pre_node->GetName().c_str(), switch_node->GetName().c_str()); - REPORT_CALL_ERROR("E19999", "[AddEdge][Node]Add edge for nodes: %s->%s failed.", - pre_node->GetName().c_str(), switch_node->GetName().c_str()); - return FAILED; - } + NodePtr first_node = pre_node; + NodePtr second_node = switch_node; + if (pre_id > switch_id && IsIndirectConnect(switch_node, pre_node)) { + // avoid ring, merge->pre_node + first_node = cur_itr->second.second; + second_node = pre_node; + } + if (AddCtrlEdge(first_node, second_node) != SUCCESS) { + GELOGE(FAILED, "[AddEdge][Node]Add edge for nodes: %s->%s failed.", + first_node->GetName().c_str(), second_node->GetName().c_str()); + REPORT_CALL_ERROR("E19999", "[AddEdge][Node]Add edge for nodes: %s->%s failed.", + first_node->GetName().c_str(), second_node->GetName().c_str()); + return FAILED; } } } else { @@ -345,4 +340,29 @@ bool ParallelGroupPass::IsWhileStreamSwitch(OpDescPtr switch_op_desc) { return (AttrUtils::GetInt(switch_op_desc, ATTR_NAME_STREAM_SWITCH_TYPE, stream_switch_type) && stream_switch_type == kLoopType); } + +bool ParallelGroupPass::IsIndirectConnect(const NodePtr &node_a, 
const NodePtr &node_b) { + if (node_a == nullptr || node_b == nullptr) { + GELOGW("node_a or node_b is nullptr."); + return false; + } + int64_t end_id = node_b->GetOpDesc()->GetId(); + std::queue nodes; + nodes.push(node_a); + while (!nodes.empty()) { + NodePtr tmp_node = nodes.front(); + nodes.pop(); + if (tmp_node == nullptr || tmp_node->GetOpDesc() == nullptr || + tmp_node->GetOpDesc()->GetId() > end_id) { + continue; + } + if (tmp_node == node_b) { + return true; + } + for (const auto &out_node : tmp_node->GetOutAllNodes()) { + nodes.push(out_node); + } + } + return false; +} } // namespace ge diff --git a/ge/graph/passes/parallel_group_pass.h b/ge/graph/passes/parallel_group_pass.h index cdcdabab..93b0b158 100644 --- a/ge/graph/passes/parallel_group_pass.h +++ b/ge/graph/passes/parallel_group_pass.h @@ -48,6 +48,7 @@ class ParallelGroupPass : public GraphPass { bool IsBigSmallLoopStreamSwitch(OpDescPtr switch_op_desc); bool IsWhileStreamSwitch(OpDescPtr switch_op_desc); + bool IsIndirectConnect(const NodePtr &node_a, const NodePtr &node_b); }; } // namespace ge #endif // GE_GRAPH_PASSES_PARALLEL_GROUP_PASS_H diff --git a/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc b/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc index 374fe837..a6c3ff6a 100644 --- a/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/parallel_group_pass_unittest.cc @@ -19,7 +19,8 @@ #include #define private public - +#include "inc/graph/ge_local_context.h" +#include "inc/external/ge/ge_api_types.h" #include "common/ge_inner_error_codes.h" #include "inc/pass_manager.h" #include "utils/graph_utils.h" @@ -225,6 +226,70 @@ class UtestGraphPassesParallelGgroupPass : public testing::Test { output_true_node_->GetOpDesc()->SetIsInputConst({false}); } + void BuildDefaultGraph3() { + /// input + /// \ + /// sqrt pred + /// \ / + /// Switch + /// | | + /// F T ------ + /// / \_/_ \ + /// / / \ \ + /// Merge sqrt2 sqrt3 + /// / \ \ + /// sqrt1 \ relu + /// \ \ + /// \ sqrt4 + /// \ / + /// Merge1 + input_node_ = NewNode("input", RELU, 0, 1); + AttrUtils::SetStr(input_node_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + pred_node_ = NewNode("pred", GREATER, 2, 1); + sqrt_node_ = NewNode("sqrt", SQRT, 1, 1); + cast_node_ = NewNode("cast", CAST, 2, 2); + + switch_node_t = NewNode("switch_t", STREAMSWITCH, 1, 1); + AttrUtils::SetBool(switch_node_t->GetOpDesc(), ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, true); + switch_node_f = NewNode("switch_f", STREAMSWITCH, 1, 1); + AttrUtils::SetBool(switch_node_f->GetOpDesc(), ATTR_NAME_SWITCH_TRUE_BRANCH_FLAG, false); + output_false_node_ = NewNode("false_output", RELU, 1, 2); + AttrUtils::SetStr(output_false_node_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + output_true_node_ = NewNode("true_output", RELU, 1, 2); + AttrUtils::SetStr(output_true_node_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + merge_node_ = NewNode("merge", STREAMMERGE, 2, 1); + sqrt_node1_ = NewNode("sqrt1", SQRT, 1, 1); + AttrUtils::SetStr(sqrt_node1_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + sqrt_node2_ = NewNode("sqrt2", SQRT, 1, 1); + AttrUtils::SetStr(sqrt_node2_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + sqrt_node3_ = NewNode("sqrt3", SQRT, 1, 1); + relu_node_ = NewNode("relu", RELU, 1, 1); + sqrt_node4_ = NewNode("sqrt4", SQRT, 1, 1); + AttrUtils::SetStr(sqrt_node4_->GetOpDesc(), ATTR_NAME_PARALLEL_GROUP, "1"); + merge_node1_ = NewNode("merge1", STREAMMERGE, 2, 1); + + GraphUtils::AddEdge(input_node_->GetOutDataAnchor(0), 
sqrt_node_->GetInDataAnchor(0)); + GraphUtils::AddEdge(pred_node_->GetOutDataAnchor(0), cast_node_->GetInDataAnchor(0)); + GraphUtils::AddEdge(sqrt_node_->GetOutDataAnchor(0), cast_node_->GetInDataAnchor(1)); + GraphUtils::AddEdge(cast_node_->GetOutDataAnchor(0), switch_node_t->GetInDataAnchor(0)); + GraphUtils::AddEdge(cast_node_->GetOutDataAnchor(1), switch_node_f->GetInDataAnchor(0)); + GraphUtils::AddEdge(switch_node_f->GetOutDataAnchor(0), output_false_node_->GetInDataAnchor(0)); + GraphUtils::AddEdge(switch_node_t->GetOutDataAnchor(0), output_true_node_->GetInDataAnchor(0)); + + GraphUtils::AddEdge(output_false_node_->GetOutDataAnchor(0), merge_node_->GetInDataAnchor(0)); + GraphUtils::AddEdge(output_true_node_->GetOutDataAnchor(0), merge_node_->GetInDataAnchor(1)); + GraphUtils::AddEdge(output_false_node_->GetOutDataAnchor(1), sqrt_node2_->GetInDataAnchor(0)); + GraphUtils::AddEdge(output_true_node_->GetOutDataAnchor(1), sqrt_node3_->GetInDataAnchor(0)); + + GraphUtils::AddEdge(merge_node_->GetOutDataAnchor(0), sqrt_node1_->GetInDataAnchor(0)); + GraphUtils::AddEdge(sqrt_node3_->GetOutDataAnchor(0), relu_node_->GetInDataAnchor(0)); + GraphUtils::AddEdge(relu_node_->GetOutDataAnchor(0), sqrt_node4_->GetInDataAnchor(0)); + GraphUtils::AddEdge(sqrt_node2_->GetOutDataAnchor(0), merge_node1_->GetInDataAnchor(0)); + GraphUtils::AddEdge(sqrt_node4_->GetOutDataAnchor(0), merge_node1_->GetInDataAnchor(1)); + output_false_node_->GetOpDesc()->SetIsInputConst({false}); + output_true_node_->GetOpDesc()->SetIsInputConst({false}); + } + ComputeGraphPtr graph_; ComputeGraphPtr sub_graph_; GeTensorDescPtr default_tensor_desc_; @@ -235,6 +300,9 @@ class UtestGraphPassesParallelGgroupPass : public testing::Test { NodePtr cast_node1_; NodePtr sqrt_node_; NodePtr sqrt_node1_; + NodePtr sqrt_node2_; + NodePtr sqrt_node3_; + NodePtr sqrt_node4_; NodePtr input_node_; NodePtr input_node1_; NodePtr switch_node_t; @@ -278,6 +346,16 @@ TEST_F(UtestGraphPassesParallelGgroupPass, normal_graph2) { EXPECT_EQ(true, input_node1_->GetOutControlAnchor()->IsLinkedWith(cast_node1_->GetInControlAnchor())); } +TEST_F(UtestGraphPassesParallelGgroupPass, normal_graph3) { + std::map options; + options.emplace(OPTION_GRAPH_RUN_MODE, "1"); + GetThreadLocalContext().SetGraphOption(options); + BuildDefaultGraph3(); + auto ret = pass_.Run(graph_); + EXPECT_EQ(ret, GRAPH_SUCCESS); + EXPECT_EQ(true, merge_node1_->GetOutControlAnchor()->IsLinkedWith(sqrt_node1_->GetInControlAnchor())); +} + TEST_F(UtestGraphPassesParallelGgroupPass, normal_subgraph) { BuildDefaultGraph1(); NodePtr input_node1 = NewNode("input1", RELU, 0, 1, true); From 3de58133d1d3e88d912dcc32fa07ac137bbc9a98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=9B=9B=E6=A5=A0?= Date: Fri, 2 Jul 2021 10:22:54 +0800 Subject: [PATCH 140/226] fix the chmod error when unpack .run files --- scripts/update/ge_update.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/update/ge_update.sh b/scripts/update/ge_update.sh index d6bcd043..57266d06 100755 --- a/scripts/update/ge_update.sh +++ b/scripts/update/ge_update.sh @@ -38,7 +38,7 @@ function extract_deps_so_community() { echo "begin to extract .run file ........." 
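# Note on the one-character fix below: "chmod +X" sets the execute bit only on
# directories and on files that already have execute permission for some user,
# so the freshly unpacked .run package stayed non-executable; lowercase
# "chmod +x" sets the execute bit unconditionally, which is what this
# extraction step needs.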
chmod +x ./${DRIVER_RUN_NAME_C} - chmod +X ./${PACKAGE_NAME_C} + chmod +x ./${PACKAGE_NAME_C} [ -n "${DEP_TMP_DIR}" ] && rm -rf "${DEP_TMP_DIR}" ./${DRIVER_RUN_NAME_C} --noexec --extract=${DEP_TMP_DIR}/driver ./${PACKAGE_NAME_C} --noexec --extract=${DEP_TMP_DIR}/Packages_tmp From 9287ca4c4ca7285882d3a290d5526f1fb0310057 Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 2 Jul 2021 13:05:17 +0800 Subject: [PATCH 141/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/graph/load/model_manager/davinci_model.cc | 2 +- ge/graph/load/model_manager/davinci_model.h | 7 ------- .../node_executor/compiledsubgraph/known_node_executor.cc | 7 +++---- .../node_executor/compiledsubgraph/known_node_executor.h | 2 +- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 7d82879f..87e0c6f2 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -4365,7 +4365,7 @@ void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map void *{ diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index daf0c7e6..6cb79804 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -499,10 +499,6 @@ class DavinciModel { return exception_dumper_.DumpExceptionInfo(exception_infos); } - void SetKnownShapeGlobalStep(void *global_step) { - known_shape_global_step_ = global_step; - } - void DumperShrink() { data_dumper_.DumpShrink(); } @@ -1109,9 +1105,6 @@ class DavinciModel { vector output_descs_; vector output_formats_; - // known shape node for dump - void *known_shape_global_step_; - // op name to attrs mapping std::map>> op_name_to_attrs_; }; diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index b903f6af..c8ebd160 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -145,8 +145,6 @@ Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *w auto dump_properties = DumpManager::GetInstance().GetDumpProperties(model.GetSessionId()); if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { davinci_model_->SetDumpProperties(dump_properties); - void *global_step = model.GetGlobalStep(); - davinci_model_->SetKnownShapeGlobalStep(global_step); } void *weight = nullptr; @@ -182,7 +180,7 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons return SUCCESS; } -void KnownNodeExecutor::SettingDaviciModel(const HybridModel &model, const NodePtr &node, +void KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr &node, std::shared_ptr &davinci_model) const { // set known node flag as true davinci_model->SetKnownNode(true); @@ -190,6 +188,7 @@ void KnownNodeExecutor::SettingDaviciModel(const HybridModel &model, const NodeP davinci_model->SetDumpModelName(model.GetModelName()); davinci_model->SetOmName(model.GetOmName()); TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); + GE_CHECK_NOTNULL(global_step_var); davinci_model->SetGlobalStep(global_step_var->MutableData()); // set model id as root node's node id davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); @@ -212,7 +211,7 @@ 
Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node std::shared_ptr davinci_model = MakeShared(0, nullptr); GE_CHECK_NOTNULL(davinci_model); - SettingDaviciModel(model, node, davinci_model); + SetDaviciModel(model, node, davinci_model); GELOGD("KnownNodeExecutor::LoadTask node id %ld.", node->GetOpDesc()->GetId()); GE_CHK_STATUS_RET(davinci_model->Assign(ge_model), diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h index 475feeb1..2d51db58 100644 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h @@ -59,7 +59,7 @@ class KnownNodeExecutor : public NodeExecutor { const NodePtr &node, GeModelPtr &ge_model, ComputeGraphPtr &graph); - void SettingDaviciModel(const HybridModel &model, const NodePtr &node, + void SetDaviciModel(const HybridModel &model, const NodePtr &node, std::shared_ptr &davinci_model) const; }; } // namespace hybrid From 9530a1631f5a2cfb274dac3eeef9cb9770e00222 Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 2 Jul 2021 15:34:00 +0800 Subject: [PATCH 142/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index c8ebd160..29c829be 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -188,7 +188,7 @@ void KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr & davinci_model->SetDumpModelName(model.GetModelName()); davinci_model->SetOmName(model.GetOmName()); TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); - GE_CHECK_NOTNULL(global_step_var); + GE_CHK_BOOL_EXEC(global_step_var != nullptr, return); davinci_model->SetGlobalStep(global_step_var->MutableData()); // set model id as root node's node id davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); From d74118080c8989eda1bbf9adfed4969dae6db22d Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 2 Jul 2021 15:55:23 +0800 Subject: [PATCH 143/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- tests/ut/ge/hybrid/known_node_executor_unittest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc index dd2557d1..e6a6119e 100644 --- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc @@ -141,6 +141,6 @@ TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) { KnownNodeExecutor known_node_executor; std::shared_ptr davinci_model = MakeShared(0, nullptr); - known_node_executor.SettingDaviciModel(hybrid_model, node, davinci_model); + known_node_executor.SetDaviciModel(hybrid_model, node, davinci_model); EXPECT_EQ(*(static_cast(davinci_model->global_step_addr_)), 520); } From 91d70e5f93df1adea8f8bcdbd1ef9fde3582284f Mon Sep 17 00:00:00 2001 From: lichun Date: Sat, 3 Jul 2021 10:43:42 +0800 Subject: [PATCH 144/226] add global step info for known subgraph in unknown model and generate om for 
remained cases when some single op cases run atc failed --- ge/graph/load/model_manager/davinci_model.cc | 2 +- ge/graph/load/model_manager/davinci_model.h | 7 +++++++ .../node_executor/compiledsubgraph/known_node_executor.cc | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 87e0c6f2..7d82879f 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -4365,7 +4365,7 @@ void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map void *{ diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 6cb79804..daf0c7e6 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -499,6 +499,10 @@ class DavinciModel { return exception_dumper_.DumpExceptionInfo(exception_infos); } + void SetKnownShapeGlobalStep(void *global_step) { + known_shape_global_step_ = global_step; + } + void DumperShrink() { data_dumper_.DumpShrink(); } @@ -1105,6 +1109,9 @@ class DavinciModel { vector output_descs_; vector output_formats_; + // known shape node for dump + void *known_shape_global_step_; + // op name to attrs mapping std::map>> op_name_to_attrs_; }; diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 29c829be..96392fe1 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -145,6 +145,8 @@ Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *w auto dump_properties = DumpManager::GetInstance().GetDumpProperties(model.GetSessionId()); if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { davinci_model_->SetDumpProperties(dump_properties); + void *global_step = model.GetGlobalStep(); + davinci_model_->SetKnownShapeGlobalStep(); } void *weight = nullptr; From 988ef307399fc8720f4d03a8ada5af835b9db674 Mon Sep 17 00:00:00 2001 From: lichun Date: Sat, 3 Jul 2021 10:44:40 +0800 Subject: [PATCH 145/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 96392fe1..753cf4ba 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -146,7 +146,7 @@ Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *w if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { davinci_model_->SetDumpProperties(dump_properties); void *global_step = model.GetGlobalStep(); - davinci_model_->SetKnownShapeGlobalStep(); + davinci_model_->SetKnownShapeGlobalStep(global_step); } void *weight = nullptr; From 9321cd84f5873afa9ff8a36c21b22369588701c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=B6=9B?= Date: Sat, 3 Jul 2021 07:26:22 +0000 Subject: [PATCH 146/226] update README_CN.md. 
--- README_CN.md | 53 ++-------------------------------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/README_CN.md b/README_CN.md index 0a1e9c09..48fe4216 100644 --- a/README_CN.md +++ b/README_CN.md @@ -34,18 +34,6 @@ 在训练/推理过程中,上述过程会自动执行,通过上述图操作,GE可以将前端下发的图转换为一种可以在昇腾AI处理器上高效运行的图模式。 - - -- [安装说明](#安装说明) - - [安装GE](#安装ge) - - [源码安装](#源码安装) - - [社区](#社区) - - [贡献](#贡献) - - [Release Notes](#release-notes) - - [License](#license) - - - # 安装说明 ## 安装GE @@ -54,45 +42,8 @@ GE内嵌在MindSpore安装包中,MindSpore安装完毕后,GE以三个动态 ## 源码安装 -GE也支持由源码编译,进行源码编译前,首先确保你有昇腾910 AI处理器的环境,同时系统满足以下要求: - -- GCC >= 7.3.0 -- CMake >= 3.14.0 -- Autoconf >= 2.64 -- Libtool >= 2.4.6 -- Automake >= 1.15.1 - -编译完成后会生成几个动态库,他们会链接到MindSpore中执行,无法单独运行。 - -1. 下载GE源码。 - - GE源码托管在码云平台,可由此下载。 - ``` - git clone https://gitee.com/mindspore/graphengine.git - cd graphengine - ``` - -2. 在GE根目录下执行下列命令即可进行编译。 - - ``` - bash build.sh - ``` - - > - 开始编译之前,请确保正确设置相关的环境变量。 - > - 在`build.sh`的脚本中,会进行`git clone`操作,请确保网络连接正常且git配置正确。 - > - 在`build.sh`的脚本中,默认会8线程编译,如果机器性能较差,可能会编译失败。可以通过`-j{线程数}`来控制线程数,如`bash build.sh –j4`。 - -3. 完成编译后,相应的动态库文件会生成在output文件夹中。 - -更多指令帮助,可以使用: -``` -bash build.sh –h -``` -如果想清除历史编译记录,可以如下操作: -``` -rm -rf build/ output/ -bash build.sh -``` +GE也支持由源码编译,请参考以下链接完成: +[个人开发工具链](https://gitee.com/mindspore/graphengine/blob/master/scripts/readme.md) ## 社区 From d488c042a57344e7b0759c335860eeb49a132155 Mon Sep 17 00:00:00 2001 From: lichun Date: Sat, 3 Jul 2021 15:38:36 +0800 Subject: [PATCH 147/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/graph/load/model_manager/davinci_model.cc | 5 +++++ ge/graph/load/model_manager/davinci_model.h | 2 +- .../compiledsubgraph/known_node_executor.cc | 9 +++++---- .../node_executor/compiledsubgraph/known_node_executor.h | 4 ++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 7d82879f..6bf2c6d5 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -1480,6 +1480,11 @@ Status DavinciModel::GetLabelGotoAddr(uint32_t label_index, rtMemType_t mem_type return SUCCESS; } +void DavinciModel::SetGlobalStep(void *global_step, uint64_t global_step_size) { + global_step_addr_ = global_step; + global_step_size_ = global_step_size; +} + /// @ingroup ge /// @brief LabelSet Op Initialize. /// @param [in] op_desc: LabelSet Op descriptor. 
diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index daf0c7e6..db53d80f 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -300,7 +300,7 @@ class DavinciModel { return op_list_.at(index); } - void SetGlobalStep(void *global_step) { global_step_addr_ = global_step; } + void SetGlobalStep(void *global_step, uint64_t global_step_size); void *GetGlobalStep() const { return global_step_addr_; } // get task info for profiling diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 753cf4ba..292969b6 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -182,7 +182,7 @@ Status KnownNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) cons return SUCCESS; } -void KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr &node, +Status KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr &node, std::shared_ptr &davinci_model) const { // set known node flag as true davinci_model->SetKnownNode(true); @@ -190,10 +190,11 @@ void KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr & davinci_model->SetDumpModelName(model.GetModelName()); davinci_model->SetOmName(model.GetOmName()); TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); - GE_CHK_BOOL_EXEC(global_step_var != nullptr, return); - davinci_model->SetGlobalStep(global_step_var->MutableData()); + GE_CHECK_NOTNULL(global_step_var); + davinci_model->SetGlobalStep(global_step_var->MutableData(), global_step_var->GetSize()); // set model id as root node's node id davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); + return SUCCESS; } Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node, @@ -213,7 +214,7 @@ Status KnownNodeExecutor::LoadTask(const HybridModel &model, const NodePtr &node std::shared_ptr davinci_model = MakeShared(0, nullptr); GE_CHECK_NOTNULL(davinci_model); - SetDaviciModel(model, node, davinci_model); + GE_CHK_STATUS_RET_NOLOG(SetDaviciModel(model, node, davinci_model)); GELOGD("KnownNodeExecutor::LoadTask node id %ld.", node->GetOpDesc()->GetId()); GE_CHK_STATUS_RET(davinci_model->Assign(ge_model), diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h index 2d51db58..37b5a3d8 100644 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.h @@ -59,8 +59,8 @@ class KnownNodeExecutor : public NodeExecutor { const NodePtr &node, GeModelPtr &ge_model, ComputeGraphPtr &graph); - void SetDaviciModel(const HybridModel &model, const NodePtr &node, - std::shared_ptr &davinci_model) const; + Status SetDaviciModel(const HybridModel &model, const NodePtr &node, + std::shared_ptr &davinci_model) const; }; } // namespace hybrid } // namespace ge From 6bfd96b5409451eca22d82c60fad9679b167ad47 Mon Sep 17 00:00:00 2001 From: lichun Date: Sat, 3 Jul 2021 17:54:44 +0800 Subject: [PATCH 148/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed --- ge/graph/load/model_manager/davinci_model.cc | 20 ++++++++++--------- ge/graph/load/model_manager/davinci_model.h | 7 ------- 
.../compiledsubgraph/known_node_executor.cc | 7 ++----- 3 files changed, 13 insertions(+), 21 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 6bf2c6d5..2306665c 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -1547,14 +1547,16 @@ Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) { } Status DavinciModel::InitVariable(const OpDescPtr &op_desc, map &variable_by_name) { - if (op_desc->GetName() == NODE_NAME_GLOBAL_STEP) { - const auto output_sizes = ModelUtils::GetOutputSize(op_desc); - if (!output_sizes.empty()) { - global_step_size_ = output_sizes[0]; - } - const auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, op_desc); - if (!output_addrs.empty()) { - global_step_addr_ = output_addrs[0]; + if (!known_node_) { + if (op_desc->GetName() == NODE_NAME_GLOBAL_STEP) { + const auto output_sizes = ModelUtils::GetOutputSize(op_desc); + if (!output_sizes.empty()) { + global_step_size_ = output_sizes[0]; + } + const auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, op_desc); + if (!output_addrs.empty()) { + global_step_addr_ = output_addrs[0]; + } } } @@ -4370,7 +4372,7 @@ void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map void *{ diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index db53d80f..4ff36677 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -499,10 +499,6 @@ class DavinciModel { return exception_dumper_.DumpExceptionInfo(exception_infos); } - void SetKnownShapeGlobalStep(void *global_step) { - known_shape_global_step_ = global_step; - } - void DumperShrink() { data_dumper_.DumpShrink(); } @@ -1109,9 +1105,6 @@ class DavinciModel { vector output_descs_; vector output_formats_; - // known shape node for dump - void *known_shape_global_step_; - // op name to attrs mapping std::map>> op_name_to_attrs_; }; diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc index 292969b6..fd33f8b9 100755 --- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc +++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc @@ -145,8 +145,6 @@ Status KnownNodeTask::InitDavinciModel(const HybridModel &model, TensorBuffer *w auto dump_properties = DumpManager::GetInstance().GetDumpProperties(model.GetSessionId()); if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) { davinci_model_->SetDumpProperties(dump_properties); - void *global_step = model.GetGlobalStep(); - davinci_model_->SetKnownShapeGlobalStep(global_step); } void *weight = nullptr; @@ -189,9 +187,8 @@ Status KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr davinci_model->SetId(model.GetModelId()); davinci_model->SetDumpModelName(model.GetModelName()); davinci_model->SetOmName(model.GetOmName()); - TensorValue *global_step_var = model.GetVariable(NODE_NAME_GLOBAL_STEP); - GE_CHECK_NOTNULL(global_step_var); - davinci_model->SetGlobalStep(global_step_var->MutableData(), global_step_var->GetSize()); + void *global_step = model.GetGlobalStep(); + davinci_model->SetGlobalStep(global_step, sizeof(int64_t)); // set model id as root node's node id davinci_model->SetSubModelId(node->GetOpDesc()->GetId()); return SUCCESS; From 95944c17963c0595091de45486204f0a5e5d9d44 Mon Sep 17 00:00:00 2001 
From: lichun
Date: Sat, 3 Jul 2021 18:05:43 +0800
Subject: [PATCH 149/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed

---
 ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
index fd33f8b9..e5663fb8 100755
--- a/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
+++ b/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc
@@ -188,6 +188,7 @@ Status KnownNodeExecutor::SetDaviciModel(const HybridModel &model, const NodePtr
   davinci_model->SetDumpModelName(model.GetModelName());
   davinci_model->SetOmName(model.GetOmName());
   void *global_step = model.GetGlobalStep();
+  GE_CHECK_NOTNULL(global_step);
   davinci_model->SetGlobalStep(global_step, sizeof(int64_t));
   // set model id as root node's node id
   davinci_model->SetSubModelId(node->GetOpDesc()->GetId());

From dcdfae9453ef209439b14801514a66227524b3d0 Mon Sep 17 00:00:00 2001
From: lichun
Date: Sat, 3 Jul 2021 18:14:27 +0800
Subject: [PATCH 150/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed

---
 ge/graph/load/model_manager/davinci_model.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc
index 2306665c..9d86039a 100755
--- a/ge/graph/load/model_manager/davinci_model.cc
+++ b/ge/graph/load/model_manager/davinci_model.cc
@@ -4372,7 +4372,7 @@ void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &graph, const map void *{

From 8b9ddcbc0ebfa07193a6c468120c0e23337936fd Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Sat, 3 Jul 2021 19:10:51 +0800
Subject: [PATCH 151/226] Fix cross merge for switch

---
 .../mark_force_unknown_for_cond_pass.cc | 64 ++++++++++++++++---
 1 file changed, 55 insertions(+), 9 deletions(-)

diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
index aa36a43b..a9b2c70f 100644
--- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
+++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
@@ -145,17 +145,63 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const NodePtr &node, std:
 /// @return
 ///
 void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr, std::vector<NodePtr>> &switch_groups) {
-  for (auto it = switch_groups.begin(); it != switch_groups.end(); ++it) {
-    const auto &op_node = it->first;
-    const auto &op_desc = op_node->GetOpDesc();
-    if (op_desc->HasAttr(ATTR_NAME_CONTROL_FLOW_GROUP)) {
-      continue;
+  // Step 0: no group assigned. such as:
+  //   Merge1{id=0, group=} => {Switch1{id=1, group=}, Switch2{id=2, group=}}
+  //   Merge2{id=3, group=} => {Switch1{id=1, group=}, Switch3{id=4, group=}}
+  //   Merge3{id=5, group=} => {Switch4{id=6, group=}, Switch5{id=7, group=}}
+  //   Merge4{id=8, group=} => {Switch1{id=1, group=}, Switch5{id=7, group=}}
+  std::map<int64_t, int64_t> unique_groups;
+  const auto GetGroupIndex = [&unique_groups](const NodePtr &merge, const std::vector<NodePtr> &switch_group) {
+    int64_t group_index = merge->GetOpDesc()->GetId();
+    std::set<int64_t> group_ids{group_index};
+    for (const auto &node : switch_group) {
+      if (AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index)) {
+        GELOGI("[%s] Get group from [%s], index[%ld]", merge->GetName().c_str(), node->GetName().c_str(), group_index);
+        group_ids.insert(group_index);
+      }
+    }
+
+    const auto it = unique_groups.find(group_index);
+    if (it != unique_groups.end()) {
+      group_index = it->second;
     }
-    int64_t group_index = op_desc->GetId();
-    SetControlFlowGroup(op_node, group_index);
-    for (const auto &n : it->second) {
-      SetControlFlowGroup(n, group_index);
+    for (auto id : group_ids) {
+      unique_groups[id] = group_index;
+    }
+
+    return group_index;
+  };
+
+  const auto SetGroupIndex = [](const NodePtr &merge, const std::vector<NodePtr> &switch_group, int64_t group_index) {
+    SetControlFlowGroup(merge, group_index);
+    for (const auto &node : switch_group) {
+      SetControlFlowGroup(node, group_index);
+    }
+  };
+
+  // Step 1: Set group index to merge, if switch already has group, use assigned group.
+  //   Merge1{id=0, group=0} => {Switch1{id=1, group=0}, Switch2{id=2, group=0}}
+  //   Merge2{id=3, group=0} => {Switch1{id=1, group=0}, Switch3{id=4, group=0}}
+  //   Merge3{id=5, group=5} => {Switch4{id=6, group=5}, Switch5{id=7, group=5}}
+  //   Merge4{id=8, group=0} => {Switch1{id=1, group=0}, Switch5{id=7, group=0}}
+  for (const auto group : switch_groups) {
+    int64_t group_index = GetGroupIndex(group.first, group.second);
+    SetGroupIndex(group.first, group.second, group_index);
+  }
+
+  // Step 2: Adjust crossed merge group for unique group.
+  //   Merge1{id=0, group=0} => {Switch1{id=1, group=0}, Switch2{id=2, group=0}}
+  //   Merge2{id=3, group=0} => {Switch1{id=1, group=0}, Switch3{id=4, group=0}}
+  //   Merge3{id=5, group=0} => {Switch4{id=6, group=0}, Switch5{id=7, group=0}}
+  //   Merge4{id=8, group=0} => {Switch1{id=1, group=0}, Switch5{id=7, group=0}}
+  for (const auto group : switch_groups) {
+    int64_t group_index = -1;
+    (void)AttrUtils::GetInt(group.first->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, group_index);
+
+    const auto it = unique_groups.find(group_index);
+    if (it != unique_groups.end() && it->first != it->second) {
+      SetGroupIndex(group.first, group.second, it->second);
+    }
+  }
 }
 }

From eb11a6e55c2d51456c47e100c2538e1099270cda Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Mon, 5 Jul 2021 08:41:54 +0800
Subject: [PATCH 152/226] Fix lambda expression name

---
 ge/graph/passes/mark_force_unknown_for_cond_pass.cc | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
index a9b2c70f..67b6c617 100644
--- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
+++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc
@@ -151,7 +151,7 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr,
   //   Merge3{id=5, group=} => {Switch4{id=6, group=}, Switch5{id=7, group=}}
   //   Merge4{id=8, group=} => {Switch1{id=1, group=}, Switch5{id=7, group=}}
   std::map<int64_t, int64_t> unique_groups;
-  const auto GetGroupIndex = [&unique_groups](const NodePtr &merge, const std::vector<NodePtr> &switch_group) {
+  const auto get_group_index = [&unique_groups](const NodePtr &merge, const std::vector<NodePtr> &switch_group) {
     int64_t group_index = merge->GetOpDesc()->GetId();
     std::set<int64_t> group_ids{group_index};
     for (const auto &node : switch_group) {
@@ -173,7 +173,7 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr,
     return group_index;
   };
 
-  const auto SetGroupIndex = [](const NodePtr &merge, const std::vector<NodePtr> &switch_group, int64_t group_index) {
+  const auto set_group_index = [](const NodePtr &merge, const std::vector<NodePtr> &switch_group, int64_t group_index) {
     SetControlFlowGroup(merge, group_index);
     for (const auto &node : switch_group) {
       SetControlFlowGroup(node, group_index);
@@ -186,8 +186,8 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr,
   //   Merge3{id=5, group=5} => {Switch4{id=6, group=5}, Switch5{id=7, group=5}}
   //   Merge4{id=8, group=0} => {Switch1{id=1, group=0}, Switch5{id=7, group=0}}
   for (const auto group : switch_groups) {
-    int64_t group_index = GetGroupIndex(group.first, group.second);
-    SetGroupIndex(group.first, group.second, group_index);
+    int64_t group_index = get_group_index(group.first, group.second);
+    set_group_index(group.first, group.second, group_index);
   }
 
   // Step 2: Adjust crossed merge group for unique group.
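// A standalone sketch of the two-step unification implemented above. It
// behaves like a small union-find over group ids: each merge first proposes
// its own id (or an id one of its switches already carries), and crossed
// groups are then redirected to a single representative. Plain integers stand
// in for Merge/Switch nodes; every name below is illustrative rather than a
// GE API, and the representative id chosen can differ from the pass's, but
// the end state is the same: merges with overlapping switch sets share one group.
#include <cstdint>
#include <cstdio>
#include <map>
#include <set>
#include <vector>

int main() {
  // merge id -> ids of the switches it controls; Switch 1 and Switch 7 are
  // shared between merges, which is what forces the groups to be unified.
  std::map<int64_t, std::vector<int64_t>> groups = {
      {0, {1, 2}}, {3, {1, 4}}, {5, {6, 7}}, {8, {1, 7}}};
  std::map<int64_t, int64_t> assigned;       // switch id -> current group
  std::map<int64_t, int64_t> unique_groups;  // group id -> representative

  // Step 1: assign a group to every merge, preferring any group one of its
  // switches already carries, and record all touched ids under one key.
  for (const auto &group : groups) {
    int64_t group_index = group.first;
    std::set<int64_t> group_ids{group_index};
    for (int64_t sw : group.second) {
      const auto it = assigned.find(sw);
      if (it != assigned.end()) {
        group_index = it->second;
        group_ids.insert(group_index);
      }
    }
    const auto rep = unique_groups.find(group_index);
    if (rep != unique_groups.end()) {
      group_index = rep->second;  // follow the existing representative
    }
    for (int64_t id : group_ids) {
      unique_groups[id] = group_index;  // merge crossed groups under one id
    }
    for (int64_t sw : group.second) {
      assigned[sw] = group_index;
    }
  }

  // Step 2: redirect crossed groups to their final representative.
  for (auto &entry : assigned) {
    const auto it = unique_groups.find(entry.second);
    if (it != unique_groups.end() && it->first != it->second) {
      entry.second = it->second;
    }
  }
  for (const auto &entry : assigned) {
    std::printf("switch %lld -> group %lld\n",
                static_cast<long long>(entry.first),
                static_cast<long long>(entry.second));
  }
  return 0;
}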
@@ -201,7 +201,7 @@ void MarkForceUnknownForCondPass::MarkUnknownForSwitch(const std::map<NodePtr,
 
     const auto it = unique_groups.find(group_index);
     if (it != unique_groups.end() && it->first != it->second) {
-      SetGroupIndex(group.first, group.second, it->second);
+      set_group_index(group.first, group.second, it->second);
     }
   }
 }

From 42c4dee78f2955d0db507fceb87f34cae1732dab Mon Sep 17 00:00:00 2001
From: wuweikang
Date: Mon, 5 Jul 2021 09:59:23 +0800
Subject: [PATCH 153/226] fix dump step check

---
 ge/common/dump/dump_properties.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc
index 84bdb7bf..bc645f61 100644
--- a/ge/common/dump/dump_properties.cc
+++ b/ge/common/dump/dump_properties.cc
@@ -204,7 +204,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties:
 FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() {
   if (enable_dump_ == kEnableFlag) {
     std::string dump_step;
-    if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) {
+    if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS && !dump_step.empty()) {
       GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed.");
       GELOGI("Get dump step %s successfully", dump_step.c_str());
       SetDumpStep(dump_step);

From 6314c48f921201de882d64449a49342a782bffeb Mon Sep 17 00:00:00 2001
From: lichun
Date: Mon, 5 Jul 2021 10:07:42 +0800
Subject: [PATCH 154/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed

---
 tests/ut/ge/hybrid/known_node_executor_unittest.cc | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
index e6a6119e..ea499422 100644
--- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
@@ -135,10 +135,9 @@ TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) {
   HybridModel hybrid_model(ge_root_model);
   auto *step_id = new int64_t[1];
   step_id[0] = 520;
-  std::unique_ptr<TensorValue> tensor_value;
-  tensor_value.reset(new(std::nothrow)TensorValue((void*)step_id, sizeof(step_id)));
-  hybrid_model.variable_tensors_.insert({"ge_global_step", std::move(tensor_value)});
-
+  std::unique_ptr<TensorBuffer> tensor_buf;
+  tensor_buf = tensor_buf->Create((void *)step_id, sizeof(int64_t));
+  hybrid_model.global_step_ = std::move(tensor_buf);
   KnownNodeExecutor known_node_executor;
   std::shared_ptr<DavinciModel> davinci_model = MakeShared<DavinciModel>(0, nullptr);
   known_node_executor.SetDaviciModel(hybrid_model, node, davinci_model);

From 27dae9195ce46815a0d71f5caa21247937ed9eb4 Mon Sep 17 00:00:00 2001
From: lichun
Date: Mon, 5 Jul 2021 10:30:59 +0800
Subject: [PATCH 155/226] add global step info for known subgraph in unknown model and generate om for remained cases when some single op cases run atc failed

---
 tests/ut/ge/hybrid/known_node_executor_unittest.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ut/ge/hybrid/known_node_executor_unittest.cc b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
index ea499422..b6d06f5d 100644
--- a/tests/ut/ge/hybrid/known_node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/known_node_executor_unittest.cc
@@ -135,7 +135,7 @@ TEST_F(UnknownNodeExecutorTest, TestSetGlobalStep) {
   HybridModel hybrid_model(ge_root_model);
   auto *step_id = new int64_t[1];
   step_id[0] = 520;
-  std::unique_ptr<TensorBuffer> tensor_buf;
   tensor_buf = tensor_buf->Create((void *)step_id,
sizeof(int64_t)); hybrid_model.global_step_ = std::move(tensor_buf); KnownNodeExecutor known_node_executor; From a145aaac29d01c71c81f4e562ca031ce1a0c3f33 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 5 Jul 2021 20:55:40 +0800 Subject: [PATCH 156/226] Atomic task for single_op. --- ge/single_op/single_op_model.cc | 160 +++++++++++------- ge/single_op/single_op_model.h | 5 +- ge/single_op/task/op_task.cc | 101 ++++++++++- ge/single_op/task/op_task.h | 42 ++++- ge/single_op/task/tbe_task_builder.cc | 80 ++++++--- ge/single_op/task/tbe_task_builder.h | 26 ++- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 1 - .../ge/single_op/single_op_model_unittest.cc | 22 ++- .../ge/single_op/single_op_task_unittest.cc | 27 +++ 9 files changed, 363 insertions(+), 101 deletions(-) diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 9a52a83d..f8831884 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -46,7 +46,12 @@ namespace { const size_t kDataOutputNum = 1; const uint32_t kInputIndexOfData = 0; const uint32_t kOutputIndexOfData = 0; +const size_t kNumTaskWithAtomicAddrCleanTask = 2; +const size_t kNumTaskWithMemCpyTask = 2; constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; +const char *const kEngineNameAiCore = "AIcoreEngine"; +const char *const kEngineNameAiCpu = "aicpu_ascend_kernel"; +const char *const kEngineNameAiCpuTf = "aicpu_tf_kernel"; Status CheckHostMem(const std::vector &dependencies, const NodePtr &node, bool &is_host_mem) { auto op_desc = node->GetOpDesc(); @@ -395,7 +400,7 @@ void SingleOpModel::ParseArgTable(OpTask *task, SingleOp &op) { } } } - + Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task) { GE_CHECK_NOTNULL(task); auto task_type = static_cast(task_def.type()); @@ -408,7 +413,7 @@ Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask * return ACL_ERROR_GE_INTERNAL_ERROR; } - auto *tbe_task = new (std::nothrow) TbeOpTask(); + std::unique_ptr tbe_task(new (std::nothrow) TbeOpTask()); if (tbe_task == nullptr) { GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Create][TbeOpTask]failed."); REPORT_INNER_ERROR("E19999", "BuildKernelTask fail for new TbeOpTask."); @@ -418,12 +423,41 @@ Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask * auto builder = TbeTaskBuilder(model_name_, iter->second, task_def); auto ret = builder.BuildTask(*tbe_task, model_params_); if (ret != SUCCESS) { - delete tbe_task; - tbe_task = nullptr; + GELOGE(ret, "[Build][TbeOpTask]failed."); + REPORT_INNER_ERROR("E19999", "[Build][TbeOpTask]failed."); + return ret; + } + + *task = tbe_task.release(); + return SUCCESS; +} + +Status SingleOpModel::BuildAtomicTask(const domi::TaskDef &task_def, AtomicOpTask **task) { + GE_CHECK_NOTNULL(task); + const auto &context = task_def.kernel().context(); + auto iter = op_list_.find(context.op_index()); + if (iter == op_list_.end()) { + GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Param:TaskDef]op desc not found. op index = %u", context.op_index()); + REPORT_INNER_ERROR("E19999", "BuildKernelTask fail for op desc not found. 
op index = %u", context.op_index()); + return ACL_ERROR_GE_INTERNAL_ERROR; + } + + std::unique_ptr atomic_task(new (std::nothrow) AtomicOpTask()); + if (atomic_task == nullptr) { + GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Create][AtomicOpTask]failed."); + REPORT_INNER_ERROR("E19999", "BuildKernelTask fail for new AtomicOpTask."); + return ACL_ERROR_GE_MEMORY_ALLOCATION; + } + + auto builder = AtomicTaskBuilder(model_name_, iter->second, task_def); + auto ret = builder.BuildTask(*atomic_task, model_params_); + if (ret != SUCCESS) { + GELOGE(ret, "[Build][AtomicOpTask]failed."); + REPORT_INNER_ERROR("E19999", "[Build][AtomicOpTask]failed."); return ret; } - *task = tbe_task; + *task = atomic_task.release(); return SUCCESS; } @@ -536,9 +570,21 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph()); GE_CHECK_NOTNULL(compute_graph); single_op.compute_graph_ = compute_graph; - if (tbe_tasks_.size() > 0) { - const auto &task_def = tbe_tasks_[0]; + + GE_CHK_BOOL_RET_STATUS(node_tasks_.size() == 1, ACL_ERROR_GE_PARAM_INVALID, + "[Check][Size]Node size must be 1, but get %zu.", node_tasks_.size()); + auto iter = node_tasks_.begin(); + auto node = iter->first; + auto task_defs = iter->second; + GE_CHK_BOOL_RET_STATUS(task_defs.size() > 0 && task_defs.size() <= kNumTaskWithAtomicAddrCleanTask, + ACL_ERROR_GE_PARAM_INVALID, "[Check][Size]task_defs size must be 1 or 2, but get %zu.", task_defs.size()); + GE_CHECK_NOTNULL(node); + auto op_desc = node->GetOpDesc(); + GE_CHECK_NOTNULL(op_desc); + const auto &lib_name = op_desc->GetOpKernelLibName(); + if (lib_name == kEngineNameAiCore) { GELOGD("Building TBE task."); + const auto &task_def = task_defs.back(); TbeOpTask *tbe_task = nullptr; GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def, &tbe_task)); tbe_task->SetModelArgs(model_name_, model_id_); @@ -546,37 +592,43 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, GELOGD("tiling buffer is not nullptr."); tbe_task->stream_resource_ = stream_resource; } + if (task_defs.size() == kNumTaskWithAtomicAddrCleanTask) { + const auto &atomic_task_def = task_defs.front(); + AtomicOpTask *atomic_task = nullptr; + GE_CHK_STATUS_RET_NOLOG(BuildAtomicTask(atomic_task_def, &atomic_task)); + atomic_task->InitAtomicAddrCleanIndices(); + tbe_task->SetAtomicTask(atomic_task); + } single_op.op_task_.reset(tbe_task); - } else if (aicpu_tasks_.size() > 0) { - const auto &task_def = aicpu_tasks_[0]; - auto task_type = static_cast(task_def.type()); - if (task_type == RT_MODEL_TASK_KERNEL) { - GELOGD("Building AICPU_CC task"); - OpTask *task = nullptr; - uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; - GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id); - GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id)); - task->SetModelArgs(model_name_, model_id_); - single_op.op_task_.reset(task); - } else if (task_type == RT_MODEL_TASK_KERNEL_EX) { - GELOGD("Building AICPU_TF task"); - AiCpuTask *aicpu_task = nullptr; - uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; - GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id); - GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, dynamic_singleop_kernel_id)); - if (aicpu_task->GetUnknownType() == DEPEND_COMPUTE) { - if (aicpu_tasks_.size() < 2) { - GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task 
of the fourth operator was not found."); - REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found."); - return ACL_ERROR_GE_PARAM_INVALID; - } - const TaskDef ©_task_def = aicpu_tasks_[1]; - GE_CHK_STATUS_RET_NOLOG(aicpu_task->SetMemCopyTask(copy_task_def.kernel_ex())); + } else if (lib_name == kEngineNameAiCpu) { + const auto &task_def = task_defs[0]; + GELOGD("Building AICPU_CC task"); + OpTask *task = nullptr; + uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; + GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id); + GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id)); + task->SetModelArgs(model_name_, model_id_); + single_op.op_task_.reset(task); + } else if (lib_name == kEngineNameAiCpuTf) { + const auto &task_def = task_defs[0]; + GELOGD("Building AICPU_TF task"); + AiCpuTask *aicpu_task = nullptr; + uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++; + GELOGI("Build dynamic singleOp TfTask, kernel_id = %lu", dynamic_singleop_kernel_id); + GE_CHK_STATUS_RET_NOLOG(BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, dynamic_singleop_kernel_id)); + if (aicpu_task->GetUnknownType() == DEPEND_COMPUTE) { + if (task_defs.size() < kNumTaskWithMemCpyTask) { + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the fourth operator was not found."); + REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found."); + return ACL_ERROR_GE_PARAM_INVALID; } - aicpu_task->SetModelArgs(model_name_, model_id_); - single_op.op_task_.reset(aicpu_task); + const TaskDef ©_task_def = task_defs[1]; + GE_CHK_STATUS_RET_NOLOG(aicpu_task->SetMemCopyTask(copy_task_def.kernel_ex())); } + aicpu_task->SetModelArgs(model_name_, model_id_); + single_op.op_task_.reset(aicpu_task); } + return SUCCESS; } @@ -585,9 +637,7 @@ Status SingleOpModel::NeedHybridModel(GeModelPtr &ge_model, bool &need_hybrid_mo bool is_host_mem = false; GE_CHK_STATUS_RET(CheckInferDepend(ge_model, is_infer_depend, is_host_mem), "[Check][InferDepend] failed."); bool need_d2h_cpy = is_infer_depend && !is_host_mem; - bool aicpu_multi_task = tbe_tasks_.size() >= 1 && aicpu_tasks_.size() >= 1; - bool aicore_multi_task = tbe_tasks_.size() > 1; - need_hybrid_model = need_d2h_cpy || aicore_multi_task || aicpu_multi_task; + need_hybrid_model = need_d2h_cpy || node_tasks_.size() > 1; return SUCCESS; } @@ -601,31 +651,27 @@ Status SingleOpModel::ParseTasks() { GELOGI("[%s] Task[%d], type = [%u], DebugString = [%s]", model_name_.c_str(), i, task_def.type(), task_def.DebugString().c_str()); auto task_type = static_cast(task_def.type()); + uint32_t op_index = 0; if (task_type == RT_MODEL_TASK_KERNEL) { - const auto &kernel_def = task_def.kernel(); - const auto &context = kernel_def.context(); - auto kernel_type = static_cast(context.kernel_type()); - if (kernel_type == ccKernelType::TE) { - tbe_tasks_.emplace_back(task_def); - } else if (kernel_type == ccKernelType::AI_CPU || kernel_type == ccKernelType::CUST_AI_CPU) { - aicpu_tasks_.emplace_back(task_def); - } else { - GELOGE(ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID, - "[Check][Param:TaskDef]Only TBE, AI_CPU, CUST_AI_CPU kernel are supported, but got %u", - context.kernel_type()); - REPORT_INNER_ERROR("E19999", - "BuildModelTaskKernel fail for got:%u not supported, Only TBE, AI_CPU, CUST_AI_CPU kernel are supported.", - context.kernel_type()); - return ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID; - } - } else if (task_type == RT_MODEL_TASK_ALL_KERNEL) { - 
tbe_tasks_.emplace_back(task_def); + op_index = task_def.kernel().context().op_index(); } else if (task_type == RT_MODEL_TASK_KERNEL_EX) { - aicpu_tasks_.emplace_back(task_def); + op_index = task_def.kernel_ex().op_index(); + } else if (task_type == RT_MODEL_TASK_ALL_KERNEL) { + op_index = task_def.kernel_with_handle().context().op_index(); } else { - // skip GELOGD("Skip task type: %d", static_cast(task_type)); + continue; + } + GELOGD("op_index = %u, task_type = %d", op_index, task_type); + + auto iter = op_list_.find(op_index); + if (iter == op_list_.end()) { + GELOGE(INTERNAL_ERROR, "[Find][Node]Failed to get node by op_index = %u", op_index); + REPORT_INNER_ERROR("E19999", "Failed to get node by op_index = %u.", op_index); + return INTERNAL_ERROR; } + auto &node = iter->second; + node_tasks_[node].emplace_back(task_def); } return SUCCESS; } diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index 45616d9a..83490f5f 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -69,6 +69,7 @@ class SingleOpModel { Status BuildTaskList(StreamResource *stream_resource, SingleOp &single_op); Status BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &dynamic_single_op); Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task); + Status BuildAtomicTask(const domi::TaskDef &task_def, AtomicOpTask **task); Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id); Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id); @@ -79,9 +80,7 @@ class SingleOpModel { Status NeedHybridModel(GeModelPtr &ge_model, bool &flag); Status ParseTasks(); - std::vector tbe_tasks_; - std::vector aicpu_tasks_; - + std::map> node_tasks_; std::string model_name_; uint32_t model_id_ = 0; const void *ori_model_data_; diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index 9b8ef739..dfdec750 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -27,7 +27,6 @@ #include "common/formats/formats.h" #include "common/math/math_util.h" #include "framework/common/debug/log.h" -#include "register/op_tiling.h" #include "runtime/rt.h" #include "single_op/task/build_task_utils.h" @@ -222,19 +221,26 @@ Status TbeOpTask::LaunchKernel(rtStream_t stream) { return SUCCESS; } -Status TbeOpTask::UpdateRunInfo() { - // invoke OpParaCalculate - GELOGD("Start to invoke OpParaCalculate."); - optiling::utils::OpRunInfo run_info(0, true, 0); +Status TbeOpTask::CalcTilingInfo(optiling::utils::OpRunInfo &run_info) { auto ret = optiling::OpParaCalculateV2(*node_, run_info); if (ret != GRAPH_SUCCESS) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpParaCalculate] failed, ret = %u.", ret); REPORT_INNER_ERROR("E19999", "invoke OpParaCalculate failed, ret = %u.", ret); return ACL_ERROR_GE_INTERNAL_ERROR; } + return SUCCESS; +} + +Status TbeOpTask::UpdateRunInfo() { + // invoke OpParaCalculate + GELOGD("Start to invoke OpParaCalculate."); + optiling::utils::OpRunInfo run_info(0, true, 0); + GE_CHK_STATUS_RET(CalcTilingInfo(run_info), "[Calc][TilingInfo]failed."); + block_dim_ = run_info.GetBlockDim(); tiling_data_ = run_info.GetAllTilingData().str(); tiling_key_ = run_info.GetTilingKey(); + clear_atomic_ = run_info.GetClearAtomic(); run_info.GetAllWorkspaces(run_info_workspaces_); GELOGD("Done invoking OpParaCalculate successfully. 
block_dim = %u, tiling size = %zu, tiling_key = %u", block_dim_, tiling_data_.size(), tiling_key_); @@ -263,6 +269,14 @@ Status TbeOpTask::UpdateTensorDesc(const GeTensorDesc &src_tensor, GeTensorDesc dst_tensor.SetOriginShape(src_tensor.GetShape()); } + int64_t size = 0; + graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(dst_tensor, size); + if (graph_status != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed!"); + GELOGE(graph_status, "[Get][TensorMemorySize] In Bytes failed!"); + return FAILED; + } + TensorUtils::SetSize(dst_tensor, size); return SUCCESS; } @@ -346,6 +360,17 @@ Status TbeOpTask::AllocateWorkspaces(const vector &workspace_sizes) { return SUCCESS; } +Status TbeOpTask::CheckAndExecuteAtomic(const vector &input_desc, + const vector &input_buffers, + vector &output_desc, + vector &output_buffers, + rtStream_t stream) { + if (clear_atomic_ && atomic_task_ != nullptr) { + return atomic_task_->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream); + } + return SUCCESS; +} + Status TbeOpTask::UpdateTilingArgs(rtStream_t stream) { size_t args_size = input_num_ + output_num_ + workspaces_.size(); if (tiling_buffer_ != nullptr) { @@ -433,6 +458,8 @@ Status TbeOpTask::LaunchKernel(const vector &input_desc, GE_CHK_STATUS_RET_NOLOG(UpdateNodeByShape(input_desc, output_desc)); GE_CHK_STATUS_RET_NOLOG(UpdateRunInfo()); GE_CHK_STATUS_RET(AllocateWorkspaces(run_info_workspaces_), "[Allocate][Workspaces] failed."); + GE_CHK_STATUS_RET(CheckAndExecuteAtomic(input_desc, input_buffers, output_desc, output_buffers, stream), + "[Execute][AtomicTask] failed."); GE_CHK_STATUS_RET(UpdateTilingArgs(stream), "[Update][TilingArgs] failed."); GELOGD("[%s] Start to invoke rtKernelLaunch", node_->GetName().c_str()); @@ -463,6 +490,70 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) { } } +Status AtomicOpTask::UpdateIoAddr(const vector &inputs, const vector &outputs) { + uintptr_t *arg_base = reinterpret_cast(args_.get()); + for (auto atomic_output_index : atomic_output_indices_) { + if (atomic_output_index >= static_cast(outputs.size())) { + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Update][Args] failed, atomic index must smaller then data size."); + REPORT_INNER_ERROR("E19999", "[Update][Args] failed, atomic index must smaller then data size."); + return ACL_ERROR_GE_PARAM_INVALID; + } + auto &output_buffer = outputs[atomic_output_index]; + *arg_base++ = reinterpret_cast(output_buffer.data); + } + return SUCCESS; +} + +Status AtomicOpTask::UpdateTilingArgs(rtStream_t stream) { + if (tiling_buffer_ != nullptr) { + GELOGD("[%s] Start to copy tiling info. 
size = %zu", node_->GetName().c_str(), tiling_data_.size()); + GE_CHK_RT_RET(rtMemcpyAsync(tiling_buffer_, max_tiling_size_, tiling_data_.data(), tiling_data_.size(), + RT_MEMCPY_HOST_TO_DEVICE_EX, stream)); + uintptr_t *arg_base = reinterpret_cast(args_.get()); + size_t idx = atomic_output_indices_.size(); + arg_base[idx] = reinterpret_cast(tiling_buffer_); + } + return SUCCESS; +} + +Status AtomicOpTask::CalcTilingInfo(optiling::utils::OpRunInfo &run_info) { + auto ret = optiling::OpAtomicCalculateV2(*node_, run_info); + if (ret != GRAPH_SUCCESS) { + GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpAtomicCalculate] failed, ret = %u.", ret); + REPORT_INNER_ERROR("E19999", "invoke OpAtomicCalculate failed, ret = %u.", ret); + return ACL_ERROR_GE_INTERNAL_ERROR; + } + return SUCCESS; +} + +Status AtomicOpTask::InitAtomicAddrCleanIndices() { + GELOGD("[%s] Start to setup AtomicAddrClean task.", op_desc_->GetName().c_str()); + std::vector atomic_output_indices; + (void) ge::AttrUtils::GetListInt(op_desc_, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices); + if (atomic_output_indices.empty()) { + GELOGE(INTERNAL_ERROR, "[Check][Size][%s] atomic_output_indices must not be empty.", op_desc_->GetName().c_str()); + REPORT_INNER_ERROR("E19999", "[%s] atomic_output_indices must not be empty.", op_desc_->GetName().c_str()); + return INTERNAL_ERROR; + } + + size_t max_arg_size = tiling_buffer_ == nullptr ? arg_size_ : arg_size_ - 1; + if (atomic_output_indices.size() > max_arg_size) { + GELOGE(INTERNAL_ERROR, "[Check][Size][%s] atomic_output_indices invalid. atomic_output_indices size is %zu," + "arg size is %zu.", op_desc_->GetName().c_str(), atomic_output_indices.size(), arg_size_); + REPORT_INNER_ERROR("E19999", "[%s] atomic_output_indices invalid. atomic_output_indices size is %zu," + "arg size is %zu.", op_desc_->GetName().c_str(), atomic_output_indices.size(), arg_size_); + return INTERNAL_ERROR; + } + + for (auto output_index : atomic_output_indices) { + GELOGD("[%s] Adding output index [%ld]", op_desc_->GetName().c_str(), output_index); + GE_CHECK_GE(output_index, 0); + GE_CHECK_LE(output_index, INT32_MAX); + atomic_output_indices_.emplace_back(static_cast(output_index)); + } + return SUCCESS; +} + AiCpuBaseTask::~AiCpuBaseTask() { if (ext_info_addr_dev_ != nullptr) { (void)rtFree(ext_info_addr_dev_); diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 085bb5ff..1e100a11 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -89,6 +89,7 @@ class TbeOpTask : public OpTask { void SetKernelArgs(std::unique_ptr &&args, size_t arg_size, uint32_t block_dim, const OpDescPtr &op_desc); void SetKernelWithHandleArgs(std::unique_ptr &&args, size_t arg_size, uint32_t block_dim, const OpDescPtr &op_desc, const domi::KernelDefWithHandle& kernel_def_with_handle); + void SetAtomicTask(OpTask *task) { atomic_task_.reset(task); } Status UpdateRunInfo() override; Status SetArgIndex(); @@ -100,6 +101,14 @@ class TbeOpTask : public OpTask { const std::string &GetTaskType() const override; void SetHandle(void *handle); + protected: + NodePtr node_; + std::unique_ptr args_; + size_t arg_size_ = 0; + void *tiling_buffer_ = nullptr; + uint32_t max_tiling_size_ = 0; + std::string tiling_data_; + private: friend class SingleOpModel; friend class TbeTaskBuilder; @@ -107,31 +116,46 @@ class TbeOpTask : public OpTask { Status UpdateNodeByShape(const vector &input_desc, const vector &output_desc); Status AllocateWorkspaces(const std::vector &workspace_sizes); - Status 
UpdateTilingArgs(rtStream_t stream); Status DoLaunchKernel(rtStream_t stream); - Status UpdateIoAddr(const vector &inputs, const vector &outputs); + Status CheckAndExecuteAtomic(const vector &input_desc, + const vector &input_buffers, + vector &output_desc, + vector &output_buffers, + rtStream_t stream); + virtual Status UpdateTilingArgs(rtStream_t stream); + virtual Status UpdateIoAddr(const vector &inputs, const vector &outputs); + virtual Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info); const void *stub_func_ = nullptr; - std::unique_ptr args_; - size_t arg_size_ = 0; void *sm_desc_ = nullptr; std::string stub_name_; - StreamResource *stream_resource_ = nullptr; - void *tiling_buffer_ = nullptr; - uint32_t max_tiling_size_ = 0; - std::string tiling_data_; + std::vector run_info_workspaces_; std::vector workspaces_; - NodePtr node_; uint32_t tiling_key_ = 0; + bool clear_atomic_ = false; void* handle_ = nullptr; std::string original_kernel_key_; std::string node_info_; std::vector arg_index_; // data index in args size_t input_num_; // include const input size_t output_num_; + + std::unique_ptr atomic_task_; +}; + +class AtomicOpTask : public TbeOpTask { + public: + Status InitAtomicAddrCleanIndices(); + + private: + Status UpdateIoAddr(const vector &inputs, const vector &outputs) override; + Status UpdateTilingArgs(rtStream_t stream) override; + Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override; + std::vector atomic_output_indices_; + }; class AiCpuBaseTask : public OpTask { diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc index c1bafed8..c5579a01 100644 --- a/ge/single_op/task/tbe_task_builder.cc +++ b/ge/single_op/task/tbe_task_builder.cc @@ -29,15 +29,8 @@ namespace ge { namespace { constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; constexpr char const *kAttrOpParamSize = "op_para_size"; +constexpr char const *kAttrAtomicOpParamSize = "atomic_op_para_size"; std::mutex g_reg_mutex; - -inline void GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) { - (void)AttrUtils::GetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name); -} - -inline TBEKernelPtr GetTbeKernel(const OpDescPtr &op_desc) { - return op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr()); -} } // namespace KernelHolder::KernelHolder(const char *stub_func, std::shared_ptr kernel_bin) @@ -96,7 +89,15 @@ TbeTaskBuilder::TbeTaskBuilder(const std::string &model_name, const NodePtr &nod task_def_(task_def), kernel_def_(task_def.kernel()), kernel_def_with_handle_(task_def.kernel_with_handle()), - stub_name_(model_name + "/" + node->GetName() + "_tvmbin") {} + model_name_(model_name) {} + +TBEKernelPtr TbeTaskBuilder::GetTbeKernel(const OpDescPtr &op_desc) const { + return op_desc->TryGetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr()); +} + +void TbeTaskBuilder::GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const { + (void)AttrUtils::GetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name); +} Status TbeTaskBuilder::DoRegisterBinary(const OpKernelBin &kernel_bin, void **bin_handle, const SingleOpModelParam ¶m) const { @@ -124,7 +125,7 @@ Status TbeTaskBuilder::DoRegisterBinary(const OpKernelBin &kernel_bin, void **bi Status TbeTaskBuilder::DoRegisterMeta(void *bin_handle) { std::string meta_data; - (void)AttrUtils::GetStr(op_desc_, TVM_ATTR_NAME_METADATA, meta_data); + (void)AttrUtils::GetStr(op_desc_, GetKeyForTvmMetaData(), meta_data); GELOGI("TBE: meta data: 
%s", meta_data.empty() ? "null" : meta_data.c_str()); if (!meta_data.empty()) { auto rt_ret = rtMetadataRegister(bin_handle, meta_data.c_str()); @@ -307,6 +308,15 @@ Status TbeTaskBuilder::GetSmDesc(void **sm_desc, const SingleOpModelParam ¶m return SUCCESS; } +Status TbeTaskBuilder::InitKernelArgs(void *arg_addr, size_t arg_size, const SingleOpModelParam ¶m) { + // copy args + std::vector tensor_device_addr_vec = BuildTaskUtils::GetKernelArgs(op_desc_, param); + void *src_addr = reinterpret_cast(tensor_device_addr_vec.data()); + uint64_t src_len = sizeof(void *) * tensor_device_addr_vec.size(); + GE_CHK_RT_RET(rtMemcpy(arg_addr, arg_size, src_addr, src_len, RT_MEMCPY_HOST_TO_HOST)); + return SUCCESS; +} + Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam ¶m, const OpDescPtr &op_desc) { auto task_type = static_cast(task_def_.type()); bool is_task_all_kernel = (task_type == RT_MODEL_TASK_ALL_KERNEL); @@ -331,12 +341,7 @@ Status TbeTaskBuilder::SetKernelArgs(TbeOpTask &task, const SingleOpModelParam & kernel_def_with_handle_.context() : kernel_def_.context(); const auto *args_offset_tmp = reinterpret_cast(context.args_offset().data()); uint16_t offset = *args_offset_tmp; - - // copy args - std::vector tensor_device_addr_vec = BuildTaskUtils::GetKernelArgs(op_desc_, param); - void *src_addr = reinterpret_cast(tensor_device_addr_vec.data()); - uint64_t src_len = sizeof(void *) * tensor_device_addr_vec.size(); - GE_CHK_RT_RET(rtMemcpy(args.get() + offset, arg_size - offset, src_addr, src_len, RT_MEMCPY_HOST_TO_HOST)); + GE_CHK_STATUS_RET_NOLOG(InitKernelArgs(args.get() + offset, arg_size - offset, param)); if (is_task_all_kernel) { task.SetKernelWithHandleArgs(std::move(args), arg_size, kernel_def_with_handle_.block_dim(), op_desc, @@ -367,8 +372,15 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam ¶ } auto task_type = static_cast(task_def_.type()); - ret = task_type == RT_MODEL_TASK_ALL_KERNEL ? 
RegisterKernelWithHandle(task, param) : - RegisterKernel(task, param); + if (task_type == RT_MODEL_TASK_ALL_KERNEL) { + stub_name_ = model_name_ + "/" + node_->GetName() + "_tvmbin"; + ret = RegisterKernelWithHandle(task, param); + } else { + const domi::KernelDef &kernel_def = task_def_.kernel(); + stub_name_ = model_name_ + "/" + kernel_def.stub_func() + "_tvmbin"; + ret = RegisterKernel(task, param); + } + task.SetHandle(handle_); if (ret != SUCCESS) { return ret; @@ -397,8 +409,8 @@ Status TbeTaskBuilder::BuildTask(TbeOpTask &task, const SingleOpModelParam ¶ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) { GELOGD("Start alloc tiling data of node %s.", op_desc_->GetName().c_str()); int64_t max_size = -1; - (void)AttrUtils::GetInt(op_desc_, kAttrOpParamSize, max_size); - GELOGD("Got op param size by key: %s, ret = %ld", kAttrOpParamSize, max_size); + (void)AttrUtils::GetInt(op_desc_, GetKeyForOpParamSize(), max_size); + GELOGD("Got op param size by key: %s, ret = %ld", GetKeyForOpParamSize().c_str(), max_size); if (max_size < 0) { GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Get][Int] %s Invalid op_param_size: %ld.", op_desc_->GetName().c_str(), max_size); @@ -439,4 +451,32 @@ Status TbeTaskBuilder::GetMagic(uint32_t &magic) const { return SUCCESS; } +std::string TbeTaskBuilder::GetKeyForOpParamSize() const { + return kAttrOpParamSize; +} + +std::string TbeTaskBuilder::GetKeyForTvmMetaData() const { + return TVM_ATTR_NAME_METADATA; +} + +Status AtomicTaskBuilder::InitKernelArgs(void *args_addr, size_t arg_size, const SingleOpModelParam ¶m) { + return SUCCESS; +} + +std::string AtomicTaskBuilder::GetKeyForOpParamSize() const { + return kAttrAtomicOpParamSize; +} + +std::string AtomicTaskBuilder::GetKeyForTvmMetaData() const { + return ATOMIC_ATTR_TVM_METADATA; +} + +void AtomicTaskBuilder::GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const { + (void)AttrUtils::GetStr(op_desc, op_desc->GetName() + "_atomic_kernelname", kernel_name); +} + +TBEKernelPtr AtomicTaskBuilder::GetTbeKernel(const OpDescPtr &op_desc) const { + return op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_TBE_KERNEL, TBEKernelPtr()); +} + } // namespace ge diff --git a/ge/single_op/task/tbe_task_builder.h b/ge/single_op/task/tbe_task_builder.h index 6252feea..833ab0e0 100755 --- a/ge/single_op/task/tbe_task_builder.h +++ b/ge/single_op/task/tbe_task_builder.h @@ -90,10 +90,17 @@ class HandleRegistry { class TbeTaskBuilder { public: TbeTaskBuilder(const std::string &model_name, const NodePtr &node, const domi::TaskDef &task_def); - ~TbeTaskBuilder() = default; + virtual ~TbeTaskBuilder() = default; Status BuildTask(TbeOpTask &task, const SingleOpModelParam ¶m); + protected: + virtual std::string GetKeyForOpParamSize() const; + virtual std::string GetKeyForTvmMetaData() const; + virtual TBEKernelPtr GetTbeKernel(const OpDescPtr &op_desc) const; + virtual void GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const; + virtual Status InitKernelArgs(void *args_addr, size_t arg_size, const SingleOpModelParam ¶m); + private: Status InitTilingInfo(TbeOpTask &task); Status SetKernelArgs(TbeOpTask &task, const SingleOpModelParam ¶m, const OpDescPtr &op_desc); @@ -114,9 +121,24 @@ class TbeTaskBuilder { const domi::TaskDef &task_def_; const domi::KernelDef &kernel_def_; const domi::KernelDefWithHandle &kernel_def_with_handle_; - const std::string stub_name_; + const std::string model_name_; + std::string stub_name_; void *handle_ = nullptr; }; + +class AtomicTaskBuilder : public TbeTaskBuilder { + public: 
+ AtomicTaskBuilder(const std::string &model_name, const NodePtr &node, const domi::TaskDef &task_def) + : TbeTaskBuilder(model_name, node, task_def) {} + ~AtomicTaskBuilder() override = default; + + protected: + std::string GetKeyForOpParamSize() const override; + std::string GetKeyForTvmMetaData() const override; + TBEKernelPtr GetTbeKernel(const OpDescPtr &op_desc) const override; + void GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const override; + Status InitKernelArgs(void *args_addr, size_t arg_size, const SingleOpModelParam ¶m) override; +}; } // namespace ge #endif // GE_SINGLE_OP_TASK_TBE_TASK_BUILDER_H_ diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index d1c51c67..1d1c4fa9 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -153,7 +153,6 @@ TEST_F(UtestGeHybrid, task_update_tiling_info) { ge::AttrUtils::SetStr(op_desc, "compile_info_json", "json"); ge::AttrUtils::SetBool(op_desc, "support_dynamicshape", true); ge::AttrUtils::SetInt(op_desc, "op_para_size", 1); - ge::AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF"); auto node = graph->AddNode(op_desc); std::unique_ptr node_item; diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index 23269814..ded1b465 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -40,6 +40,9 @@ using namespace ge; namespace { constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape"; +const char *const kEngineNameAiCore = "AIcoreEngine"; +const char *const kEngineNameAiCpu = "aicpu_ascend_kernel"; +const char *const kEngineNameAiCpuTf = "aicpu_tf_kernel"; } // namespace class UtestSingleOpModel : public testing::Test { @@ -222,6 +225,7 @@ TEST_F(UtestSingleOpModel, test_build_dynamic_op) { auto graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph); model.model_helper_.model_->SetGraph(graph); + model.op_list_[0] = transdata; auto op_desc = transdata->GetOpDesc(); const vector depend_names = { "Data" }; @@ -330,7 +334,10 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) { domi::TaskDef *task_def3 = model_task_def->add_task(); task_def3->set_type(RT_MODEL_TASK_ALL_KERNEL); - string model_data_str = "123456789"; + domi::TaskDef *task_def4 = model_task_def->add_task(); + task_def4->set_type(RT_MODEL_TASK_KERNEL); + + string model_data_str = "dynamic_model"; SingleOpModel model("model", model_data_str.c_str(), model_data_str.size()); std::mutex stream_mu; rtStream_t stream = nullptr; @@ -339,6 +346,7 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) { model.model_helper_.model_ = ge_model; auto op_desc = std::make_shared("add", "Add"); AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF"); + AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true); std::vector kernelBin; TBEKernelPtr tbe_kernel = std::make_shared("name/Add", std::move(kernelBin)); op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel); @@ -347,9 +355,15 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) { StreamResource *res = new (std::nothrow) StreamResource(1); ASSERT_EQ(model.ParseTasks(), SUCCESS); + model.node_tasks_[node] = { *task_def3, *task_def4 }; + op_desc->SetOpKernelLibName(kEngineNameAiCore); + model.BuildTaskListForDynamicOp(res, single_op); + + model.node_tasks_[node] = { *task_def }; + op_desc->SetOpKernelLibName(kEngineNameAiCpuTf); 
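+  // Assumption from this test's setup: the op kernel lib name set just above selects
+  // the build path inside BuildTaskListForDynamicOp -- AIcoreEngine routes queued task
+  // defs to the TBE/atomic builders, aicpu_tf_kernel to the kernel-ex (AiCpuTask)
+  // builder, and aicpu_ascend_kernel to the CPU-kernel builder.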
ASSERT_EQ(model.BuildTaskListForDynamicOp(res, single_op), SUCCESS); - model.tbe_tasks_.clear(); - ASSERT_EQ(model.BuildTaskListForDynamicOp(res, single_op), SUCCESS); - model.aicpu_tasks_[0] = *task_def2; + + model.node_tasks_[node] = { *task_def2 }; + op_desc->SetOpKernelLibName(kEngineNameAiCpu); model.BuildTaskListForDynamicOp(res, single_op); } diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 2424d209..51ef928f 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -154,3 +154,30 @@ TEST_F(UtestSingleOpTask, test_update_ioaddr) { task.tiling_buffer_ = nullptr; } +TEST_F(UtestSingleOpTask, test_atomic_exec) { + auto graph = make_shared<ComputeGraph>("graph"); + auto op_desc = make_shared<OpDesc>("Add", "Add"); + auto node = graph->AddNode(op_desc); + + AtomicOpTask task; + task.op_desc_ = op_desc; + task.node_ = node; + + vector<DataBuffer> inputs; + vector<DataBuffer> outputs; + task.atomic_output_indices_ = { 0 }; + task.arg_size_ = sizeof(void *) * 2; + task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]); + ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID); + + ge::DataBuffer data_buffer; + outputs = { data_buffer }; + ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS); + + task.tiling_buffer_ = (void *)0x0001; + ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS); + task.tiling_buffer_ = nullptr; + + optiling::utils::OpRunInfo run_info(0, true, 0); + task.CalcTilingInfo(run_info); +} From 957c7fad6b5ba0b6fd24999b1e59cccf4ac81073 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Mon, 5 Jul 2021 21:23:03 +0800 Subject: [PATCH 157/226] submodule update --- metadef | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadef b/metadef index 3e14f92d..2f89122e 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 3e14f92d47abc9a2e703be2171f047553f7597e0 +Subproject commit 2f89122e1fa26b3633a8efa4bf0a0269bebf537e From 8e6c104db91e4875db542725326e3dd09ed7d419 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Mon, 5 Jul 2021 21:53:27 +0800 Subject: [PATCH 158/226] Fix ut.
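The atomic-clean setup can legitimately fail: InitAtomicAddrCleanIndices() returns
INTERNAL_ERROR when ATOMIC_ATTR_OUTPUT_INDEX is empty or the indices do not fit the
arg table, so its status must not be dropped at the call site. A minimal sketch of
the fixed call site (GE_CHK_STATUS_RET_NOLOG returns the failing status to the caller):

    AtomicOpTask *atomic_task = nullptr;
    GE_CHK_STATUS_RET_NOLOG(BuildAtomicTask(atomic_task_def, &atomic_task));
    // Propagate setup failures instead of silently ignoring the return value.
    GE_CHK_STATUS_RET_NOLOG(atomic_task->InitAtomicAddrCleanIndices());
    tbe_task->SetAtomicTask(atomic_task);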
--- ge/single_op/single_op_model.cc | 2 +- tests/ut/ge/single_op/single_op_model_unittest.cc | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index f8831884..a5547b39 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -596,7 +596,7 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, const auto &atomic_task_def = task_defs.front(); AtomicOpTask *atomic_task = nullptr; GE_CHK_STATUS_RET_NOLOG(BuildAtomicTask(atomic_task_def, &atomic_task)); - atomic_task->InitAtomicAddrCleanIndices(); + GE_CHK_STATUS_RET_NOLOG(atomic_task->InitAtomicAddrCleanIndices()); tbe_task->SetAtomicTask(atomic_task); } single_op.op_task_.reset(tbe_task); diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc index ded1b465..7b7a05d8 100644 --- a/tests/ut/ge/single_op/single_op_model_unittest.cc +++ b/tests/ut/ge/single_op/single_op_model_unittest.cc @@ -346,7 +346,6 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) { model.model_helper_.model_ = ge_model; auto op_desc = std::make_shared("add", "Add"); AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF"); - AttrUtils::SetBool(op_desc, kAttrSupportDynamicShape, true); std::vector kernelBin; TBEKernelPtr tbe_kernel = std::make_shared("name/Add", std::move(kernelBin)); op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel); From 5473a654210a312599abb2bee47b8847e311566b Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Mon, 5 Jul 2021 22:08:22 +0800 Subject: [PATCH 159/226] Split ge_executor from ge_compiler --- ge/CMakeLists.txt | 2 + ge/common/executor.h | 89 +++ ge/graph/common/local_context.cc | 17 +- ge/graph/common/local_context.h | 17 + ge/graph/execute/graph_execute.cc | 11 - ge/graph/execute/graph_execute.h | 4 - ge/graph/execute/model_executor.cc | 553 ++++++++++++++++ ge/graph/execute/model_executor.h | 139 ++++ ge/graph/load/model_manager/model_manager.cc | 9 +- ge/graph/manager/graph_manager.cc | 626 ++---------------- ge/graph/manager/graph_manager.h | 63 +- ge/graph/manager/graph_manager_utils.h | 6 + ge/session/inner_session.cc | 44 +- ge/session/inner_session.h | 5 + ge/single_op/task/op_task.h | 2 +- tests/ut/ge/CMakeLists.txt | 5 +- .../ge/common/datatype_transfer_unittest.cc | 22 +- .../graph/execute/model_executor_unittest.cc | 327 +++++++++ .../graph/manager/graph_manager_unittest.cc | 124 ++-- .../gather_v2_kernel_unittest.cc | 32 +- .../mark_node_unknown_shape_pass_unittest.cc | 2 +- .../passes/multi_batch_clone_pass_unittest.cc | 2 +- .../subgraph_const_migration_pass_unittest.cc | 2 +- 23 files changed, 1362 insertions(+), 741 deletions(-) create mode 100644 ge/common/executor.h create mode 100644 ge/graph/execute/model_executor.cc create mode 100644 ge/graph/execute/model_executor.h create mode 100644 tests/ut/ge/graph/execute/model_executor_unittest.cc diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 4a296e87..8fcf97ef 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -373,6 +373,7 @@ set(TRAIN_SRC_LIST "opskernel_manager/ops_kernel_builder_manager.cc" "session/inner_session.cc" "session/session_manager.cc" + "graph/execute/model_executor.cc" "single_op/single_op.cc" "single_op/single_op_manager.cc" "single_op/single_op_model.cc" @@ -475,6 +476,7 @@ set(INFER_SRC_LIST "init/gelib.cc" "session/inner_session.cc" "session/session_manager.cc" + "graph/execute/model_executor.cc" 
"engine_manager/dnnengine_manager.cc" "opskernel_manager/ops_kernel_manager.cc" "opskernel_manager/ops_kernel_builder_manager.cc" diff --git a/ge/common/executor.h b/ge/common/executor.h new file mode 100644 index 00000000..7f1d7ef9 --- /dev/null +++ b/ge/common/executor.h @@ -0,0 +1,89 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef GE_COMMON_EXECUTOR_H +#define GE_COMMON_EXECUTOR_H + +#include "external/ge/ge_api_types.h" +#include "graph/ge_local_context.h" +#include "graph/manager/graph_manager_utils.h" + +namespace ge { +struct RunArgs { + GraphNodePtr graph_node; + GraphId graph_id; + uint64_t session_id; + struct error_message::Context error_context; + std::vector input_tensor; + GeRootModelPtr ge_root_model; + GEThreadLocalContext context; + RunAsyncCallback callback; +}; + +class Executor { + public: + /// + /// @ingroup ge + /// @brief Load mode from graph. + /// @param [in] GeRootModel: root model of graph compiled. + /// @param [in] GraphNode: node of graph. + /// @return Status result of function + /// + virtual Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) = 0; + + /// + /// @ingroup ge + /// @brief Unload mode. + /// @param [in] GeRootModel: root model of graph compiled. + /// @param [in] graph_id: graph identifier. + /// @return Status result of function + /// + virtual Status UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) = 0; + + /// + /// @ingroup ge + /// @brief Push model execution params to queue. + /// @param [in] RunArgs of for model execution. + /// @return Status result of function + /// + virtual Status PushGraph(const RunArgs &args) = 0; + + /// + /// @ingroup ge + /// @brief Run graph for synchronize model. + /// @param [in] graph_node: node of graph. + /// @param [in] graph_id: graph identifier. + /// @param [in] inputs: input data for the graph running. + /// @param [out] outputs: output data of the graph running + /// @return Status result of function + /// + virtual Status RunGraph(const GraphNodePtr &graph_node, GraphId graph_id, + const std::vector &inputs, std::vector &outputs) = 0; + + /// + /// @ingroup ge + /// @brief Run graph for NN synchronize model. + /// @param [in] graph_node: node of graph. + /// @param [in] graph_id: graph identifier. + /// @param [in] stream: Stream for model running. + /// @param [in] inputs: input data for the graph running. 
+ /// @param [out] outputs: output data of the graph running + /// @return Status result of function + /// + virtual Status RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream, + const std::vector &inputs, std::vector &outputs) = 0; +}; +} +#endif // GE_COMMON_EXECUTOR_H diff --git a/ge/graph/common/local_context.cc b/ge/graph/common/local_context.cc index fa2f78e0..bd747021 100644 --- a/ge/graph/common/local_context.cc +++ b/ge/graph/common/local_context.cc @@ -16,13 +16,12 @@ #include "graph/common/local_context.h" -#include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" -#include "framework/omg/omg_inner_types.h" namespace ge { namespace { thread_local OmgContext *omg_context = nullptr; +thread_local OmeContext *ome_context = nullptr; } void SetLocalOmgContext(OmgContext &context) { @@ -37,4 +36,18 @@ OmgContext &GetLocalOmgContext() { return domi::GetContext(); } } + +void SetLocalOmeContext(OmeContext &context) { + ome_context = &context; +} + +OmeContext &GetLocalOmeContext() { + if (ome_context != nullptr) { + return *ome_context; + } + + GELOGW("ome_context is nullptr."); + static OmeContext context; + return context; +} } diff --git a/ge/graph/common/local_context.h b/ge/graph/common/local_context.h index 4aa95855..751c6692 100644 --- a/ge/graph/common/local_context.h +++ b/ge/graph/common/local_context.h @@ -22,5 +22,22 @@ namespace ge { void SetLocalOmgContext(OmgContext &context); OmgContext &GetLocalOmgContext(); + + +struct OmeContext { + bool need_multi_batch = false; + std::string dynamic_node_type; + std::vector data_nodes; + std::vector getnext_nosink_nodes; + std::vector dynamic_shape_dims; + std::vector>> user_input_dims; + std::vector> user_real_input_dims; +}; + +GE_FUNC_VISIBILITY +void SetLocalOmeContext(OmeContext &context); + +GE_FUNC_VISIBILITY +OmeContext &GetLocalOmeContext(); } // namespace ge #endif // GE_GRAPH_COMMON_LOCAL_CONTEXT_H_ diff --git a/ge/graph/execute/graph_execute.cc b/ge/graph/execute/graph_execute.cc index 02d7d3ca..ba35e7c0 100755 --- a/ge/graph/execute/graph_execute.cc +++ b/ge/graph/execute/graph_execute.cc @@ -31,7 +31,6 @@ GraphExecutor::GraphExecutor() sync_run_mutex_(nullptr), condition_(nullptr), graph_run_listener_(nullptr), - graph_context_(nullptr), last_graph_id_(UINT32_MAX), malloc_flag_(false) {} @@ -79,16 +78,6 @@ Status GraphExecutor::SetCondition(std::mutex *mutex, std::condition_variable *c return SUCCESS; } -Status GraphExecutor::SetGraphContext(GraphContextPtr graph_context_ptr) { - if (graph_context_ptr == nullptr) { - REPORT_INNER_ERROR("E19999", "Check param graph_context_ptr nullptr"); - GELOGE(GE_GRAPH_PARAM_NULLPTR, "[Check][Param] input param graph_context_ptr is nullptr"); - return GE_GRAPH_PARAM_NULLPTR; - } - graph_context_ = graph_context_ptr; - return SUCCESS; -} - Status GraphExecutor::SetDynamicSize(uint32_t model_id, const std::vector &batch_num, int32_t dynamic_type) { auto model_manager = ge::ModelManager::GetInstance(); GE_CHECK_NOTNULL(model_manager); diff --git a/ge/graph/execute/graph_execute.h b/ge/graph/execute/graph_execute.h index 879a124c..b6d56dff 100755 --- a/ge/graph/execute/graph_execute.h +++ b/ge/graph/execute/graph_execute.h @@ -60,8 +60,6 @@ class GraphExecutor { Status SetCondition(std::mutex *mutex, std::condition_variable *cond, std::shared_ptr listener); - Status SetGraphContext(GraphContextPtr graph_context_ptr); - static Status SetDynamicSize(uint32_t model_id, const std::vector &batch_num, int32_t dynamic_type); 
void SetTrainFlag(bool is_train_graph); @@ -160,8 +158,6 @@ class GraphExecutor { // Run graph asynchronous call back listener std::shared_ptr graph_run_listener_; - GraphContextPtr graph_context_; - std::vector outputs_desc_; GraphId last_graph_id_; diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc new file mode 100644 index 00000000..50e8a5a5 --- /dev/null +++ b/ge/graph/execute/model_executor.cc @@ -0,0 +1,553 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "graph/execute/model_executor.h" + +#include "graph/ge_context.h" +#include "graph/debug/ge_attr_define.h" +#include "graph/common/ge_call_wrapper.h" +#include "graph/common/local_context.h" +#include "graph/manager/graph_var_manager.h" +#include "graph/utils/tensor_adapter.h" +#include "graph/load/graph_loader.h" +#include "common/math/math_util.h" +#include "common/formats/utils/formats_trans_utils.h" + +namespace { +constexpr int32_t kBase = 10; +constexpr uint8_t kNeverLoaded = 0; +} + +namespace ge { +/// +/// @ingroup ge +/// @brief graph executor init +/// @param [in] options user config params +/// @return Status result of function +/// +Status ModelExecutor::Initialize(const map &options) { + graph_run_listener_ = MakeShared(sync_run_mutex_, condition_); + if (graph_run_listener_ == nullptr) { + REPORT_CALL_ERROR("E19999", "New GraphModelListener fail"); + GELOGE(MEMALLOC_FAILED, "[New][GraphModelListener] failed"); + return MEMALLOC_FAILED; + } + + train_graph_flag_ = ParseTrainGraphFlag(); + thread_run_flag_.store(true); + run_thread_ = std::thread(&ModelExecutor::RunThread, this); + + init_flag_ = true; + return SUCCESS; +} + +/// +/// @ingroup ge +/// @brief graph executor finalize +/// @return Status result of function +/// +Status ModelExecutor::Finalize() { + if (!init_flag_) { + GELOGW("ModelExecutor has not been initialized."); + return SUCCESS; + } + + StopQueue(); + if (run_thread_.joinable()) { + run_thread_.join(); + } + + if (graph_executor_.FreeExecuteMemory() != SUCCESS) { + GELOGW("Graph executor FreeExecuteMemory failed, resources may not be released correctly."); + } + + return SUCCESS; +} + +// OPTION_GRAPH_RUN_MODE is supposed to be a session-level option, but it used to be set to global-level in the past. +// If can not parse from session, it can parse from global by GetContext(). 
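+// For illustration (hypothetical option values; OPTION_GRAPH_RUN_MODE is the
+// "ge.graphRunMode" key): a session created with {{"ge.graphRunMode", "1"}} makes
+// strtol() yield 1, GraphRunMode(1) >= TRAIN holds, and ParseTrainGraphFlag() below
+// returns true; an unset or "0" option keeps inference mode.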
+bool ModelExecutor::ParseTrainGraphFlag() { + string run_mode; + if (GetContext().GetOption(OPTION_GRAPH_RUN_MODE, run_mode) == SUCCESS && !run_mode.empty()) { + if (GraphRunMode(std::strtol(run_mode.c_str(), nullptr, kBase)) >= TRAIN) { + GELOGI("Graph train flag set."); + return true; + } + } + return false; +} + +void ModelExecutor::AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node) { + std::lock_guard<std::mutex> lock(mutex_); + graph_nodes_.emplace(graph_id, graph_node); +} + +void ModelExecutor::RemoveGraphNode(GraphId graph_id) { + std::lock_guard<std::mutex> lock(mutex_); + graph_nodes_.erase(graph_id); +} + +/// +/// @ingroup ge +/// @brief Load model for graph. +/// @param [in] GeRootModel: root model of graph compiled. +/// @param [in] GraphNode: node of graph. +/// @return Status result of function +/// +Status ModelExecutor::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) { + GE_CHECK_NOTNULL(graph_node); + if (ge_root_model == nullptr) { + return SUCCESS; + } + + UpdateLocalOmeContext(graph_node); + return graph_node->IsAsync() ? ModelLoadAsync(ge_root_model, graph_node) : ModelLoadSync(ge_root_model, graph_node); +} + +/// +/// @ingroup ge +/// @brief Unload model for graph. +/// @param [in] GeRootModel: root model of graph compiled. +/// @param [in] graph_id: graph identifier. +/// @return Status result of function +/// +Status ModelExecutor::UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) { + GE_CHECK_NOTNULL(ge_root_model); + rtError_t rt_ret = rtSetDevice(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + GELOGW("[GraphExecutor] rtSetDevice failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), graph_id); + return FAILED; + } + + RemoveGraphNode(graph_id); + Status ret = UnloadModel(ge_root_model, graph_id); + if (ret != SUCCESS) { + GELOGW("[GraphExecutor] unload model failed, graph_id=%u.", graph_id); + } + rt_ret = rtDeviceReset(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + GELOGW("[GraphExecutor] rtDeviceReset failed, graphId=%u.", graph_id); + } + + return ret; +} + +Status ModelExecutor::UnloadModel(const GeRootModelPtr &ge_root_model, uint32_t graph_id) { + GE_CHECK_NOTNULL(ge_root_model); + for (size_t i = 0; i < ge_root_model->GetAllModelId().size(); ++i) { + uint32_t model_id = ge_root_model->GetAllModelId()[i]; + GELOGI("Unload model %u.", model_id); + Status ret = GraphLoader::UnloadModel(model_id); + if (ret != SUCCESS) { + GELOGE(ret, "[GraphExecutor] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id); + return ret; + } + } + return SUCCESS; +} + +void ModelExecutor::StopQueue() { + thread_run_flag_.store(false); + run_args_q_.Stop(); +} + +void ModelExecutor::ReturnError(RunAsyncCallback callback, Status ret, const string &log) { + StopQueue(); + GELOGE(ret, "%s.", log.c_str()); + std::vector<ge::Tensor> outputs; + callback(ret, outputs); +} + +void ModelExecutor::UpdateLocalOmeContext(const GraphNodePtr &graph_node) { + std::lock_guard<std::mutex> lock(mutex_); + SetLocalOmeContext(graph_node->GetOmeContext()); +} + +/// +/// @ingroup ge +/// @brief Push model execution params to queue. +/// @param [in] RunArgs for model execution. +/// @return Status result of function +/// +Status ModelExecutor::PushGraph(const RunArgs &args) { + return run_args_q_.Push(args) ?
SUCCESS : FAILED; +} + +void ModelExecutor::RunThread() { + ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); + if (prctl(PR_SET_NAME, ("GE_Run")) != 0) { + GELOGW("Set thread name failed."); + } + + RunArgs args; + while (thread_run_flag_) { + if (!run_args_q_.Pop(args)) { + continue; + } + + GELOGI("[RunThread] A new loop start, graph_id:%u.", args.graph_id); + ErrorManager::GetInstance().SetErrorContext(args.error_context); + GetContext().SetSessionId(args.session_id); + GetThreadLocalContext() = args.context; + UpdateLocalOmeContext(args.graph_node); + + // parse inputs.dims to vector> dynamic_dims + Status ret = ParseInputsDims(args.input_tensor); + if (ret != SUCCESS) { + ReturnError(args.callback, ret, "ParseInputsDims failed, thread exit."); + args.graph_node->Unlock(); + return; + } + + args.graph_node->UpdateLoadFlag(); + if (!args.graph_node->GetLoadFlag()) { + ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad); + args.ge_root_model->SetTrainFlag(train_graph_flag_); + ret = ModelLoadAsync(args.ge_root_model, args.graph_node); + if (ret != SUCCESS || args.ge_root_model == nullptr) { + StopQueue(); + ReturnError(args.callback, ret, "LoadGraphAsync failed, thread exit."); + args.graph_node->Unlock(); + return; + } + // control the times of graph loading in multi-thread scenario + args.graph_node->DecreaseLoadCount(); + args.graph_node->IncreaseLoadRecord(); + + args.graph_node->SetLoadFlag(true); + GELOGI("LoadGraph[%u], model[%u] success and set LoadFlag to true.", args.graph_node->GetGraphId(), + args.ge_root_model->GetModelId()); + } + + ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); + if (train_graph_flag_) { + graph_executor_.SetTrainFlag(train_graph_flag_); + } + + ret = graph_executor_.ExecuteGraphAsync(args.graph_id, args.graph_node->GetGeRootModel(), + args.input_tensor, args.callback); + args.graph_node->SetRunFlag(false); + if (ret != SUCCESS) { + ReturnError(args.callback, ret, "ExecuteGraphAsync failed, thread exit."); + args.graph_node->Unlock(); + return; + } + args.graph_node->Unlock(); + GELOGI("[GraphExecutor] Run graph async success, graph_id=%u.", args.graph_id); + } +} + +/// +/// @ingroup ge +/// @brief Run graph for synchronize model. +/// @param [in] graph_node: node of graph. +/// @param [in] graph_id: graph identifier. +/// @param [in] inputs: input data for the graph running. +/// @param [out] outputs: output data of the graph running +/// @return Status result of function +/// +Status ModelExecutor::RunGraph(const GraphNodePtr &graph_node, GraphId graph_id, + const std::vector &inputs, std::vector &outputs) { + Status ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_); + if (ret != SUCCESS) { + GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph_id = %u.", graph_id); + graph_node->SetRunFlag(false); + return GE_GRAPH_RUNGRAPH_FAILED; + } + + if (train_graph_flag_) { + graph_executor_.SetTrainFlag(train_graph_flag_); + } + ret = graph_executor_.ExecuteGraph(graph_id, graph_node->GetGeRootModel(), inputs, outputs); + + graph_node->SetRunFlag(false); + if (ret != SUCCESS) { + GELOGE(ret, "[Execute][Graph] failed, graph_id = %u.", graph_id); + return ret; + } + return SUCCESS; +} + +/// +/// @ingroup ge +/// @brief Run graph for NN synchronize model. +/// @param [in] graph_node: node of graph. +/// @param [in] graph_id: graph identifier. 
+/// @param [in] stream: Stream for model running. +/// @param [in] inputs: input data for the graph running. +/// @param [out] outputs: output data of the graph running +/// @return Status result of function +/// +Status ModelExecutor::RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream, + const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) { + auto ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_); + if (ret != SUCCESS) { + GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph id = %u, stream = %p.", graph_id, stream); + graph_node->SetRunFlag(false); + return GE_GRAPH_RUNGRAPH_FAILED; + } + + ret = graph_executor_.ExecuteGraphWithStream(graph_id, stream, graph_node->GetGeRootModel(), inputs, outputs); + graph_node->SetRunFlag(false); + graph_node->SetIsSpecificStream(false); + if (ret != SUCCESS) { + GELOGE(ret, "[Execute][Graph] With Stream failed, graph id = %u, stream = %p.", graph_id, stream); + return ret; + } + GELOGI("[Run][GraphWithStreamAsync] run graph success, graph id = %u, stream = %p.", graph_id, stream); + return SUCCESS; +} + +Status ModelExecutor::ModelLoadSync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) { + ge_root_model->SetIsSpecificStream(graph_node->IsSpecificStream()); + return ModelLoad(ge_root_model, graph_node, graph_run_listener_); +} + +Status ModelExecutor::ModelLoadAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) { + auto listener = MakeShared<RunAsyncListener>(); + GE_CHECK_NOTNULL(listener); + return ModelLoad(ge_root_model, graph_node, listener); +} + +Status ModelExecutor::ModelLoad(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node, + const std::shared_ptr<ModelListener> &listener) { + ge_root_model->SetTrainFlag(train_graph_flag_); + bool is_unknown_shape = false; + GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape)); + if (!is_unknown_shape) { + if (getenv(kEnvGeuseStaticMemory) != nullptr) { + GELOGI("[LoadGraph] GE_USE_STATIC_MEMORY is set."); + } else { + auto root_graph = ge_root_model->GetRootGraph(); + GE_CHECK_NOTNULL(root_graph); + auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel(); + GeModelPtr ge_model = name_to_model[root_graph->GetName()]; + GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node)); + } + } + GE_TIMESTAMP_START(LoadModelOnline); + uint32_t model_id = INVALID_MODEL_ID; + Status ret = GraphLoader::LoadModelOnline(model_id, ge_root_model, listener); + GE_TIMESTAMP_EVENT_END(LoadModelOnline, "GraphLoader::LoadModelOnline"); + if (ret != SUCCESS) { + GELOGE(ret, "[Load][ModelOnline] Failed, model_id:%u", model_id); + graph_node->SetRunFlag(false); + return ret; + } + graph_node->SetLoadFlag(true); + ge_root_model->SetModelId(model_id); + graph_node->SetGeRootModel(ge_root_model); + AddGraphNode(graph_node->GetGraphId(), graph_node); + return SUCCESS; +} + +void ModelExecutor::ReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node, + const std::vector<uint32_t> &model_ids, uint32_t graph_id, uint64_t session_id) { + rtError_t rt_ret = rtSetDevice(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u", GetContext().DeviceId()); + GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id=%u.", GetContext().DeviceId()); + return; + } + for (auto model_id : model_ids) { + uint64_t max_memory_size = 0; + Status result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size); + if (result != SUCCESS) { + continue; + } + GELOGI("try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id, max_memory_size); + if (model_ids.size() > 1) { + result = ge_model->GetSessionId(model_id, session_id); + if (result != SUCCESS) { + GELOGW("[GraphExecutor:] get session failed when dynamic memory, modelId=%u, graphId=%u.", model_id, + graph_id); + continue; + } + } + result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0); + if (result != SUCCESS) { + GELOGW("[GraphExecutor:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id, + graph_id); + } + result = GraphLoader::UnloadModel(model_id); + if (result != SUCCESS) { + GELOGW("[GraphExecutor:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id); + } + GELOGI("UnloadGraph[%u], model[%u] success.", graph_id, model_id); + } + graph_node->SetLoadFlag(false); + // Allow model to be loaded again without adding graph again + graph_node->SetLoadCount(graph_node->GetLoadRecord()); + graph_node->SetLoadRecord(kNeverLoaded); + GeRootModelPtr ge_root_model = graph_node->GetGeRootModel(); + if (ge_root_model == nullptr) { + GELOGW("ge_root_model is null, graph_id:%u", graph_id); + return; + } + ge_root_model->ClearAllModelId(); + rt_ret = rtDeviceReset(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u", GetContext().DeviceId()); + GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u.", GetContext().DeviceId()); + return; + } +} + +Status ModelExecutor::CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node) { + GELOGI("graph_id[%u]", graph_node->GetGraphId()); + int64_t free_memory = 0; + Status result = GraphLoader::GetMemoryInfo(free_memory); + if (result != SUCCESS) { + return result; + } + + int64_t value = 0; + int64_t memory_size = AttrUtils::GetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, value) ? value : 0; + int64_t weight_size = AttrUtils::GetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, value) ? value : 0; + int64_t session_id = AttrUtils::GetInt(ge_model, MODEL_ATTR_SESSION_ID, value) ?
value : 0; + + GELOGI("Graph[%u] need memory_size[%ld], weight_size[%ld], Device[%u] free_memory_size[%ld]", + graph_node->GetGraphId(), memory_size, weight_size, GetContext().DeviceId(), free_memory); + if (CheckInt64AddOverflow(memory_size, weight_size) != SUCCESS) { + REPORT_INNER_ERROR("E19999", "memory_size:%ld and weight_size:%ld will overflow after add, check invalid", + memory_size, weight_size); + GELOGE(INTERNAL_ERROR, "[Check][Param] memory_size:%ld and weight_size:%ld will overflow after add", + memory_size, weight_size); + return INTERNAL_ERROR; + } + if (free_memory >= (memory_size + weight_size)) { + return SUCCESS; + } + + std::lock_guard lock(mutex_); + for (const auto &it : graph_nodes_) { + auto graph_id = it.second->GetGraphId(); + auto model = it.second->GetGeRootModel(); + if (model == nullptr) { + continue; + } + auto model_id = model->GetModelId(); + auto model_ids = model->GetAllModelId(); + // unload model not release + bool is_unknown_shape = false; + GE_CHK_STATUS_RET(model->CheckIsUnknownShape(is_unknown_shape)); + if (is_unknown_shape) { + GELOGD("model_id[%u] graph_id[%u] is unknown model, not release memory", model_id, graph_id); + continue; + } + // not loaded,no need unload + if (!it.second->GetLoadFlag()) { + GELOGI("CheckAndReleaseMemory graph[%u] has not been loaded.", graph_id); + continue; + } + ReleaseMemory(ge_model, it.second, model_ids, graph_id, static_cast(session_id)); + } + + return SUCCESS; +} + +void ModelExecutor::ParseInputsDimsForData(const std::vector &input_tensor) { + GELOGD("Start parse input dims from data."); + for (size_t i = 0; i < input_tensor.size(); ++i) { + const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc(); + const Shape &shape = tensor_desc.GetShape(); + const auto &shape_dims = shape.GetDims(); + GELOGD("Input tensor dims is %s.", formats::JoinToString(shape_dims).c_str()); + GetLocalOmeContext().user_real_input_dims.emplace_back(shape_dims); + } +} + +Status ModelExecutor::ParseInputsDimsForGetNextNoSinkAndData(const vector &dynamic_nodes, + const std::vector &input_tensor) { + GELOGD("Start parse inputs dims when coexist data and getnext sink."); + for (size_t i = 0; i < dynamic_nodes.size(); ++i) { + auto op_desc = dynamic_nodes.at(i)->GetOpDesc(); + if (op_desc == nullptr) { + continue; + } + GeAttrValue::INT index = 0; + if (!(AttrUtils::GetInt(op_desc, ATTR_NAME_INDEX, index))) { + REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str()); + GELOGE(PARAM_INVALID, "[Get][Attr] %s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str()); + return PARAM_INVALID; + } + if (static_cast(index) > input_tensor.size()) { + REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu, " + "check invalid", ATTR_NAME_INDEX.c_str(), + op_desc->GetName().c_str(), op_desc->GetType().c_str(), + index, input_tensor.size()); + GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu", + ATTR_NAME_INDEX.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), + index, input_tensor.size()); + return PARAM_INVALID; + } + + const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc(); + const Shape &shape = tensor_desc.GetShape(); + const auto &shape_dims = shape.GetDims(); + GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(shape_dims).c_str()); + 
GetLocalOmeContext().user_real_input_dims.emplace_back(std::move(shape_dims)); + } + return SUCCESS; +} + +Status ModelExecutor::ParseInputsDims(const std::vector &input_tensor) { + GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size()); + GetLocalOmeContext().user_real_input_dims.clear(); + if (GetLocalOmeContext().dynamic_node_type.empty()) { + return SUCCESS; + } + + const vector &data_nodes = GetLocalOmeContext().data_nodes; + const vector &getnext_nosink_nodes = GetLocalOmeContext().getnext_nosink_nodes; + GELOGD("Data nodes count is %zu, getnext nosink nodes count is %zu.", data_nodes.size(), + getnext_nosink_nodes.size()); + if (GetLocalOmeContext().dynamic_node_type == DATA) { + if (getnext_nosink_nodes.empty()) { + // just data or data+getnext_sink + ParseInputsDimsForData(input_tensor); + } else { + // data+getnext_nosink, but only need to get shape_dims of data + if (ParseInputsDimsForGetNextNoSinkAndData(data_nodes, input_tensor) != SUCCESS) { + GELOGE(PARAM_INVALID, "[Parse][Dims] from data failed, when data coexist with getnext nosink."); + return PARAM_INVALID; + } + } + } else { + if (getnext_nosink_nodes.empty()) { + // just getnext_sink or getnext_sink+data, need to get shape_dims from aicpu op + GELOGI("Need to get dims from aicpu op: GETDYNAMICDIMS."); + return SUCCESS; + } else { + if (data_nodes.empty()) { + // just getnext_nosink + ParseInputsDimsForData(input_tensor); + } else { + // getnext_nosink + data, but only need to get shape_dims of getnext_nosink + if (ParseInputsDimsForGetNextNoSinkAndData(getnext_nosink_nodes, input_tensor) != SUCCESS) { + GELOGE(PARAM_INVALID, "[Parse][Dims] from getnext nosink failed, when data coexist with getnext nosink"); + return PARAM_INVALID; + } + } + } + } + + GELOGI("Parse %zu inputs dims success.", GetLocalOmeContext().user_real_input_dims.size()); + return SUCCESS; +} +} // namespace ge diff --git a/ge/graph/execute/model_executor.h b/ge/graph/execute/model_executor.h new file mode 100644 index 00000000..f8e717a1 --- /dev/null +++ b/ge/graph/execute/model_executor.h @@ -0,0 +1,139 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef GE_GRAPH_EXECUTE_MODEL_EXECUTOR_H +#define GE_GRAPH_EXECUTE_MODEL_EXECUTOR_H + +#include + +#include "common/executor.h" +#include "graph/execute/graph_execute.h" + +namespace ge { +class ModelExecutor : public Executor { + public: + /// + /// @ingroup ge + /// @brief graph executor init + /// @param [in] options user config params + /// @return Status result of function + /// + Status Initialize(const map &options); + + /// + /// @ingroup ge + /// @brief graph executor finalize + /// @return Status result of function + /// + Status Finalize(); + + /// + /// @ingroup ge + /// @brief Load mode for graph. + /// @param [in] GeRootModel: root model of graph compiled. + /// @param [in] GraphNode: node of graph. 
+ /// @return Status result of function + /// + Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node); + + /// + /// @ingroup ge + /// @brief Unload mode for graph. + /// @param [in] GeRootModel: root model of graph compiled. + /// @param [in] graph_id: graph identifier. + /// @return Status result of function + /// + Status UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id); + + /// + /// @ingroup ge + /// @brief Push model execution params to queue. + /// @param [in] RunArgs of for model execution. + /// @return Status result of function + /// + Status PushGraph(const RunArgs &args); + + /// + /// @ingroup ge + /// @brief Run graph for synchronize model. + /// @param [in] graph_node: node of graph. + /// @param [in] graph_id: graph identifier. + /// @param [in] inputs: input data for the graph running. + /// @param [out] outputs: output data of the graph running + /// @return Status result of function + /// + Status RunGraph(const GraphNodePtr &graph_node, GraphId graph_id, + const std::vector &inputs, std::vector &outputs); + + /// + /// @ingroup ge + /// @brief Run graph for NN synchronize model. + /// @param [in] graph_node: node of graph. + /// @param [in] graph_id: graph identifier. + /// @param [in] stream: Stream for model running. + /// @param [in] inputs: input data for the graph running. + /// @param [out] outputs: output data of the graph running + /// @return Status result of function + /// + Status RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream, + const std::vector &inputs, std::vector &outputs); + + private: + bool ParseTrainGraphFlag(); + + void AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node); + void RemoveGraphNode(GraphId graph_id); + + Status ModelLoadSync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node); + Status ModelLoadAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node); + Status ModelLoad(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node, + const std::shared_ptr &listener); + + Status UnloadModel(const GeRootModelPtr &ge_root_model, uint32_t graph_id); + + void ReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node, const std::vector &model_ids, + uint32_t graph_id, uint64_t session_id); + Status CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node); + + void UpdateLocalOmeContext(const GraphNodePtr &graph_node); + + void RunThread(); + void StopQueue(); + void ReturnError(RunAsyncCallback callback, Status ret, const string &log); + + void ParseInputsDimsForData(const std::vector &input_tensor); + Status ParseInputsDimsForGetNextNoSinkAndData(const vector &dynamic_nodes, + const std::vector &input_tensor); + Status ParseInputsDims(const std::vector &input_tensor); + + bool init_flag_{false}; + bool train_graph_flag_{false}; + GraphExecutor graph_executor_; + + std::mutex mutex_; + std::map graph_nodes_; + + std::thread run_thread_; + std::atomic_bool thread_run_flag_{false}; + BlockingQueue run_args_q_; + + // for run graph synchronous return + std::mutex sync_run_mutex_; + std::condition_variable condition_; + // run graph synchronization call back listener + std::shared_ptr graph_run_listener_; +}; +} +#endif // GE_GRAPH_EXECUTE_MODEL_EXECUTOR_H \ No newline at end of file diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 3c31014d..45540ba0 100755 --- 
a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -513,8 +513,7 @@ Status ModelManager::GetCurDynamicDims(const vector> &user_real_ } GELOGD("Cur dynamic dims is %s.", formats::JoinToString(cur_dynamic_dims).c_str()); bool cur_dynamic_dims_valid = false; - std::vector shape_strs = ge::StringUtils::Split(GetLocalOmgContext().dynamic_dims, ';'); - for (auto dynamic_dim : shape_strs) { + for (auto dynamic_dim : GetLocalOmeContext().dynamic_shape_dims) { if (dynamic_dim == formats::JoinToString(cur_dynamic_dims)) { cur_dynamic_dims_valid = true; break; @@ -556,10 +555,10 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector cur_dynamic_dims; - if (!GetLocalOmgContext().user_real_input_dims.empty()) { - if (GetCurDynamicDims(GetLocalOmgContext().user_real_input_dims, GetLocalOmgContext().user_input_dims, + if (!GetLocalOmeContext().user_real_input_dims.empty()) { + if (GetCurDynamicDims(GetLocalOmeContext().user_real_input_dims, GetLocalOmeContext().user_input_dims, cur_dynamic_dims) != SUCCESS) { GELOGE(INTERNAL_ERROR, "[Get][CurDynamicDims] [Train_Dynamic] Failed to Parse real_dynamic_dims."); return INTERNAL_ERROR; diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 04e0f51c..b2528cdd 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -164,26 +164,12 @@ ge::Status CheckFpCeilingMode() { } // namespace namespace ge { -GraphManager::GraphManager() - : thread_run_flag_(false), - graph_run_listener_(nullptr), - init_flag_(false) { -} - -Status GraphManager::Initialize(const std::map &options) { +Status GraphManager::Initialize(const std::map &options, Executor *executor) { ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); if (init_flag_) { GELOGW("[Initialize] GraphManager already initialized."); return SUCCESS; } - - // malloc - graph_run_listener_ = MakeShared(sync_run_mutex_, condition_); - if (graph_run_listener_ == nullptr) { - REPORT_CALL_ERROR("E19999", "New GraphModelListener fail"); - GELOGE(MEMALLOC_FAILED, "[New][GraphModelListener] failed"); - return MEMALLOC_FAILED; - } // graph context graph_context_ = MakeShared(); if (graph_context_ == nullptr) { @@ -211,31 +197,18 @@ Status GraphManager::Initialize(const std::map &options) { return ret; } - graph_map_.clear(); - cache_helper_map_.clear(); - graph_id_to_add_graph_cond_.clear(); - graph_count_.clear(); + executor_ = executor; init_flag_ = true; thread_run_flag_ = true; - prerun_thread_ = std::thread(GraphManager::PreRunThread, this); - run_thread_ = std::thread(GraphManager::RunThread, this); + prerun_thread_ = std::thread(&GraphManager::PreRunThread, this); return SUCCESS; } Status GraphManager::UnloadModel(GeRootModelPtr ge_root_model, uint32_t graph_id) { - Status ret = SUCCESS; - for (size_t i = 0; i < ge_root_model->GetAllModelId().size(); ++i) { - uint32_t model_id = ge_root_model->GetAllModelId()[i]; - GELOGI("Unload model %u.", model_id); - ret = GraphLoader::UnloadModel(model_id); - if (ret != SUCCESS) { - GELOGW("[GraphManager] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id); - return ret; - } - } - return ret; + GE_CHECK_NOTNULL(executor_); + return executor_->UnloadGraph(ge_root_model, graph_id); } Status GraphManager::Finalize() { @@ -244,23 +217,13 @@ Status GraphManager::Finalize() { return SUCCESS; } - if (graph_executor_.FreeExecuteMemory() != SUCCESS) { - GELOGW("Graph executor FreeExecuteMemory failed, 
resources may not be released correctly.");
-  }
-
-  StopQueue(this);
-
+  StopQueue();
   if (prerun_thread_.joinable()) {
     prerun_thread_.join();
   }
-  if (run_thread_.joinable()) {
-    run_thread_.join();
-  }
 
   // check graph whether running or not
   Status unload_model_ret = SUCCESS;
-  Status ret;
-  rtError_t rt_ret;
   for (auto iter = graph_map_.begin(); iter != graph_map_.end(); ++iter) {
     GraphNodePtr graph_node = iter->second;
     if (graph_node->GetRunFlag()) {
@@ -271,22 +234,10 @@
     // unload model
     auto ge_root_model = graph_node->GetGeRootModel();
     if (ge_root_model != nullptr && ge_root_model->GetModelId() != INVALID_MODEL_ID && graph_node->GetLoadFlag()) {
-      rt_ret = rtSetDevice(GetContext().DeviceId());
-      if (rt_ret != RT_ERROR_NONE) {
-        GELOGW("[GraphManager] rtSetDevice failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), iter->first);
-        unload_model_ret = FAILED;
-        continue;
-      }
-      ret = UnloadModel(ge_root_model, iter->first);
+      Status ret = UnloadModel(ge_root_model, iter->first);
       if (ret != SUCCESS) {
-        GELOGW("[GraphManager] unload model failed, graph_id=%u.", iter->first);
         unload_model_ret = ret;
-      }
-      rt_ret = rtDeviceReset(GetContext().DeviceId());
-      if (rt_ret != RT_ERROR_NONE) {
-        GELOGW("[GraphManager] rtDeviceReset failed, graphId=%u.", iter->first);
-        unload_model_ret = FAILED;
-        continue;
+        GELOGW("[GraphManager] unload model failed, graph_id=%u.", iter->first);
       }
     }
@@ -1122,12 +1073,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
       return ret;
     }
   }
-  ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad);
-  if (!graph_node->IsAsync()) {
-    ret = LoadGraph(ge_root_model, graph_node);
-  } else {
-    ret = LoadGraphAsync(ge_root_model, graph_node);
-  }
+  ret = LoadGraph(ge_root_model, graph_node);
   if (ret != SUCCESS) {
     GELOGE(ret, "[Load][Graph] Failed, graph_id:%u.", graph_node->GetGraphId());
     return ret;
@@ -1135,13 +1081,8 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std:
     graph_node->SetBuildFlag(true);
     var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId());
   } else if (!graph_node->GetLoadFlag()) {
-    ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad);
     GeRootModelPtr ge_root_model_ptr = graph_node->GetGeRootModel();
-    if (!graph_node->IsAsync()) {
-      ret = LoadGraph(ge_root_model_ptr, graph_node);
-    } else {
-      ret = LoadGraphAsync(ge_root_model_ptr, graph_node);
-    }
+    ret = LoadGraph(ge_root_model_ptr, graph_node);
     if (ret != SUCCESS) {
       GELOGE(ret, "[Load][Graph] Failed, graph_id:%u.", graph_node->GetGraphId());
       return ret;
@@ -1149,40 +1090,16 @@
   }
   return ret;
 }
+
 Status GraphManager::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
   GELOGI("[LoadGraph] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId());
-  if (options_.run_graph_flag && ge_root_model != nullptr) {
-    ge_root_model->SetTrainFlag(GetTrainFlag());
-    // synchronization run graph with model
-    std::shared_ptr<GraphModelListener> model_listener = GetModelListener();
-    ModelIdInfo model_id_info;
-    bool is_unknown_shape = false;
-    GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape));
-    if (!is_unknown_shape) {
-      if (getenv(kEnvGeuseStaticMemory) != nullptr) {
-        GELOGI("[LoadGraph] GE_USE_STATIC_MEMORY is seted.");
-      } else {
-        auto root_graph = ge_root_model->GetRootGraph();
-
GE_CHECK_NOTNULL(root_graph); - auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel(); - GeModelPtr ge_model = name_to_model[root_graph->GetName()]; - GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node)); - } - } - ge_root_model->SetIsSpecificStream(graph_node->IsSpecificStream()); - GE_TIMESTAMP_START(LoadGraph); - Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, model_listener); - GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraph"); - if (ret != SUCCESS) { - GELOGE(ret, "[Load][Model] failed, ret:%d", ret); - graph_node->SetRunFlag(false); - return ret; - } - graph_node->SetLoadFlag(true); - ge_root_model->SetModelId(model_id_info.model_id); - graph_node->SetGeRootModel(ge_root_model); + if (!options_.run_graph_flag) { + return SUCCESS; } - return SUCCESS; + + ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad); + GE_CHECK_NOTNULL(executor_); + return executor_->LoadGraph(ge_root_model, graph_node); } Status GraphManager::LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper, @@ -1272,45 +1189,14 @@ Status GraphManager::SaveCacheAfterBuild(uint32_t graph_id, ge::ComputeGraphPtr Status GraphManager::InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id, const std::vector &inputs, std::vector &outputs) { - Status ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_); - if (ret != SUCCESS) { - GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph_id = %u.", graph_id); - graph_node->SetRunFlag(false); - return GE_GRAPH_RUNGRAPH_FAILED; - } - - if (GetTrainFlag()) { - GE_CHK_STATUS_RET(graph_executor_.SetGraphContext(GetGraphContext())); - graph_executor_.SetTrainFlag(options_.train_graph_flag); - } - ret = graph_executor_.ExecuteGraph(graph_id, graph_node->GetGeRootModel(), inputs, outputs); - - graph_node->SetRunFlag(false); - if (ret != SUCCESS) { - GELOGE(ret, "[Execute][Graph] failed, graph_id = %u.", graph_id); - return ret; - } - return SUCCESS; + GE_CHECK_NOTNULL(executor_); + return executor_->RunGraph(graph_node, graph_id, inputs, outputs); } Status GraphManager::InnerRunGraphWithStream(GraphNodePtr &graph_node, const GraphId &graph_id, rtStream_t stream, const std::vector &inputs, std::vector &outputs) { - auto ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_); - if (ret != SUCCESS) { - GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph id = %u, stream = %p.", graph_id, stream); - graph_node->SetRunFlag(false); - return GE_GRAPH_RUNGRAPH_FAILED; - } - - ret = graph_executor_.ExecuteGraphWithStream(graph_id, stream, graph_node->GetGeRootModel(), inputs, outputs); - graph_node->SetRunFlag(false); - graph_node->SetIsSpecificStream(false); - if (ret != SUCCESS) { - GELOGE(ret, "[Execute][Graph] With Stream failed, graph id = %u, stream = %p.", graph_id, stream); - return ret; - } - GELOGI("[Run][GraphWithStreamAsync] run graph success, graph id = %u, stream = %p.", graph_id, stream); - return SUCCESS; + GE_CHECK_NOTNULL(executor_); + return executor_->RunGraphWithStream(graph_node, graph_id, stream, inputs, outputs); } Status GraphManager::RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t stream, uint64_t session_id, @@ -1665,38 +1551,18 @@ Status GraphManager::RemoveGraph(const GraphId &graph_id) { std::lock_guard lock(unload_model_mutex_); - Status middle_ret; - rtError_t rt_ret; var_acc_ctrl_.RemoveGraph(graph_id); 
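// Note on the delegation pattern used throughout this file: every execution
// path now opens with GE_CHECK_NOTNULL(executor_) before forwarding. The macro
// behaves roughly like the following (a sketch; the real definition in GE's
// common debug headers differs in logging details):
//   if (executor_ == nullptr) {
//     GELOGE(PARAM_INVALID, "param[executor_] must not be null.");
//     return PARAM_INVALID;
//   }
// so a GraphManager initialized without an executor (compile-only mode) rejects
// load/run/unload requests early instead of dereferencing a null pointer.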
RemoveGraphNode(graph_id); - RemoveModelCacheHelper(graph_id); auto ge_root_model = graph_node->GetGeRootModel(); if (CheckModelLoad(ge_root_model, graph_node->GetLoadFlag())) { - rt_ret = rtSetDevice(GetContext().DeviceId()); - if (rt_ret != RT_ERROR_NONE) { - REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, graph_id:%u", - GetContext().DeviceId(), graph_id); - GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), - graph_id); - return FAILED; - } - // same graph may be added for several times, different models were created separately, - // unload them respectively. - middle_ret = UnloadModel(ge_root_model, graph_id); + Status middle_ret = UnloadModel(ge_root_model, graph_id); if (middle_ret != SUCCESS) { REPORT_INNER_ERROR("E19999", "UnloadModel for graph:%u failed, check invalid", graph_id); GELOGE(middle_ret, "[Unload][Model] model failed, graph_id=%u.", graph_id); ret = middle_ret; } - rt_ret = rtDeviceReset(GetContext().DeviceId()); - if (rt_ret != RT_ERROR_NONE) { - REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, graph_id:%u", - GetContext().DeviceId(), graph_id); - GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u, graph_id:%u", GetContext().DeviceId(), graph_id); - ret = FAILED; - } } RemoveCompilerStages(graph_id); @@ -2120,8 +1986,6 @@ Status GraphManager::SummaryHandle(const GraphId &graph_id, std::vector &outputs) { GELOGI("[GraphManager] CheckpointHandle, outputsSize=%zu.", outputs.size()); - std::vector outputs_desc = graph_executor_.GetOutputsDesc(); - GELOGI("[GraphManager] CheckpointHandle, outputsDescSize=%zu.", outputs_desc.size()); std::map save_results; NodePtr netoutput = nullptr; @@ -2786,160 +2650,6 @@ void GraphManager::ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_gr } } -Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) { - GELOGI("[LoadGraphAsync] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId()); - if (options_.run_graph_flag && ge_root_model != nullptr) { - ge_root_model->SetTrainFlag(GetTrainFlag()); - // synchronization run graph with model - ModelIdInfo model_id_info; - bool is_unknown_shape = false; - GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape)); - if (!is_unknown_shape) { - if (getenv(kEnvGeuseStaticMemory) != nullptr) { - GELOGI("[LoadGraphAsync] GE_USE_STATIC_MEMORY is seted."); - } else { - auto root_graph = ge_root_model->GetRootGraph(); - GE_CHECK_NOTNULL(root_graph); - auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel(); - GeModelPtr ge_model = name_to_model[root_graph->GetName()]; - GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node)); - } - } - GE_TIMESTAMP_START(LoadGraph); - auto listener = MakeShared(); - GE_CHECK_NOTNULL(listener); - Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, listener); - GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraphAsync"); - if (ret != SUCCESS) { - GELOGE(ret, "[Load][ModelOnline] Failed, model_id:%u", model_id_info.model_id); - graph_node->SetRunFlag(false); - return ret; - } - graph_node->SetLoadFlag(true); - ge_root_model->SetModelId(model_id_info.model_id); - graph_node->SetGeRootModel(ge_root_model); - } - return SUCCESS; -} - -void GraphManager::ReleaseMemory(const GeModelPtr &ge_model, GraphNodePtr &graph_node, - const std::vector &model_ids, uint32_t graph_id, uint64_t session_id) { - rtError_t rt_ret = 
rtSetDevice(GetContext().DeviceId()); - if (rt_ret != RT_ERROR_NONE) { - REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u", GetContext().DeviceId()); - GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id=%u.", GetContext().DeviceId()); - return; - } - for (auto model_id : model_ids) { - uint64_t max_memory_size = 0; - Status result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size); - if (result != SUCCESS) { - continue; - } - GELOGI("CheckAndReleaseMemory try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id, - max_memory_size); - if (model_ids.size() > 1) { - result = ge_model->GetSessionId(model_id, session_id); - if (result != SUCCESS) { - GELOGW("[GraphManager:] get session failed when dynamic memory, modelId=%u, graphId=%u.", model_id, - graph_id); - continue; - } - } - result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0); - if (result != SUCCESS) { - GELOGW("[GraphManager:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id, - graph_id); - } - result = GraphLoader::UnloadModel(model_id); - if (result != SUCCESS) { - GELOGW("[GraphManager:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id); - } - GELOGI("CheckAndReleaseMemory UnloadGraph[%u], model[%u] success.", graph_id, model_id); - } - graph_node->SetLoadFlag(false); - // Allow model to be loaded agagin without adding graph again - graph_node->SetLoadCount(graph_node->GetLoadRecord()); - graph_node->SetLoadRecord(kNeverLoaded); - GeRootModelPtr ge_root_model = graph_node->GetGeRootModel(); - if (ge_root_model == nullptr) { - GELOGW("ge_root_model is null, graph_id:%u", graph_id); - return; - } - ge_root_model->ClearAllModelId(); - rt_ret = rtDeviceReset(GetContext().DeviceId()); - if (rt_ret != RT_ERROR_NONE) { - REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u", GetContext().DeviceId()); - GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u.", GetContext().DeviceId()); - return; - } -} - -Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node) { - GELOGI("CheckAndReleaseMemory graph_id[%u]", graph_node->GetGraphId()); - int64_t value = 0; - bool ret = ge::AttrUtils::GetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, value); - int64_t memory_size = ret ? value : 0; - ret = ge::AttrUtils::GetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, value); - int64_t weight_size = ret ? value : 0; - ret = ge::AttrUtils::GetInt(ge_model, MODEL_ATTR_SESSION_ID, value); - uint64_t session_id = ret ? 
value : 0; - - int64_t free_memory = 0; - Status result = GraphLoader::GetMemoryInfo(free_memory); - if (result != SUCCESS) { - return result; - } - - GELOGI( - "CheckAndReleaseMemory Graph[%u] need memory_size[%ld], weight_size[%ld]," - " Device[%u] free_memory_size[%ld]", - graph_node->GetGraphId(), memory_size, weight_size, GetContext().DeviceId(), free_memory); - if (ge::CheckInt64AddOverflow(memory_size, weight_size) != SUCCESS) { - REPORT_INNER_ERROR("E19999", "memory_size:%ld and weight_size:%ld will overflow after add, check invalid", - memory_size, weight_size); - GELOGE(INTERNAL_ERROR, "[Check][Param] memory_size:%ld and weight_size:%ld will overflow after add", - memory_size, weight_size); - return INTERNAL_ERROR; - } - if (free_memory >= (memory_size + weight_size)) { - return SUCCESS; - } - - std::lock_guard lock(unload_model_mutex_); - - std::map graph_map; - { - std::lock_guard lock(member_mutex_); - graph_map = graph_map_; - } - - for (auto &it : graph_map) { - auto graph_id = it.second->GetGraphId(); - auto model = it.second->GetGeRootModel(); - if (model == nullptr) { - continue; - } - auto model_id = model->GetModelId(); - auto model_ids = model->GetAllModelId(); - // unload model not release - bool is_unknown_shape = false; - GE_CHK_STATUS_RET(model->CheckIsUnknownShape(is_unknown_shape)); - if (is_unknown_shape) { - GELOGD("model_id[%u] graph_id[%u] is unknown model, not release memory", model_id, graph_id); - continue; - } - // not loaded,no need unload - if (!it.second->GetLoadFlag()) { - GELOGI("CheckAndReleaseMemory graph[%u] has not been loaded.", graph_id); - continue; - } - ReleaseMemory(ge_model, it.second, model_ids, graph_id, session_id); - } - - return SUCCESS; -} - Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id, const SubGraphInfoPtr &sub_graph_info_ptr, const std::string &root_graph_name, @@ -3069,14 +2779,14 @@ Status GraphManager::IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_m return FAILED; } -Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args, +Status GraphManager::CheckIncreBuildAndPreRun(const PreRunArgs &args, GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) { - if (!graph_manager->IsGraphNeedBuild(graph_node)) { + if (!IsGraphNeedBuild(graph_node)) { ge_root_model = graph_node->GetGeRootModel(); return SUCCESS; } if (graph_node->GetBuildFlag()) { - ReturnError(graph_manager, args.callback, PARAM_INVALID, + ReturnError(args.callback, PARAM_INVALID, "The graph " + std::to_string(graph_node->GetGraphId()) + " need to re-build, you should remove it" " from GE first, then AddGraph again and rebuild it."); @@ -3084,55 +2794,53 @@ Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const } // check need incre build. 
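// Aside on the CheckAndReleaseMemory logic removed above (it moves into
// ModelExecutor): before comparing free_memory >= (memory_size + weight_size),
// the sum is guarded by CheckInt64AddOverflow because both sizes are attributes
// read from the model and the int64 addition can wrap. A self-contained sketch
// of that guard (GE's version in common/math/math_util.h follows this shape):
//   bool Int64AddWouldOverflow(int64_t a, int64_t b) {
//     if ((b > 0) && (a > INT64_MAX - b)) { return true; }  // would wrap positive
//     if ((b < 0) && (a < INT64_MIN - b)) { return true; }  // would wrap negative
//     return false;
//   }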
GeModelPtr ge_model = nullptr; - if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) { + if (IncreBuild(graph_node, ge_model) != SUCCESS) { std::vector ge_inputs; for (const auto &item: args.input_tensor) { ge_inputs.emplace_back(TensorAdapter::AsGeTensor(item)); } - Status ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id); + Status ret = PreRun(graph_node, ge_inputs, ge_root_model, args.session_id); // release rts generate context RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId()); if (ret != SUCCESS) { - ReturnError(graph_manager, args.callback, ret, "PreRun Failed."); + ReturnError(args.callback, ret, "PreRun Failed."); return ret; } } graph_node->SetBuildFlag(true); - graph_manager->var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId()); + var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId()); return SUCCESS; } -void GraphManager::PreRunThread(GraphManager *graph_manager) { +void GraphManager::PreRunThread() { if (prctl(PR_SET_NAME, ("GE_PreRun")) != 0) { GELOGW("Set thread name failed."); } PreRunArgs args; - while (graph_manager->thread_run_flag_) { - bool pop_status = graph_manager->prerun_args_q_.Pop(args); - if (!pop_status) { + while (thread_run_flag_) { + if (!prerun_args_q_.Pop(args)) { continue; } GELOGI("[PreRunThread] A new loop start, graph_id:%u.", args.graph_id); - ErrorManager::GetInstance().SetErrorContext(args.error_context); ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GetContext().SetSessionId(args.session_id); GetThreadLocalContext() = args.context; - graph_manager->UpdateLocalOmgContext(args.graph_id); + UpdateLocalOmgContext(args.graph_id); // find graph GraphNodePtr graph_node = nullptr; - Status ret = graph_manager->GetGraphNode(args.graph_id, graph_node); + Status ret = GetGraphNode(args.graph_id, graph_node); if (ret != SUCCESS) { - ReturnError(graph_manager, args.callback, GE_GRAPH_GRAPH_NODE_NULL, + ReturnError(args.callback, GE_GRAPH_GRAPH_NODE_NULL, "[RunGraph] graph not exist, graph_id=" + std::to_string(args.graph_id)); return; } // more than one graph owns same graph_id uint32_t count = 0; - if (graph_manager->GetGraphCount(args.graph_id, count) != SUCCESS) { + if (GetGraphCount(args.graph_id, count) != SUCCESS) { GELOGE(INTERNAL_ERROR, "[Get][GraphCount] failed, graph id:%u.", args.graph_id); return; } @@ -3142,7 +2850,7 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { // In online inference concurrency senario, graph_node is allowed to be locked for 'count' times graph_node->SetSemSize(count); graph_node->Lock(); - graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, + PushGraph(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback })); GELOGI("[PreRunThread] Loop end. 
Start to run with cached build model."); continue; @@ -3151,7 +2859,7 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { graph_node->Lock(); if (graph_node->GetRunFlag()) { - ReturnError(graph_manager, args.callback, GE_GRAPH_ALREADY_RUNNING, + ReturnError(args.callback, GE_GRAPH_ALREADY_RUNNING, "[RunGraph] graph already running, graph id=" + std::to_string(args.graph_id)); graph_node->Unlock(); return; @@ -3162,25 +2870,25 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { ComputeGraphPtr compute_graph_tmp = GraphUtils::GetComputeGraph(*(graph_node->GetGraph())); if (compute_graph_tmp == nullptr) { - ReturnError(graph_manager, args.callback, GE_GRAPH_GRAPH_NODE_NULL, + ReturnError(args.callback, GE_GRAPH_GRAPH_NODE_NULL, "[RunGraph] compute_graph_tmp is NULL, graph id = %u."); graph_node->Unlock(); return; } // when set incre build, save cache helper. - graph_manager->AddModelCacheHelperToMap(args.graph_id, args.session_id, compute_graph_tmp); + AddModelCacheHelperToMap(args.graph_id, args.session_id, compute_graph_tmp); std::vector ge_models; - if (graph_manager->options_.local_fmk_op_flag) { - graph_manager->GetCompilerStages(graph_node->GetGraphId()).optimizer.TranFrameOp(compute_graph_tmp); + if (options_.local_fmk_op_flag) { + GetCompilerStages(graph_node->GetGraphId()).optimizer.TranFrameOp(compute_graph_tmp); } // it will not execute graph preprocess, optimize, parition, build if the graph has built successful. GELOGI("Start for run graph async."); GeRootModelPtr ge_root_model = nullptr; - ret = CheckIncreBuildAndPreRun(graph_manager, args, graph_node, ge_root_model); + ret = CheckIncreBuildAndPreRun(args, graph_node, ge_root_model); if (ret != SUCCESS) { graph_node->SetRunFlag(false); if (!ge::Analyzer::GetInstance()->IsEnableNetAnalyzeDebug()) { @@ -3193,252 +2901,49 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { continue; } } - graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, + + PushGraph(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context, args.input_tensor, ge_root_model, GetThreadLocalContext(), args.callback })); GELOGI("[PreRunThread] Loop end."); } } -void GraphManager::ParseInputsDimsForData(const std::vector &input_tensor) { - GELOGD("Start parse input dims from data."); - for (size_t i = 0; i < input_tensor.size(); ++i) { - const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc(); - const Shape &shape = tensor_desc.GetShape(); - const auto &shape_dims = shape.GetDims(); - GELOGD("Input tensor dims is %s.", formats::JoinToString(shape_dims).c_str()); - GetLocalOmgContext().user_real_input_dims.emplace_back(shape_dims); - } -} - -Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector &dynamic_nodes, - const std::vector &input_tensor) { - GELOGD("Start parse inputs dims when coexist data and getnext sink."); - for (size_t i = 0; i < dynamic_nodes.size(); ++i) { - auto op_desc = dynamic_nodes.at(i)->GetOpDesc(); - if (op_desc == nullptr) { - continue; - } - GeAttrValue::INT index = 0; - if (!(AttrUtils::GetInt(op_desc, ATTR_NAME_INDEX, index))) { - REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(), - op_desc->GetName().c_str(), op_desc->GetType().c_str()); - GELOGE(PARAM_INVALID, "[Get][Attr] %s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(), - op_desc->GetName().c_str(), op_desc->GetType().c_str()); - return PARAM_INVALID; - } - if (static_cast(index) > input_tensor.size()) { - 
REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu, " - "check invalid", ATTR_NAME_INDEX.c_str(), - op_desc->GetName().c_str(), op_desc->GetType().c_str(), - index, input_tensor.size()); - GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu", - ATTR_NAME_INDEX.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str(), - index, input_tensor.size()); - return PARAM_INVALID; - } - - const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc(); - const Shape &shape = tensor_desc.GetShape(); - const auto &shape_dims = shape.GetDims(); - GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(shape_dims).c_str()); - GetLocalOmgContext().user_real_input_dims.emplace_back(std::move(shape_dims)); +void GraphManager::PushGraph(const RunArgs &args) { + if (executor_ == nullptr) { + GELOGW("Just compile model, not support execute."); + return; } - return SUCCESS; -} -Status GraphManager::ParseInputsDims(const std::vector &input_tensor) { - GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size()); - GetLocalOmgContext().user_real_input_dims.clear(); - if (!GetLocalOmgContext().dynamic_node_type.empty()) { - vector data_nodes; - vector getnext_nosink_nodes; - data_nodes = GetLocalOmgContext().data_nodes; - getnext_nosink_nodes = GetLocalOmgContext().getnext_nosink_nodes; - GELOGD("Data nodes count is %zu, getnext nosink nodes count is %zu.", data_nodes.size(), - getnext_nosink_nodes.size()); - if (GetLocalOmgContext().dynamic_node_type == DATA) { - if (getnext_nosink_nodes.empty()) { - // just data or data+getnext_sink - ParseInputsDimsForData(input_tensor); - } else { - // data+getnext_nosink, but only need to get shape_dims of data - if (ParseInputsDimsForGetNexNosinkAndData(data_nodes, input_tensor) != SUCCESS) { - GELOGE(PARAM_INVALID, "[Parse][Dims] from data failed, when data coexist with getnext nosink."); - return PARAM_INVALID; - } - } - } else { - if (getnext_nosink_nodes.empty()) { - // just getnext_sink or getnext_sink+data, need to get shape_dims from aicpu op - GELOGI("Need to get dims from aicpu op: GETDYNAMICDIMS."); - return SUCCESS; - } else { - if (data_nodes.empty()) { - // just getnext_nosink - ParseInputsDimsForData(input_tensor); - } else { - // getnext_nosink + data, but only need to get shape_dims of getnext_nosink - if (ParseInputsDimsForGetNexNosinkAndData(getnext_nosink_nodes, input_tensor) != SUCCESS) { - GELOGE(PARAM_INVALID, "[Parse][Dims] from getnext nosink failed, when data coexist with getnext nosink"); - return PARAM_INVALID; - } - } - } - } - } - GELOGI("Parse %zu inputs dims success.", GetLocalOmgContext().user_real_input_dims.size()); - return SUCCESS; + (void)executor_->PushGraph(args); } -void GraphManager::RunThread(GraphManager *graph_manager) { - ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); - if (prctl(PR_SET_NAME, ("GE_Run")) != 0) { - GELOGW("Set thread name failed."); - } - - RunArgs args; - while (graph_manager->thread_run_flag_) { - bool pop_status = graph_manager->run_args_q_.Pop(args); - if (!pop_status) { - continue; - } - - GELOGI("[RunThread] A new loop start, graph_id:%u.", args.graph_id); - - ErrorManager::GetInstance().SetErrorContext(args.error_context); - GetContext().SetSessionId(args.session_id); - GetThreadLocalContext() = args.context; - graph_manager->UpdateLocalOmgContext(args.graph_id); - - Status ret; - // parse inputs.dims to vector> dynamic_dims - ret = 
graph_manager->ParseInputsDims(args.input_tensor); - if (ret != SUCCESS) { - ReturnError(graph_manager, args.callback, ret, "ParseInputsDims failed, thread exit."); - args.graph_node->Unlock(); - return; - } - - args.graph_node->UpdateLoadFlag(); - if (!args.graph_node->GetLoadFlag()) { - ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad); - args.ge_root_model->SetTrainFlag(graph_manager->GetTrainFlag()); - ret = graph_manager->LoadGraphAsync(args.ge_root_model, args.graph_node); - if (ret != SUCCESS || args.ge_root_model == nullptr) { - StopQueue(graph_manager); - ReturnError(graph_manager, args.callback, ret, "LoadGraphAsync failed, thread exit."); - args.graph_node->Unlock(); - return; - } - // control the times of graph loading in multi-thread scenario - args.graph_node->DecreaseLoadCount(); - args.graph_node->IncreaseLoadRecord(); +void GraphManager::SetRunContext(const GraphNodePtr &graph_node) { + OmeContext ome_context; + ome_context.need_multi_batch = GetLocalOmgContext().need_multi_batch; + ome_context.dynamic_node_type = GetLocalOmgContext().dynamic_node_type; + ome_context.dynamic_shape_dims = StringUtils::Split(GetLocalOmgContext().dynamic_dims, ';'); + ome_context.user_input_dims = GetLocalOmgContext().user_input_dims; - args.graph_node->SetLoadFlag(true); - GELOGI("LoadGraph[%u], model[%u] success and set LoadFlag to true.", args.graph_node->GetGraphId(), - args.ge_root_model->GetModelId()); - } + ome_context.data_nodes = GetLocalOmgContext().data_nodes; + ome_context.getnext_nosink_nodes = GetLocalOmgContext().getnext_nosink_nodes; - ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); - if (graph_manager->GetTrainFlag()) { - ret = graph_manager->graph_executor_.SetGraphContext(graph_manager->GetGraphContext()); - if (ret != SUCCESS) { - GELOGW("[GraphManager] SetGraphContext failed, graph_id=%u.", args.graph_id); - } - graph_manager->graph_executor_.SetTrainFlag(graph_manager->options_.train_graph_flag); - } + ome_context.user_real_input_dims = GetLocalOmgContext().user_real_input_dims; - ret = graph_manager->graph_executor_.ExecuteGraphAsync(args.graph_id, args.graph_node->GetGeRootModel(), - args.input_tensor, args.callback); - args.graph_node->SetRunFlag(false); - if (ret != SUCCESS) { - ReturnError(graph_manager, args.callback, ret, "ExecuteGraphAsync failed, thread exit."); - args.graph_node->Unlock(); - return; - } - args.graph_node->Unlock(); - GELOGI("[GraphManager] Run graph async success, graph_id=%u.", args.graph_id); - } + graph_node->SetOmeContext(ome_context); } -void GraphManager::StopQueue(GraphManager *graph_manager) { - if (graph_manager == nullptr) { - return; - } - - graph_manager->thread_run_flag_.store(false); - graph_manager->prerun_args_q_.Stop(); - graph_manager->run_args_q_.Stop(); +void GraphManager::StopQueue() { + thread_run_flag_.store(false); + prerun_args_q_.Stop(); } -void GraphManager::ReturnError(GraphManager *graph_manager, RunAsyncCallback callback, Status ret, const string &log) { - if (graph_manager == nullptr) { - return; - } - StopQueue(graph_manager); +void GraphManager::ReturnError(RunAsyncCallback callback, Status ret, const string &log) { + StopQueue(); GELOGE(ret, "%s.", log.c_str()); std::vector outputs; callback(ret, outputs); } -void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback, - Status ret, const string &log) { - std::vector outputs; - auto compute_graph = 
GraphUtils::GetComputeGraph(*graph_node->GetGraph()); - if (graph_manager == nullptr || compute_graph == nullptr) { - REPORT_INNER_ERROR("E19999", "Param graph_manager or compute_graph in graph_node is nullptr, check invalid"); - GELOGE(GRAPH_FAILED, "[Check][Param] compute graph or graph manager is nullptr"); - callback(GRAPH_FAILED, outputs); - return; - } - - for (const auto &node : compute_graph->GetAllNodes()) { - if (node->GetType() != "NetOutput") { - continue; - } - for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); i++) { - auto input_desc = node->GetOpDesc()->MutableInputDesc(i); - GeShape ge_shape(input_desc->GetShape().GetDims()); - GeTensorDesc ge_tensor_desc; - ge_tensor_desc.SetShape(ge_shape); - GeTensor ge_tensor(ge_tensor_desc); - int64_t len = 1; - if (input_desc->GetShape().GetDims() != std::vector({})) { - len = input_desc->GetShape().GetShapeSize(); - } - if (len < 0) { - REPORT_INNER_ERROR("E19999", "InputIndex:%zu ShapeSize:%ld of op:%s(%s) < 0, unknown shape is not support, " - "check invalid", i, len, - node->GetName().c_str(), node->GetType().c_str()); - GELOGE(GRAPH_FAILED, "[Check][Param] InputIndex:%zu ShapeSize:%ld of op:%s(%s) < 0, " - "unknown shape is not support", i, len, node->GetName().c_str(), node->GetType().c_str()); - callback(GRAPH_FAILED, outputs); - return; - } else if (len == 0) { - GELOGI("getted shape size is 0.Do process as empty tensor!"); - len = 1; - } - auto length = GetSizeInBytes(len, input_desc->GetDataType()); - auto aligned_ptr = MakeShared(length, kAlignment); - if (aligned_ptr == nullptr) { - REPORT_CALL_ERROR("E19999", "New AlignedPtr failed, len:%ld", length); - GELOGE(GRAPH_FAILED, "[Create][AlignedPtr] failed, len:%ld", length); - return; - } - ge_tensor.SetData(aligned_ptr, length); - ge::Tensor tensor = TensorAdapter::AsTensor(ge_tensor); - // To avoid global step too small and can not stop, totally set a bigger value - auto ptr = aligned_ptr->MutableGet(); - for (int64_t i = 0; i < length; i++) { - ptr[i] = 0x7F; // here stands for a positive max value - } - outputs.emplace_back(std::move(tensor)); - } - } - callback(SUCCESS, outputs); - return; -} - bool GraphManager::IsGraphNeedRebuild(uint32_t graph_id) { // find graph GraphNodePtr graph_node = nullptr; @@ -3649,6 +3154,7 @@ Status GraphManager::Build(const GraphNodePtr &graph_node, ComputeGraphPtr &comp GraphUtils::DumpGEGraph(compute_graph, "Build", is_always_dump); GraphUtils::DumpGEGraphToOnnx(*compute_graph, "Build"); + SetRunContext(graph_node); graph_node->SetGeRootModel(ge_root_model); return SUCCESS; } diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index 3475da6d..6773787c 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -31,7 +31,6 @@ #include "external/graph/types.h" #include "external/ge/ge_api_types.h" #include "graph/build/graph_builder.h" -#include "graph/execute/graph_execute.h" #include "graph/ge_local_context.h" #include "graph/load/graph_loader.h" #include "graph/manager/graph_manager_utils.h" @@ -41,11 +40,12 @@ #include "graph/preprocess/graph_preprocess.h" #include "graph/tuning_utils.h" #include "model/ge_model.h" +#include "common/executor.h" namespace ge { class GraphManager { public: - GraphManager(); + GraphManager() = default; ~GraphManager() = default; /// @@ -54,7 +54,7 @@ class GraphManager { /// @param [in] options user config params /// @return Status result of function /// - Status Initialize(const std::map &options); + Status Initialize(const std::map &options, 
Executor *executor = nullptr); /// /// @ingroup ge_graph @@ -113,7 +113,7 @@ class GraphManager { /// @param [out] outputs output data /// @return Status result of function /// - Status RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t stream, uint64_t session_id, + Status RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t stream, uint64_t session_id, const std::vector &inputs, std::vector &outputs); /// @@ -227,34 +227,18 @@ class GraphManager { RunAsyncCallback callback; }; - struct RunArgs { - GraphNodePtr graph_node; - GraphId graph_id; - uint64_t session_id; - struct error_message::Context error_context; - std::vector input_tensor; - GeRootModelPtr ge_root_model; - GEThreadLocalContext context; - RunAsyncCallback callback; - }; - void AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node); void RemoveGraphNode(GraphId graph_id); bool HasGraphNode(GraphId graph_id); Status GetGraphNode(const GraphId &graph_id, GraphNodePtr &out); - std::shared_ptr GetModelListener() const { return graph_run_listener_; } - static Status ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id, const SubGraphInfoPtr &sub_graph_info_ptr, const std::string &root_graph_name, uint64_t session_id, const struct error_message::Context &error_context, const GEThreadLocalContext &ge_context); - Status ParseInputsDims(const std::vector &input_tensor); - void ParseInputsDimsForData(const std::vector &input_tensor); - Status ParseInputsDimsForGetNexNosinkAndData(const vector &dynamic_nodes, - const std::vector &input_tensor); + Status RunCustomPass(const GraphNodePtr &graph_node); Status PreRun(const GraphNodePtr &graph_node, const std::vector &inputs, GeRootModelPtr &ge_root_model, uint64_t session_id = INVALID_SESSION_ID); @@ -350,10 +334,6 @@ class GraphManager { Status SubexpressionMigration(ComputeGraphPtr &compute_graph); - Status LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node); - - Status CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node); - bool CheckModelLoad(const GeRootModelPtr &ge_model, bool load_flag); Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node); @@ -368,12 +348,12 @@ class GraphManager { void RemoveModelCacheHelper(const GraphId &graph_id); ModelCacheHelperPtr FindModelCacheHelper(GraphId graph_id); - static void PreRunThread(GraphManager *graph_manager); - static void RunThread(GraphManager *graph_manager); - static void StopQueue(GraphManager *graph_manager); - static void ReturnError(GraphManager *graph_manager, RunAsyncCallback callback, Status ret, const string &log); - static void ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback, - Status ret, const string &log); + void SetRunContext(const GraphNodePtr &graph_node); + void PushGraph(const RunArgs &args); + + void PreRunThread(); + void StopQueue(); + void ReturnError(RunAsyncCallback callback, Status ret, const string &log); void ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph); @@ -409,11 +389,7 @@ class GraphManager { CompilerStages &GetCompilerStages(GraphId graph_id); void RemoveCompilerStages(GraphId graph_id); - static Status CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args, GraphNodePtr &graph_node, - GeRootModelPtr &ge_root_model); - - void ReleaseMemory(const GeModelPtr &ge_model, GraphNodePtr &graph_node, const std::vector &model_ids, - uint32_t graph_id, uint64_t session_id); + Status 
CheckIncreBuildAndPreRun(const PreRunArgs &args, GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model); Status CheckRepeatAdd(uint32_t graph_id, bool &is_added); @@ -431,34 +407,25 @@ class GraphManager { static Status CheckGraphAdded(const GraphId &graph_id, const Graph &graph); - std::atomic_bool thread_run_flag_; + std::atomic_bool thread_run_flag_{false}; BlockingQueue prerun_args_q_{}; - BlockingQueue run_args_q_{}; std::thread prerun_thread_; - std::thread run_thread_; ComputeGraphPtr compute_graph_; std::map graph_map_; std::map cache_helper_map_; - // for run graph synchronous return - std::mutex sync_run_mutex_; - std::condition_variable condition_; - // run graph synchronization call back listener - std::shared_ptr graph_run_listener_; - // summary and checkpoint callback function list for ME, key is summary or checkpoint std::map &)>> me_callback_map_; std::map &)>> callback_map_; - bool init_flag_; - + bool init_flag_{false}; GraphManagerOptions options_; GraphContextPtr graph_context_ = nullptr; map omg_contexts_; map compiler_stages_; - GraphExecutor graph_executor_; + Executor *executor_{nullptr}; VarAccelerateCtrl var_acc_ctrl_; diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h index 6ed76e57..9cec6b6d 100644 --- a/ge/graph/manager/graph_manager_utils.h +++ b/ge/graph/manager/graph_manager_utils.h @@ -33,6 +33,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" +#include "graph/common/local_context.h" #include "external/graph/graph.h" #include "graph/model.h" #include "model/ge_model.h" @@ -154,6 +155,9 @@ class GraphNode { bool GetRunFlag() const { return run_flag_; } void SetRunFlag(bool flag) { run_flag_ = flag; } + void SetOmeContext(const OmeContext &context) { context_ = context; } + OmeContext &GetOmeContext() { return context_; } + bool IsAsync() const { return async_; } void SetAsync(bool flag) { async_ = flag; } @@ -196,6 +200,8 @@ class GraphNode { bool run_flag_; std::vector subgraph_ptr_list_; + OmeContext context_; + GraphPtr graph_; ComputeGraphPtr compute_graph_; bool build_flag_; diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index 58b78f41..236ec783 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -124,7 +124,7 @@ Status InnerSession::Initialize() { GE_CHK_STATUS_RET(dump_properties.InitByOptions(), "Init dump properties failed."); GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "[Add][DumpProperties] failed."); - ret = graph_manager_.Initialize(options_); + ret = InnerInitialize(); if (ret != SUCCESS) { GELOGE(ret, "[Init][GraphManager] failed, InnerSession:%lu.", session_id_); REPORT_CALL_ERROR("E19999", "GraphManager initialize failed, InnerSession:%lu.", session_id_); @@ -136,7 +136,7 @@ Status InnerSession::Initialize() { if (ret != SUCCESS) { GELOGE(ret, "[Set][MemoryMallocSize] failed."); REPORT_CALL_ERROR("E19999", "VarManager SetMemoryMallocSize failed, InnerSession:%lu.", session_id_); - (void)graph_manager_.Finalize(); + (void)InnerFinalize(); GE_CHK_STATUS(RemoveDumpProperties(), "[Remove][DumpProperties] failed."); GE_CHK_RT(rtDeviceReset(static_cast(GetContext().DeviceId()))); return ret; @@ -162,7 +162,7 @@ Status InnerSession::Finalize() { return SUCCESS; } UpdateThreadContext(std::map{}); - Status ret = graph_manager_.Finalize(); + Status ret = InnerFinalize(); if (ret != SUCCESS) { // Subsequent code execution is required, so no return is required 
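// Ordering note for the two members wired together below (assumed rationale):
// GraphManager keeps only a raw Executor* into model_executor_, so the executor
// is brought up first and torn down last:
//   InnerInitialize():  model_executor_.Initialize(options_);                   // 1. bring up executor
//                       graph_manager_.Initialize(options_, &model_executor_);  // 2. hand it to the manager
//   InnerFinalize():    graph_manager_.Finalize();    // 1. stop compile threads, unload graphs
//                       model_executor_.Finalize();   // 2. stop the run thread, release models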
GELOGE(ret, "[Finalize][GraphManager] failed, InnerSession:%lu.", session_id_); @@ -188,6 +188,44 @@ Status InnerSession::Finalize() { return ret; } +Status InnerSession::InnerInitialize() { + Status ret = model_executor_.Initialize(options_); + if (ret != SUCCESS) { + GELOGE(ret, "[Init][GraphExecutor] failed, InnerSession:%lu.", session_id_); + REPORT_CALL_ERROR("E19999", "GraphExecutor initialize failed, InnerSession:%lu.", session_id_); + GE_CHK_STATUS(RemoveDumpProperties(), "[Remove][DumpProperties] failed."); + return ret; + } + + ret = graph_manager_.Initialize(options_, &model_executor_); + if (ret != SUCCESS) { + GELOGE(ret, "[Init][GraphManager] failed, InnerSession:%lu.", session_id_); + REPORT_CALL_ERROR("E19999", "GraphManager initialize failed, InnerSession:%lu.", session_id_); + GE_CHK_STATUS(RemoveDumpProperties(), "[Remove][DumpProperties] failed."); + return ret; + } + + return SUCCESS; +} + +Status InnerSession::InnerFinalize() { + Status ret = graph_manager_.Finalize(); + if (ret != SUCCESS) { + // Subsequent code execution is required, so no return is required + GELOGE(ret, "[Finalize][GraphManager] failed, InnerSession:%lu.", session_id_); + REPORT_CALL_ERROR("E19999", "GraphManager Finalize failed, InnerSession:%lu.", session_id_); + } + + ret = model_executor_.Finalize(); + if (ret != SUCCESS) { + // Subsequent code execution is required, so no return is required + GELOGE(ret, "[Finalize][GraphExecutor] failed, InnerSession:%lu.", session_id_); + REPORT_CALL_ERROR("E19999", "GraphExecutor Finalize failed, InnerSession:%lu.", session_id_); + } + + return SUCCESS; +} + Status InnerSession::GetVariable(const std::string &name, Tensor &val) { UpdateThreadContext(std::map{}); return graph_manager_.GetVariable(name, val); diff --git a/ge/session/inner_session.h b/ge/session/inner_session.h index 35fe4692..afc273ac 100644 --- a/ge/session/inner_session.h +++ b/ge/session/inner_session.h @@ -23,6 +23,7 @@ #include "framework/common/ge_types.h" #include "external/ge/ge_api_types.h" #include "graph/manager/graph_manager.h" +#include "graph/execute/model_executor.h" namespace ge { class InnerSession { @@ -82,10 +83,14 @@ class InnerSession { void SetRtSocVersion(); private: + Status InnerInitialize(); + Status InnerFinalize(); + bool init_flag_; uint64_t session_id_; std::map options_; GraphManager graph_manager_; + ModelExecutor model_executor_; std::mutex resource_mutex_; // AddGraph, RemoveGraph and Finalize use void UpdateThreadContext(const std::map &options); void UpdateThreadContext(uint32_t graph_id); diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 085bb5ff..a73bcfda 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -268,7 +268,7 @@ class MemcpyAsyncTask : public OpTask { friend class SingleOpModel; friend class RtsKernelTaskBuilder; - uintptr_t addresses_[kAddressNum]; + uintptr_t addresses_[kAddressNum] = {0}; size_t dst_max_; size_t count_; rtMemcpyKind_t kind_; diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index d8fcd6c3..7832c7b0 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -161,8 +161,9 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/profiling/profiling_manager.cc" "${GE_CODE_DIR}/ge/common/profiling/ge_profiling.cc" "${GE_CODE_DIR}/ge/graph/manager/host_mem_manager.cc" - "${GE_CODE_DIR}/ge/graph/manager/memory_api.cc" + "${GE_CODE_DIR}/ge/graph/manager/memory_api.cc" "${GE_CODE_DIR}/ge/session/inner_session.cc" + 
"${GE_CODE_DIR}/ge/graph/execute/model_executor.cc" "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc" "${GE_CODE_DIR}/ge/graph/execute/graph_execute.cc" "${GE_CODE_DIR}/ge/graph/preprocess/graph_preprocess.cc" @@ -469,6 +470,7 @@ set(GRAPH_BUILD_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/client/ge_api.cc" "${GE_CODE_DIR}/ge/session/inner_session.cc" "${GE_CODE_DIR}/ge/session/session_manager.cc" + "${GE_CODE_DIR}/ge/graph/execute/model_executor.cc" "${GE_CODE_DIR}/ge/engine_manager/dnnengine_manager.cc" "${GE_CODE_DIR}/ge/plugin/engine/engine_manage.cc" "${GE_CODE_DIR}/ge/graph/build/logical_stream_allocator.cc" @@ -810,6 +812,7 @@ set(MULTI_PARTS_TEST_FILES "graph/build/task_generator_unittest.cc" "graph/build/buffer_pool_mem_assigner_unittest.cc" "graph/execute/graph_execute_unittest.cc" + "graph/execute/model_executor_unittest.cc" "graph/preprocess/graph_preprocess_unittest.cc" "graph/manager/hcom_util_unittest.cc" "graph/manager/graph_caching_allocator_unittest.cc" diff --git a/tests/ut/ge/common/datatype_transfer_unittest.cc b/tests/ut/ge/common/datatype_transfer_unittest.cc index c311a7cf..ea131b2c 100644 --- a/tests/ut/ge/common/datatype_transfer_unittest.cc +++ b/tests/ut/ge/common/datatype_transfer_unittest.cc @@ -47,7 +47,7 @@ TEST_F(UtestDataTypeTransfer, fp16_fp32) { EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); bool is_equal = true; - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { if (abs((reinterpret_cast(result.data.get()))[i] - ret[i]) > 1.0e-6) { is_equal = false; break; @@ -60,7 +60,7 @@ TEST_F(UtestDataTypeTransfer, fp16_fp32) { CastArgs args2{reinterpret_cast(ret), sizeof(ret) / sizeof(ret[0]), DT_FLOAT, DT_FLOAT16}; EXPECT_EQ(transfer2.TransDataType(args2, result2), SUCCESS); EXPECT_EQ(result2.length, sizeof(data)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result2.data.get()))[i].val, data[i].val); } EXPECT_EQ(TransDataType(args2, result2), SUCCESS); @@ -81,7 +81,7 @@ TEST_F(UtestDataTypeTransfer, int32_fp16) { CastArgs args{reinterpret_cast(data), sizeof(ret) / sizeof(ret[0]), DT_INT32, DT_FLOAT16}; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result.data.get()))[i].val, ret[i].val); } @@ -91,7 +91,7 @@ TEST_F(UtestDataTypeTransfer, int32_fp16) { EXPECT_EQ(transfer2.TransDataType(args2, result2), SUCCESS); EXPECT_EQ(result2.length, sizeof(data)); bool is_equal = true; - for (int i = 0; i < sizeof(data) / sizeof(data[0]); ++i) { + for (size_t i = 0; i < sizeof(data) / sizeof(data[0]); ++i) { if (abs((reinterpret_cast(result2.data.get()))[i] - data[i]) / abs(data[i]) > 0.05) { is_equal = false; break; @@ -154,7 +154,7 @@ TEST_F(UtestDataTypeTransfer, fp32_fp16) { EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); bool is_equal = true; - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { if (abs((reinterpret_cast(result.data.get()))[i] - ret[i]) > 1.0e-6) { is_equal = false; break; @@ -167,7 +167,7 @@ TEST_F(UtestDataTypeTransfer, fp32_fp16) { CastArgs args2{reinterpret_cast(ret), sizeof(data) / sizeof(data[0]), DT_FLOAT, 
DT_FLOAT16}; EXPECT_EQ(transfer2.TransDataType(args2, result2), SUCCESS); EXPECT_EQ(result2.length, sizeof(data)); - for (int i = 0; i < sizeof(data) / sizeof(data[0]); ++i) { + for (size_t i = 0; i < sizeof(data) / sizeof(data[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result2.data.get()))[i].val, data[i].val); } } @@ -238,7 +238,7 @@ TEST_F(UtestDataTypeTransfer, uint8_fp32) { DataTypeTransfer transfer; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_EQ((reinterpret_cast(result.data.get()))[i], ret[i]); } } @@ -259,7 +259,7 @@ TEST_F(UtestDataTypeTransfer, uint8_int32) { DataTypeTransfer transfer; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_EQ((reinterpret_cast(result.data.get()))[i], ret[i]); } } @@ -282,7 +282,7 @@ TEST_F(UtestDataTypeTransfer, fp32_int32) { DataTypeTransfer transfer; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result.data.get()))[i], ret[i]); } } @@ -304,7 +304,7 @@ TEST_F(UtestDataTypeTransfer, int32_fp32) { DataTypeTransfer transfer; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result.data.get()))[i], ret[i]); } } @@ -329,7 +329,7 @@ TEST_F(UtestDataTypeTransfer, int32_uint8) { DataTypeTransfer transfer; EXPECT_EQ(transfer.TransDataType(args, result), SUCCESS); EXPECT_EQ(result.length, sizeof(ret)); - for (int i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { + for (size_t i = 0; i < sizeof(ret) / sizeof(ret[0]); ++i) { EXPECT_FLOAT_EQ((reinterpret_cast(result.data.get()))[i], ret[i]); } } diff --git a/tests/ut/ge/graph/execute/model_executor_unittest.cc b/tests/ut/ge/graph/execute/model_executor_unittest.cc new file mode 100644 index 00000000..33643993 --- /dev/null +++ b/tests/ut/ge/graph/execute/model_executor_unittest.cc @@ -0,0 +1,327 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#define protected public +#define private public +#include "graph/execute/model_executor.h" +#include "graph/manager/graph_manager.h" +#include "graph/load/model_manager/model_manager.h" +#include "graph/load/model_manager/davinci_model.h" + +using namespace std; + +namespace ge { +class UtestModelExecutorTest : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) { + OpDescPtr op_desc = std::make_shared(name, type); + op_desc->SetStreamId(0); + static int32_t index = 0; + op_desc->SetId(index++); + + GeTensorDesc tensor(GeShape(), FORMAT_ND, DT_INT64); + TensorUtils::SetSize(tensor, 64); + vector input_offset; + for (int i = 0; i < in_num; i++) { + op_desc->AddInputDesc(tensor); + input_offset.emplace_back(index * 64 + i * 64); + } + op_desc->SetInputOffset(input_offset); + + vector output_offset; + for (int i = 0; i < out_num; i++) { + op_desc->AddOutputDesc(tensor); + output_offset.emplace_back(index * 64 + in_num * 64 + i * 64); + } + op_desc->SetOutputOffset(output_offset); + + op_desc->SetWorkspace({}); + op_desc->SetWorkspaceBytes({}); + op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE"); + + return graph.AddNode(op_desc); +} + +TEST_F(UtestModelExecutorTest, test_load_graph_sync) { + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + auto compute_graph = MakeShared("test_graph"); + GeRootModelPtr ge_root_model = MakeShared(compute_graph); + + GeModelPtr ge_model = MakeShared(); + ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(compute_graph)); + ge_root_model->SetSubgraphInstanceNameToModel(compute_graph->GetName(), ge_model); + + GraphId graph_id = 1; + GraphNodePtr graph_node = MakeShared(graph_id); + graph_node->SetGeRootModel(ge_root_model); + graph_node->SetLoadFlag(true); + graph_node->SetAsync(false); + + EXPECT_EQ(model_executor.LoadGraph(ge_root_model, graph_node), SUCCESS); + EXPECT_EQ(model_executor.UnloadGraph(ge_root_model, graph_id), SUCCESS); + + EXPECT_EQ(model_executor.Finalize(), SUCCESS); +} + +TEST_F(UtestModelExecutorTest, test_load_graph_async) { + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + Graph graph("test_graph"); + auto compute_graph = MakeShared("test_graph"); + GeRootModelPtr ge_root_model = MakeShared(compute_graph); + + GeModelPtr ge_model = MakeShared(); + ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(compute_graph)); + ge_root_model->SetSubgraphInstanceNameToModel(compute_graph->GetName(), ge_model); + + GraphId graph_id = 1; + GraphNodePtr graph_node = MakeShared(graph_id); + graph_node->SetGeRootModel(ge_root_model); + graph_node->SetLoadFlag(true); + graph_node->SetAsync(true); + + EXPECT_EQ(model_executor.LoadGraph(ge_root_model, graph_node), SUCCESS); + + EXPECT_EQ(model_executor.UnloadGraph(ge_root_model, graph_id), SUCCESS); + + EXPECT_EQ(model_executor.Finalize(), SUCCESS); +} + +TEST_F(UtestModelExecutorTest, test_load_graph_failed) { + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + Graph graph("test_graph"); + auto compute_graph = MakeShared("test_graph"); + GeRootModelPtr ge_root_model = MakeShared(compute_graph); + + GraphId graph_id = 1; + GraphNodePtr graph_node = MakeShared(graph_id); + graph_node->SetGeRootModel(ge_root_model); + graph_node->SetLoadFlag(true); + graph_node->SetAsync(true); + + // GeModel is null, DavinciModel::Assign 
will return FAILED + setenv(kEnvGeuseStaticMemory, "1", true); + EXPECT_EQ(model_executor.LoadGraph(ge_root_model, graph_node), FAILED); + EXPECT_EQ(model_executor.UnloadGraph(ge_root_model, graph_id), SUCCESS); + + EXPECT_EQ(model_executor.Finalize(), SUCCESS); + unsetenv(kEnvGeuseStaticMemory); +} + +TEST_F(UtestModelExecutorTest, test_check_and_release_memory) { + { + auto listener = MakeShared(); + shared_ptr davinci_model1 = MakeShared(1, listener); + davinci_model1->SetId(1); + ModelManager::GetInstance()->InsertModel(1, davinci_model1); + shared_ptr davinci_model2 = MakeShared(2, listener); + davinci_model1->SetId(2); + ModelManager::GetInstance()->InsertModel(2, davinci_model2); + } + + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + GeModelPtr ge_model = make_shared(); + int64_t memory_size = 25 * 1024UL * 1024UL * 1024UL; + int64_t weight_size = 25 * 1024UL * 1024UL * 1024UL; + uint64_t session_id = 0; + EXPECT_TRUE(AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, memory_size)); + EXPECT_TRUE(AttrUtils::SetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, weight_size)); + EXPECT_TRUE(AttrUtils::SetInt(ge_model, MODEL_ATTR_SESSION_ID, session_id)); + + GraphId graph_id = 1; + GraphNodePtr graph_node = MakeShared(graph_id); + model_executor.AddGraphNode(graph_id, graph_node); + + ComputeGraphPtr compute_graph = MakeShared("test_graph"); + GeRootModelPtr ge_root_model = MakeShared(compute_graph); + ge_root_model->SetModelId(1); + ge_root_model->SetModelId(2); + graph_node->SetGeRootModel(ge_root_model); + graph_node->SetLoadFlag(true); + + EXPECT_EQ(model_executor.CheckAndReleaseMemory(ge_model, graph_node), SUCCESS); + EXPECT_EQ(model_executor.Finalize(), SUCCESS); +} + +TEST_F(UtestModelExecutorTest, parse_inputs_dims_data) { + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + OmeContext context; + SetLocalOmeContext(context); + ComputeGraphPtr compute_graph = MakeShared("test_graph"); + const auto data1 = CreateNode(*compute_graph, DATA, "data1", 1, 1); + const auto next1 = CreateNode(*compute_graph, GETNEXT, "data1", 1, 1); + + Tensor tensor; + std::vector input_tensors; + input_tensors.emplace_back(tensor); + EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS); // dynamic_node_type is empty, just return + + context.dynamic_node_type = DATA; + EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS); // ParseInputsDimsForData + + context.getnext_nosink_nodes.emplace_back(next1); + EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS); // ParseInputsDimsForGetNexNosinkAndData + + EXPECT_EQ(model_executor.Finalize(), SUCCESS); +} + +TEST_F(UtestModelExecutorTest, parse_inputs_dims_getnext) { + ModelExecutor model_executor; + EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + + OmeContext context; + SetLocalOmeContext(context); + ComputeGraphPtr compute_graph = MakeShared("test_graph"); + const auto data1 = CreateNode(*compute_graph, DATA, "data1", 1, 1); + const auto next1 = CreateNode(*compute_graph, GETNEXT, "data1", 1, 1); + + Tensor tensor; + std::vector input_tensors; + input_tensors.emplace_back(tensor); + + context.dynamic_node_type = GETNEXT; + EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS); // just getnext_sink + + context.getnext_nosink_nodes.emplace_back(next1); + EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS); // ParseInputsDimsForData + + context.data_nodes.emplace_back(data1); + 
+  EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), PARAM_INVALID);  // ParseInputsDimsForGetNexNosinkAndData
+  AttrUtils::SetInt(next1->GetOpDesc(), ATTR_NAME_INDEX, 0);
+  EXPECT_EQ(model_executor.ParseInputsDims(input_tensors), SUCCESS);  // ParseInputsDimsForGetNexNosinkAndData
+
+  EXPECT_EQ(model_executor.Finalize(), SUCCESS);
+}
+
+TEST_F(UtestModelExecutorTest, test_run_thread) {
+  ModelExecutor model_executor;
+  EXPECT_EQ(model_executor.Initialize({}), SUCCESS);
+
+  GraphId graph_id = 1;
+  uint64_t session_id = 0;
+  error_message::Context error_context;
+  GEThreadLocalContext context;
+  const auto callback = [](Status status, std::vector<ge::Tensor> &outputs) { };
+
+  auto compute_graph = MakeShared<ComputeGraph>("test_graph");
+  GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
+
+  GeModelPtr ge_model = MakeShared<GeModel>();
+  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(compute_graph));
+  ge_root_model->SetSubgraphInstanceNameToModel(compute_graph->GetName(), ge_model);
+
+  GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
+  graph_node->SetGeRootModel(ge_root_model);
+  graph_node->SetLoadFlag(false);
+  graph_node->SetAsync(true);
+  graph_node->IncreaseLoadCount();
+  graph_node->Lock();
+
+  Tensor tensor;
+  std::vector<Tensor> input_tensors;
+  input_tensors.emplace_back(tensor);
+
+  RunArgs run_args{graph_node, graph_id, session_id, error_context, input_tensors, ge_root_model, context, callback};
+  EXPECT_EQ(model_executor.PushGraph(run_args), SUCCESS);
+
+  while (model_executor.run_args_q_.Size() > 0) {
+    usleep(10);  // 0.01ms, wait for RunThread to drain the queue.
+  }
+  EXPECT_EQ(model_executor.Finalize(), SUCCESS);
+}
+
+static void test_run_graph(ModelExecutor &model_executor) {
+  auto compute_graph = MakeShared<ComputeGraph>("test_graph");
+  GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
+
+  GeModelPtr ge_model = MakeShared<GeModel>();
+  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(compute_graph));
+  ge_root_model->SetSubgraphInstanceNameToModel(compute_graph->GetName(), ge_model);
+
+  GraphId graph_id = 1;
+  GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
+  graph_node->SetGeRootModel(ge_root_model);
+  graph_node->SetLoadFlag(false);
+  graph_node->SetAsync(false);  // RunGraph is synchronous.
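+  // load_flag is false, so LoadGraph below performs a real load first; the
+  // subsequent RunGraph then executes on the calling thread and returns its
+  // status directly.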
+  EXPECT_EQ(model_executor.LoadGraph(ge_root_model, graph_node), SUCCESS);
+
+  std::vector<GeTensor> inputs;
+  std::vector<GeTensor> outputs;
+  EXPECT_EQ(model_executor.RunGraph(graph_node, graph_id, inputs, outputs), SUCCESS);
+}
+
+TEST_F(UtestModelExecutorTest, test_run_graph_train) {
+  GetThreadLocalContext().SetGlobalOption({{OPTION_GRAPH_RUN_MODE, "1"}});
+  ModelExecutor model_executor;
+  EXPECT_EQ(model_executor.Initialize({}), SUCCESS);
+  test_run_graph(model_executor);
+  EXPECT_EQ(model_executor.Finalize(), SUCCESS);
+}
+
+TEST_F(UtestModelExecutorTest, test_run_graph_infer) {
+  GetThreadLocalContext().SetGlobalOption({});
+  GetThreadLocalContext().SetSessionOption({});
+  GetThreadLocalContext().SetGraphOption({});
+  ModelExecutor model_executor;
+  EXPECT_EQ(model_executor.Initialize({}), SUCCESS);
+  test_run_graph(model_executor);
+  EXPECT_EQ(model_executor.Finalize(), SUCCESS);
+}
+
+TEST_F(UtestModelExecutorTest, test_run_graph_with_stream) {
+  ModelExecutor model_executor;
+  EXPECT_EQ(model_executor.Initialize({}), SUCCESS);
+
+  GraphId graph_id = 1;
+  auto compute_graph = MakeShared<ComputeGraph>("test_graph");
+  GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
+
+  GeModelPtr ge_model = MakeShared<GeModel>();
+  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(compute_graph));
+  ge_root_model->SetSubgraphInstanceNameToModel(compute_graph->GetName(), ge_model);
+
+  GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
+  graph_node->SetGeRootModel(ge_root_model);
+  graph_node->SetLoadFlag(false);
+  graph_node->SetAsync(true);
+
+  GeTensor tensor;
+  std::vector<GeTensor> inputs{tensor};
+  std::vector<GeTensor> outputs;
+
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  EXPECT_EQ(model_executor.RunGraphWithStream(graph_node, graph_id, stream, inputs, outputs), 145003);
+
+  EXPECT_EQ(model_executor.Finalize(), SUCCESS);
+  rtStreamDestroy(stream);
+}
+}  // namespace ge
diff --git a/tests/ut/ge/graph/manager/graph_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_manager_unittest.cc
index 9bae10eb..9663e90f 100644
--- a/tests/ut/ge/graph/manager/graph_manager_unittest.cc
+++ b/tests/ut/ge/graph/manager/graph_manager_unittest.cc
@@ -15,20 +15,9 @@
  */
 
 #include
+
 #include
 #include
-#define protected public
-#define private public
-#include "graph/manager/graph_manager.h"
-#include "graph/load/model_manager/model_manager.h"
-#include "graph/load/model_manager/davinci_model.h"
-#define const
-#include "common/helper/model_cache_helper.h"
-#undef const
-#include "init/gelib.h"
-#undef private
-#undef public
-
 #include
 #include
 #include
@@ -38,6 +27,14 @@
 #include
 #include
 
+#define protected public
+#define private public
+#include "graph/manager/graph_manager.h"
+#define const
+#include "common/helper/model_cache_helper.h"
+#undef const
+#include "init/gelib.h"
+
 #include "common/math/math_util.h"
 #include "common/thread_pool.h"
 #include "common/dump/dump_manager.h"
@@ -121,7 +118,6 @@
 using namespace std;
 using namespace testing;
-using namespace ge;
 using namespace domi;
 
 namespace {
@@ -129,6 +125,8 @@ const uint32_t kNotAdded = 0;
 const uint32_t kStartAdd = 1;
 const uint32_t kDoneAdded = 2;
 }
+
+namespace ge {
 class UtestGraphManagerTest : public testing::Test {
  protected:
   void SetUp() {}
@@ -136,6 +134,31 @@ class UtestGraphManagerTest : public testing::Test {
   void TearDown() {}
 };
 
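+// Test double for the Executor interface: it lets the GraphManager tests run
+// without real model loading or execution; every operation simply reports
+// SUCCESS.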
+class StubExecutor : public Executor {
+ public:
+  Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
+    return SUCCESS;
+  }
+
+  Status UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) {
+    return SUCCESS;
+  }
+
+  Status PushGraph(const RunArgs &args) {
+    return SUCCESS;
+  }
+
+  Status RunGraph(const GraphNodePtr &graph_node, GraphId graph_id,
+                  const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
+    return SUCCESS;
+  }
+
+  Status RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream,
+                            const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
+    return SUCCESS;
+  }
+};
+
 void CreateGraph(Graph &graph) {
   TensorDesc desc(ge::Shape({1, 3, 224, 224}));
   uint32_t size = desc.GetShape().GetShapeSize();
@@ -288,26 +311,20 @@ TEST_F(UtestGraphManagerTest, test_remove_graph_1) {
 TEST_F(UtestGraphManagerTest, test_remove_graph_2) {
   GraphId graph_id = 1;
   GraphManager graph_manager;
+  StubExecutor stub_executor;
+  graph_manager.executor_ = &stub_executor;
+
   GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
   Graph graph("test_graph");
   CreateGraph(graph);
   auto compute_graph = GraphUtils::GetComputeGraph(graph);
   GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
-  auto model_manager = ModelManager::GetInstance();
-  auto listener = MakeShared();
-  shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, listener);
-  davinci_model1->SetId(1);
-  shared_ptr<DavinciModel> davinci_model2 = MakeShared<DavinciModel>(2, listener);
-  davinci_model1->SetId(2);
-  model_manager->InsertModel(1, davinci_model1);
-  model_manager->InsertModel(2, davinci_model2);
   ge_root_model->SetModelId(1);
   ge_root_model->SetModelId(2);
   graph_node->SetGeRootModel(ge_root_model);
   graph_node->SetLoadFlag(true);
   graph_manager.AddGraphNode(graph_id, graph_node);
-  Status status = graph_manager.RemoveGraph(graph_id);
-  EXPECT_EQ(status, ge::SUCCESS);
+  EXPECT_EQ(graph_manager.RemoveGraph(graph_id), SUCCESS);
 }
 
 TEST_F(UtestGraphManagerTest, test_pre_run_thread) {
@@ -327,7 +344,7 @@ TEST_F(UtestGraphManagerTest, test_pre_run_thread) {
 
   GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
   graph_manager.AddGraphNode(graph_id, graph_node);
-  graph_manager.PreRunThread(&graph_manager);
+  graph_manager.PreRunThread();
 
   // end with failed
 }
@@ -355,48 +372,10 @@ TEST_F(UtestGraphManagerTest, test_pre_run_thread_2) {
   graph_manager.AddGraphNode(graph_id, graph_node_2);
   ret = graph_manager.prerun_args_q_.Push({graph_id, input_tensor, session_id, error_context, context, callback});
   EXPECT_EQ(ret, true);
-  graph_manager.PreRunThread(&graph_manager);
+  graph_manager.PreRunThread();
 
   // end with failed
 }
 
-TEST_F(UtestGraphManagerTest, test_check_and_release_memory) {
-
-  GraphManager graph_manager;
-  GeModelPtr ge_model = make_shared<GeModel>();
-  int64_t memory_size = 25 * 1024UL * 1024UL * 1024UL;
-  int64_t weight_size = 25 * 1024UL * 1024UL * 1024UL;
-  uint64_t session_id = 0;
-  ge::AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, memory_size);
-  ge::AttrUtils::SetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, weight_size);
-  ge::AttrUtils::SetInt(ge_model, MODEL_ATTR_SESSION_ID, session_id);
-
-
-  GraphId graph_id = 1;
-  GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
-  graph_manager.AddGraphNode(graph_id, graph_node);
-  graph_manager.IncreaseGraphCount(graph_id);
-  graph_manager.IncreaseGraphCount(graph_id);
-
-  auto model_manager = ModelManager::GetInstance();
-  auto listener = MakeShared();
-  shared_ptr<DavinciModel> davinci_model1 = MakeShared<DavinciModel>(1, listener);
-  davinci_model1->SetId(1);
-  shared_ptr<DavinciModel> davinci_model2 = MakeShared<DavinciModel>(2, listener);
-  davinci_model1->SetId(2);
-  model_manager->InsertModel(1, davinci_model1);
-  model_manager->InsertModel(2, davinci_model2);
-  ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test_graph");
-  bool is_dynamic_shape = false;
-  (void)AttrUtils::GetBool(compute_graph, ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED, is_dynamic_shape);
-  GeRootModelPtr ge_root_model = MakeShared<GeRootModel>(compute_graph);
-  ge_root_model->SetModelId(1);
-  ge_root_model->SetModelId(2);
-  graph_node->SetGeRootModel(ge_root_model);
-  graph_node->SetLoadFlag(true);
-  Status status = graph_manager.CheckAndReleaseMemory(ge_model, graph_node);
-  EXPECT_EQ(status, ge::SUCCESS);
-}
-
 TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_1) {
   // no need to build
   GraphId graph_id = 1;
@@ -406,7 +385,7 @@ TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_1) {
   GraphManager::PreRunArgs arg;
   GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
   graph_node->SetBuildFlag(true);
-  Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
+  Status status = graph_manager.CheckIncreBuildAndPreRun(arg, graph_node, ge_root_model);
   EXPECT_EQ(status, ge::SUCCESS);
 }
 
@@ -422,7 +401,7 @@ TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_2) {
   graph_node->SetBuildFlag(true);
   graph_node->Lock();
   graph_manager.var_acc_ctrl_.graph_ids_need_rebuild_.insert(graph_id);
-  Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
+  Status status = graph_manager.CheckIncreBuildAndPreRun(arg, graph_node, ge_root_model);
   EXPECT_EQ(status, ge::PARAM_INVALID);
 }
 
@@ -437,7 +416,7 @@ TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_3) {
   GraphNodePtr graph_node = MakeShared<GraphNode>(graph_id);
   graph_node->SetBuildFlag(false);
   graph_node->Lock();
-  Status status = graph_manager.CheckIncreBuildAndPreRun(&graph_manager, arg, graph_node, ge_root_model);
+  Status status = graph_manager.CheckIncreBuildAndPreRun(arg, graph_node, ge_root_model);
   EXPECT_NE(status, ge::SUCCESS);
 }
 
@@ -471,14 +450,6 @@ TEST_F(UtestGraphManagerTest, test_add_graph_with_copy_fail) {
   EXPECT_NE(status, ge::SUCCESS);
 }
 
-TEST_F(UtestGraphManagerTest, ParseInputsDimsForData_success) {
-  GraphManager graph_manager;
-  std::vector<ge::Tensor> input_tensors;
-  ge::Tensor tensor;
-  input_tensors.emplace_back(tensor);
-  graph_manager.ParseInputsDimsForData(input_tensors);
-}
-
 TEST_F(UtestGraphManagerTest, test_prerunthread_failed_1) {
   GraphId graph_id = 1;
   GraphManager graph_manager;
@@ -509,7 +480,7 @@ TEST_F(UtestGraphManagerTest, test_prerunthread_failed_1) {
   graph_node->SetRunFlag(false);
   // function return.
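+  // PreRunThread is an instance method now, so the worker thread below is
+  // started with a pointer-to-member plus the GraphManager instance.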
   graph_manager.prerun_args_q_.Push(args);
-  auto t1 = std::thread(GraphManager::PreRunThread, &graph_manager);
+  auto t1 = std::thread(&GraphManager::PreRunThread, &graph_manager);
   if (t1.joinable()) {
     t1.join();
   }
@@ -549,7 +520,7 @@ TEST_F(UtestGraphManagerTest, test_prerunthread_failed_2) {
   int ret = setenv("ENABLE_NETWORK_ANALYSIS_DEBUG", "1", 1);
   EXPECT_EQ(ret, 0);
   graph_manager.prerun_args_q_.Push(args);
-  auto t1 = std::thread(GraphManager::PreRunThread, &graph_manager);
+  auto t1 = std::thread(&GraphManager::PreRunThread, &graph_manager);
   if (t1.joinable()) {
     t1.join();
   }
@@ -593,3 +564,4 @@ TEST_F(UtestGraphManagerTest, ChangeAndDeleteConst_success) {
   auto all_nodes = graph->GetDirectNode();
   EXPECT_EQ(all_nodes.size(), 3);
 }
+}  // namespace ge
diff --git a/tests/ut/ge/graph/passes/folding_kernel/gather_v2_kernel_unittest.cc b/tests/ut/ge/graph/passes/folding_kernel/gather_v2_kernel_unittest.cc
index 0083146b..ad165d25 100644
--- a/tests/ut/ge/graph/passes/folding_kernel/gather_v2_kernel_unittest.cc
+++ b/tests/ut/ge/graph/passes/folding_kernel/gather_v2_kernel_unittest.cc
@@ -92,7 +92,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis0VersionA) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -139,7 +139,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis0VersionB) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {3, 3};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -186,7 +186,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT64Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   int64_t *data_buf = (int64_t *)tensor_out->GetData().data();
   vector<int64_t> expect_out = {3, 3};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -233,7 +233,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {11, 12, 13, 14, 15, 16, 17, 18, 19, 11, 12, 13, 14, 15, 16, 17, 18, 19};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -279,7 +279,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis0And1) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {11, 12, 13, 14, 15, 16, 17, 18, 19, 1, 2, 3, 4, 5, 6, 7, 8, 9};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -327,7 +327,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis1) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {4, 5, 6, 4, 5, 6, 14, 15, 16, 14, 15, 16};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -374,7 +374,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis2) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {1, 1, 4, 4, 7, 7, 11, 11, 14, 14, 17, 17};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -422,7 +422,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT32Axis3) {
   GeTensorPtr tensor_out = outputs[0];
   int32_t *data_buf = (int32_t *)tensor_out->GetData().data();
   vector<int32_t> expect_out = {1, 2, 4, 5, 7, 8, 11, 12, 14, 15, 17, 18, 1, 2, 4, 5, 7, 8, 11, 12, 14, 15, 17, 18};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -470,7 +470,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT8Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   int8_t *data_buf = (int8_t *)tensor_out->GetData().data();
   vector<int8_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -517,7 +517,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, INT16Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   int16_t *data_buf = (int16_t *)tensor_out->GetData().data();
   vector<int16_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -564,7 +564,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, UINT8Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   uint8_t *data_buf = (uint8_t *)tensor_out->GetData().data();
   vector<uint8_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -611,7 +611,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, UINT16Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   uint16_t *data_buf = (uint16_t *)tensor_out->GetData().data();
   vector<uint16_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -658,7 +658,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, UINT32Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   uint32_t *data_buf = (uint32_t *)tensor_out->GetData().data();
   vector<uint32_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -705,7 +705,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, UINT64Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   uint64_t *data_buf = (uint64_t *)tensor_out->GetData().data();
   vector<uint64_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     EXPECT_EQ(*(data_buf + i), expect_out[i]);
   }
 }
@@ -753,7 +753,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, DoubleAxis0) {
   GeTensorPtr tensor_out = outputs[0];
   double *data_buf = (double *)tensor_out->GetData().data();
   vector<double> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     double diff = *(data_buf + i) - expect_out[i];
     bool is_same = fabs(diff) < 0.0001 ? true : false;
     EXPECT_EQ(is_same, true);
@@ -802,7 +802,7 @@ TEST_F(UtestGraphPassesFoldingKernelGatherV2Kernel, Float16Axis0) {
   GeTensorPtr tensor_out = outputs[0];
   fp16_t *data_buf = (fp16_t *)tensor_out->GetData().data();
   vector<fp16_t> expect_out = {2, 2};
-  for (int i = 0; i < expect_out.size(); i++) {
+  for (size_t i = 0; i < expect_out.size(); i++) {
     double diff = (double)*(data_buf + i) - (double)expect_out[i];
     bool is_same = fabs(diff) < 0.0001 ? true : false;
     EXPECT_EQ(is_same, true);
diff --git a/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc b/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc
index 5157e510..c7d36582 100644
--- a/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc
@@ -33,7 +33,7 @@ protected:
   void SetUp() {}
   void TearDown() {}
 public:
-  NodePtr MakeNode(const ComputeGraphPtr &graph, uint32_t in_num, uint32_t out_num, string name, string type) {
+  NodePtr MakeNode(const ComputeGraphPtr &graph, int in_num, int out_num, string name, string type) {
     GeTensorDesc test_desc(GeShape(), FORMAT_NCHW, DT_FLOAT);
     auto op_desc = std::make_shared<OpDesc>(name, type);
     for (auto i = 0; i < in_num; ++i) {
diff --git a/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc b/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc
index 1b75a613..c752cea4 100644
--- a/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc
@@ -45,7 +45,7 @@ protected:
   }
 
 public:
-  NodePtr MakeNode(const ComputeGraphPtr &graph, uint32_t in_num, uint32_t out_num, string name, string type) {
+  NodePtr MakeNode(const ComputeGraphPtr &graph, int in_num, int out_num, string name, string type) {
     GeTensorDesc test_desc(GeShape(), FORMAT_NCHW, DT_FLOAT);
     auto op_desc = std::make_shared<OpDesc>(name, type);
     for (auto i = 0; i < in_num; ++i) {
diff --git a/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc b/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc
index 00157395..c633c0e1 100644
--- a/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc
@@ -32,7 +32,7 @@ class UtestSubgraphConstMigrationPass : public testing::Test {
   void TearDown() {}
 
 public:
-  NodePtr MakeNode(const ComputeGraphPtr &graph, uint32_t in_num, uint32_t out_num, string name, string type) {
+  NodePtr MakeNode(const ComputeGraphPtr &graph, int in_num, int out_num, string name, string type) {
     GeTensorDesc test_desc(GeShape(), FORMAT_NCHW, DT_FLOAT);
     auto op_desc = std::make_shared<OpDesc>(name, type);
     for (auto i = 0; i < in_num; ++i) {

From 27bdd194f78e263d4664a3dd56223751d7f2cbb2 Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Mon, 5 Jul 2021 22:39:28 +0800
Subject: [PATCH 160/226] delete defined but not used

---
 ge/graph/manager/graph_manager.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc
index b2528cdd..96dc59c5 100755
--- a/ge/graph/manager/graph_manager.cc
+++ b/ge/graph/manager/graph_manager.cc
@@ -129,8 +129,6 @@ const uint32_t kInitGraphCount = 1;
 const uint32_t kNotAdded = 0;
 const uint32_t kStartAdd = 1;
 const uint32_t kDoneAdded = 2;
-const uint32_t kNeverLoaded = 0;
-const size_t kAlignment = 64;
 
 bool IsTailingOptimization() {
   string is_tailing_optimization_option;

From 99367eb363a4911a9eb098bb7ca6fb4f3c74f15b Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Tue, 6 Jul 2021 10:03:38 +0800
Subject: [PATCH 161/226] Add UT for InnerSession
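
Rename the fixture to UtestInnerSession and align it with the current
InnerSession behaviour: an empty option map is now expected to both
Initialize() and Finalize() cleanly, so the old MODIFY_MIXLIST failure
case is dropped in favour of a clean init/finalize round trip.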
---
 tests/ut/ge/session/inner_session_unittest.cc | 19 +++++-------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/tests/ut/ge/session/inner_session_unittest.cc b/tests/ut/ge/session/inner_session_unittest.cc
index 0d20f06a..80cc2834 100644
--- a/tests/ut/ge/session/inner_session_unittest.cc
+++ b/tests/ut/ge/session/inner_session_unittest.cc
@@ -19,21 +19,18 @@
 #define private public
 #define protected public
 #include "session/inner_session.h"
-#undef private
-#undef protected
-
 using namespace std;
 
 namespace ge {
-class Utest_Inner_session : public testing::Test {
+class UtestInnerSession : public testing::Test {
  protected:
   void SetUp() override {}
   void TearDown() override {}
 };
 
-TEST_F(Utest_Inner_session, build_graph_success) {
+TEST_F(UtestInnerSession, build_graph_success) {
   std::map<string, string> options;
   uint64_t session_id = 1;
   InnerSession inner_seesion(session_id, options);
@@ -44,17 +41,15 @@ TEST_F(UtestInnerSession, build_graph_success) {
   EXPECT_NE(ret, ge::SUCCESS);
 }
 
-TEST_F(Utest_Inner_session, initialize) {
-  std::map<string, string> options = {
-    {ge::MODIFY_MIXLIST, "/modify.json"}
-  };
+TEST_F(UtestInnerSession, initialize) {
+  std::map<string, string> options = {};
   uint64_t session_id = 1;
   InnerSession inner_session(session_id, options);
-  auto ret = inner_session.Initialize();
-  EXPECT_NE(ret, ge::SUCCESS);
+  EXPECT_EQ(inner_session.Initialize(), SUCCESS);
+  EXPECT_EQ(inner_session.Finalize(), SUCCESS);
 }
 
-TEST_F(Utest_Inner_session, check_op_precision_mode) {
+TEST_F(UtestInnerSession, check_op_precision_mode) {
   std::map<string, string> options = {
     {ge::OP_PRECISION_MODE, "./op_precision_mode.ini"}
   };

From f2022b92cce348f7dc10d05363571438ff85f337 Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Tue, 6 Jul 2021 10:09:54 +0800
Subject: [PATCH 162/226] Fix ut.
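
Drive InitAtomicAddrCleanIndices() through the ATOMIC_ATTR_OUTPUT_INDEX
attribute instead of assigning atomic_output_indices_ directly. A rough
sketch of the behaviour the three new assertions assume (member and
attribute names are taken from the test below; the real implementation
in ge/single_op/task/op_task.cc may differ in detail):

    Status AtomicOpTask::InitAtomicAddrCleanIndices() {
      std::vector<int64_t> indices;
      (void)AttrUtils::GetListInt(op_desc_, ATOMIC_ATTR_OUTPUT_INDEX, indices);
      if (indices.empty()) {
        return INTERNAL_ERROR;  // no atomic output recorded on the op
      }
      // every cleaned output needs an address slot in the kernel args
      if (arg_size_ < indices.size() * sizeof(void *)) {
        return INTERNAL_ERROR;
      }
      atomic_output_indices_.assign(indices.begin(), indices.end());
      return SUCCESS;
    }

This matches the UT sequence: empty attribute fails, a set attribute with
unallocated args_ still fails, and SUCCESS only follows the allocation of
arg_size_ = sizeof(void *) * 2.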
---
 tests/ut/ge/single_op/single_op_task_unittest.cc | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc
index 51ef928f..f6ae0dbf 100644
--- a/tests/ut/ge/single_op/single_op_task_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_task_unittest.cc
@@ -158,16 +158,21 @@ TEST_F(UtestSingleOpTask, test_atomic_exec) {
   auto graph = make_shared<ComputeGraph>("graph");
   auto op_desc = make_shared<OpDesc>("Add", "Add");
   auto node = graph->AddNode(op_desc);
-
   AtomicOpTask task;
   task.op_desc_ = op_desc;
   task.node_ = node;
   vector<DataBuffer> inputs;
   vector<DataBuffer> outputs;
-  task.atomic_output_indices_ = { 0 };
+  std::vector<int64_t> atomic_output_indices;
+  ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
+  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
+  atomic_output_indices = { 0 };
+  ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
+  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
   task.arg_size_ = sizeof(void *) * 2;
   task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]);
+  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), SUCCESS);
   ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
 
   ge::DataBuffer data_buffer;

From 1f5afea9635d8ddd5d29a64d99fbf439760d3155 Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Tue, 6 Jul 2021 20:09:45 +0800
Subject: [PATCH 163/226] Separate unit test files

---
 tests/ut/ge/CMakeLists.txt | 378 +++++++++++++------------------------
 1 file changed, 128 insertions(+), 250 deletions(-)

diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 7832c7b0..a1abdfff 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -134,6 +134,7 @@ set(PARSER_SRC_FILES
     "${GE_CODE_DIR}/parser/parser/common/model_saver.cc"
     "${GE_CODE_DIR}/parser/parser/common/parser_types.cc"
     "${GE_CODE_DIR}/parser/parser/common/parser_inner_ctx.cc"
+    "${GE_CODE_DIR}/parser/parser/tensorflow/iterator_fusion_pass.cc"
 )
 
 set(COMMON_SRC_FILES
@@ -155,21 +156,12 @@ set(COMMON_SRC_FILES
     "${GE_CODE_DIR}/ge/init/gelib.cc"
     "${GE_CODE_DIR}/ge/engine_manager/dnnengine_manager.cc"
     "${GE_CODE_DIR}/ge/opskernel_manager/ops_kernel_manager.cc"
-    "${GE_CODE_DIR}/ge/session/session_manager.cc"
     "${GE_CODE_DIR}/ge/opskernel_manager/ops_kernel_builder_manager.cc"
-    "${GE_CODE_DIR}/ge/graph/load/model_manager/model_manager.cc"
     "${GE_CODE_DIR}/ge/common/profiling/profiling_manager.cc"
     "${GE_CODE_DIR}/ge/common/profiling/ge_profiling.cc"
     "${GE_CODE_DIR}/ge/graph/manager/host_mem_manager.cc"
     "${GE_CODE_DIR}/ge/graph/manager/memory_api.cc"
-    "${GE_CODE_DIR}/ge/session/inner_session.cc"
-    "${GE_CODE_DIR}/ge/graph/execute/model_executor.cc"
     "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc"
-    "${GE_CODE_DIR}/ge/graph/execute/graph_execute.cc"
-    "${GE_CODE_DIR}/ge/graph/preprocess/graph_preprocess.cc"
-    "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model_stub.cc"
-    "${GE_CODE_DIR}/ge/graph/load/model_manager/davinci_model.cc"
-    "${GE_CODE_DIR}/ge/graph/load/model_manager/data_inputer.cc"
     "${GE_CODE_DIR}/ge/common/dump/dump_properties.cc"
     "${GE_CODE_DIR}/ge/common/helper/model_helper.cc"
     "${GE_CODE_DIR}/ge/common/dump/dump_manager.cc"
@@ -179,128 +171,16 @@ set(COMMON_SRC_FILES
     "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc"
     "${GE_CODE_DIR}/ge/model/ge_root_model.cc"
     "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc"
-    "${GE_CODE_DIR}/ge/graph/load/model_manager/data_dumper.cc"
-
"${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" "${GE_CODE_DIR}/ge/common/dump/dump_server.cc" - "${GE_CODE_DIR}/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc" "${GE_CODE_DIR}/ge/graph/preprocess/multi_batch_copy_graph.cc" "${GE_CODE_DIR}/ge/graph/optimize/mem_rw_conflict_optimize.cc" - "${GE_CODE_DIR}/ge/graph/passes/pass_manager.cc" - "${GE_CODE_DIR}/ge/graph/passes/resource_pair_add_control_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/resource_pair_remove_control_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/pass_utils.cc" - "${GE_CODE_DIR}/ge/graph/passes/base_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/bitcast_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/constant_folding_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/aicpu_constant_folding_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/reshape_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/reshape_recovery_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transop_breadth_fusion_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transop_depth_fusion_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/same_transdata_breadth_fusion_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transop_without_reshape_fusion_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/compile_nodes_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/variable_prepare_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/variable_ref_delete_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/subgraph_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/data_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/net_output_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/replace_transshape_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/constant_fuse_same_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/print_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/no_use_reshape_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/iterator_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/input_output_connection_identify_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/atomic_addr_clean_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/mark_same_addr_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/mark_graph_unknown_status_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/mark_agnostic_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/dimension_compute_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/dimension_adjust_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/get_original_format_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/shape_operate_op_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/assert_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/dropout_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/infer_base_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/infershape_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/infer_value_range_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/unused_const_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/permute_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/ctrl_edge_transfer_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/end_of_sequence_add_control_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/stop_gradient_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/prevent_gradient_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/identity_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/ref_identity_delete_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/placeholder_with_default_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/snapshot_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/guarantee_const_pass.cc" - 
"${GE_CODE_DIR}/ge/graph/passes/var_is_initialized_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/parallel_concat_start_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/folding_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/cast_translate_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/prune_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/merge_to_stream_merge_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/merge_input_memcpy_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/switch_to_stream_switch_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/mark_force_unknown_for_cond_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/attach_stream_label_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/multi_batch_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/multi_batch_clone_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/subexpression_migration_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/subgraph_const_migration_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/unused_args_clean_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/next_iteration_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/control_trigger_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/cond_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/cond_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/for_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/enter_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/assign_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/inplace_support_check_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/addn_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/common_subexpression_elimination_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transop_symmetry_elimination_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/save_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/switch_dead_branch_elimination.cc" - "${GE_CODE_DIR}/ge/graph/passes/switch_logic_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/switch_data_edges_bypass.cc" - "${GE_CODE_DIR}/ge/graph/passes/merge_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/variable_op_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/cast_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/hccl_memcpy_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/hccl_continuous_memcpy_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/global_step_insert_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/link_gen_mask_nodes_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/replace_with_empty_const_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/hccl_group_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/hccl_tailing_optimization_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/memcpy_addr_async_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/set_input_output_offset_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/remove_same_const_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/useless_control_out_remove_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/parallel_group_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/buffer_pool_memory_pass.cc" - "${GE_CODE_DIR}/ge/graph/passes/mark_node_unknown_shape_pass.cc" "${GE_CODE_DIR}/ge/model/ge_model.cc" "${GE_CODE_DIR}/ge/common/cust_aicpu_kernel_store.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/model_utils.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/zero_copy_offset.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/zero_copy_task.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/cpu_queue_schedule.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/aipp_utils.cc" - "${GE_CODE_DIR}/ge/graph/load/model_manager/tbe_handle_store.cc" "${GE_CODE_DIR}/ge/common/kernel_store.cc" "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" 
"${GE_CODE_DIR}/ge/common/auth/file_saver.cc" "${GE_CODE_DIR}/ge/graph/manager/util/debug.cc" "${GE_CODE_DIR}/ge/common/debug/memory_dumper.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" "${GE_CODE_DIR}/ge/graph/load/graph_loader.cc" "${GE_CODE_DIR}/ge/graph/optimize/graph_optimize.cc" "${GE_CODE_DIR}/ge/graph/build/graph_builder.cc" @@ -315,13 +195,10 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/partition/dynamic_shape_partition.cc" "${GE_CODE_DIR}/ge/graph/optimize/summary_optimize.cc" "${GE_CODE_DIR}/ge/ir_build/option_utils.cc" - "${GE_CODE_DIR}/ge/graph/preprocess/insert_op/ge_aipp_op.cc" - "${GE_CODE_DIR}/ge/graph/preprocess/multi_batch_options.cc" "${GE_CODE_DIR}/ge/graph/build/model_builder.cc" "${GE_CODE_DIR}/ge/graph/build/run_context.cc" "${GE_CODE_DIR}/ge/graph/build/stream_graph_optimizer.cc" "${GE_CODE_DIR}/ge/graph/build/task_generator.cc" - "${GE_CODE_DIR}/ge/graph/partition/graph_partition.cc" "${GE_CODE_DIR}/ge/graph/partition/engine_place.cc" "${GE_CODE_DIR}/ge/graph/build/stream_allocator.cc" "${GE_CODE_DIR}/ge/graph/build/memory/memory_assigner.cc" @@ -348,10 +225,10 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/manager/graph_mem_manager.cc" "${GE_CODE_DIR}/ge/common/dump/dump_op.cc" "${GE_CODE_DIR}/ge/common/model_saver.cc" - "${GE_CODE_DIR}/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc" "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" "${GE_CODE_DIR}/ge/session/omg.cc" + "${GE_CODE_DIR}/ge/common/thread_pool.cc" "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" ) @@ -375,57 +252,26 @@ set(COMMON_FORMAT_SRC_FILES "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc" "${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc" "${GE_CODE_DIR}/ge/graph/manager/util/hcom_util.cc" - "${GE_CODE_DIR}/ge/common/dump/dump_manager.cc" ) -set(GRAPH_OPTIMIZE_COMMON_SRC_FILES - "${GE_CODE_DIR}/ge/graph/optimize/graph_optimize.cc" - "${GE_CODE_DIR}/ge/graph/optimize/summary_optimize.cc" - "${GE_CODE_DIR}/ge/graph/optimize/mem_rw_conflict_optimize.cc" -) - - set(GRAPH_PREPARE_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/preprocess/graph_preprocess.cc" "${GE_CODE_DIR}/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc" "${GE_CODE_DIR}/ge/graph/preprocess/insert_op/ge_aipp_op.cc" + "${GE_CODE_DIR}/ge/graph/preprocess/multi_batch_options.cc" #"${GE_CODE_DIR}/ge/graph/preprocess/insert_op/base_insert_op.cc" ) -set(GRAPH_PARTITION_COMMON_SRC_FILES - "${GE_CODE_DIR}/ge/graph/partition/graph_partition.cc" - "${GE_CODE_DIR}/ge/plugin/engine/dnnengines.cc" - "${GE_CODE_DIR}/ge/graph/partition/engine_place.cc" -) - -set(GRAPH_LOAD_COMMON_SRC_FILES - "${GE_CODE_DIR}/ge/graph/load/graph_loader.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_manager_utils.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_mem_allocator.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_var_manager.cc" - "${GE_CODE_DIR}/ge/graph/manager/trans_var_data_utils.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_caching_allocator.cc" - "${GE_CODE_DIR}/ge/graph/manager/session_scope_mem_allocator.cc" - "${GE_CODE_DIR}/ge/graph/manager/rdma_pool_allocator.cc" - "${GE_CODE_DIR}/ge/graph/manager/host_mem_allocator.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_mem_manager.cc" - "${GE_CODE_DIR}/ge/common/thread_pool.cc" -) - -set(DISTINCT_GRAPH_LOAD_SRC_FILES - "${GE_CODE_DIR}/ge/graph/manager/util/hcom_util.cc" - "${GE_CODE_DIR}/ge/graph/manager/util/debug.cc" - "${GE_CODE_DIR}/ge/common/properties_manager.cc" - 
"${GE_CODE_DIR}/ge/common/profiling/profiling_manager.cc" - "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" - "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" - "${GE_CODE_DIR}/ge/common/util.cc" +set(GRAPH_DAVINCI_MODEL_SRC_FILES + "${GE_CODE_DIR}/ge/graph/load/model_manager/aipp_utils.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/cpu_queue_schedule.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/data_dumper.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/data_inputer.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/davinci_model.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/model_manager.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/model_utils.cc" + "${GE_CODE_DIR}/ge/graph/load/model_manager/zero_copy_offset.cc" + "${GE_CODE_DIR}/ge/graph/load/model_manager/zero_copy_task.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/tbe_handle_store.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/task_info.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/event_record_task_info.cc" @@ -447,45 +293,24 @@ set(DISTINCT_GRAPH_LOAD_SRC_FILES "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/ffts_task_info.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc" "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" - "${GE_CODE_DIR}/ge/model/ge_model.cc" - "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" - "${GE_CODE_DIR}/ge/common/debug/memory_dumper.cc" - "${GE_CODE_DIR}/ge/executor/ge_executor.cc" - "${GE_CODE_DIR}/ge/common/auth/file_saver.cc" + "${GE_CODE_DIR}/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc" "${GE_CODE_DIR}/ge/graph/manager/model_manager/event_manager.cc" ) set(GRAPH_EXECUTE_COMMON_SRC_FILES - "${GE_CODE_DIR}/ge/graph/execute/graph_execute.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" - "${GE_CODE_DIR}/ge/graph/manager/util/rt_context_util.cc" - "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" - "${GE_CODE_DIR}/ge/graph/manager/graph_context.h" + "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model_stub.cc" ) set(GRAPH_BUILD_COMMON_SRC_FILES - "${GE_CODE_DIR}/ge/graph/build/graph_builder.cc" - "${GE_CODE_DIR}/ge/graph/build/task_generator.cc" + "${GE_CODE_DIR}/ge/graph/manager/graph_manager.cc" "${GE_CODE_DIR}/ge/client/ge_api.cc" "${GE_CODE_DIR}/ge/session/inner_session.cc" "${GE_CODE_DIR}/ge/session/session_manager.cc" "${GE_CODE_DIR}/ge/graph/execute/model_executor.cc" - "${GE_CODE_DIR}/ge/engine_manager/dnnengine_manager.cc" + "${GE_CODE_DIR}/ge/graph/execute/graph_execute.cc" + "${GE_CODE_DIR}/ge/plugin/engine/dnnengines.cc" "${GE_CODE_DIR}/ge/plugin/engine/engine_manage.cc" - "${GE_CODE_DIR}/ge/graph/build/logical_stream_allocator.cc" - "${GE_CODE_DIR}/ge/graph/build/stream_allocator.cc" - "${GE_CODE_DIR}/ge/graph/build/memory/block_mem_assigner.cc" - "${GE_CODE_DIR}/ge/graph/build/memory/binary_block_mem_assigner.cc" - "${GE_CODE_DIR}/ge/graph/build/memory/hybrid_mem_assigner.cc" - "${GE_CODE_DIR}/ge/graph/build/memory/max_block_mem_assigner.cc" - "${GE_CODE_DIR}/ge/model/ge_model.cc" - "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" - "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" - "${GE_CODE_DIR}/ge/common/thread_pool.cc" - "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" - "${GE_CODE_DIR}/ge/graph/build/run_context.cc" - "${GE_CODE_DIR}/ge/graph/common/local_context.cc" + "${GE_CODE_DIR}/ge/graph/manager/graph_context.cc" ) set(GRAPH_PASS_COMMON_SRC_FILES @@ -495,7 +320,6 @@ 
set(GRAPH_PASS_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/variable_ref_delete_op_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/atomic_addr_clean_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/constant_folding_pass.cc" - "${GE_CODE_DIR}/parser/parser/tensorflow/iterator_fusion_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/iterator_op_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/net_output_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/print_op_pass.cc" @@ -534,7 +358,6 @@ set(GRAPH_PASS_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/same_transdata_breadth_fusion_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/compile_nodes_pass.cc" - "${GE_CODE_DIR}/ge/graph/common/transop_util.cc" "${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/parallel_group_pass.cc" #"${GE_CODE_DIR}/ge/graph/optimize/optimizer/allreduce_fusion_pass.cc" @@ -546,10 +369,106 @@ set(GRAPH_PASS_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/infer_base_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/infershape_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/infer_value_range_pass.cc" - "${GE_CODE_DIR}/ge/ge_local_engine/engine/host_cpu_engine.cc" - "${GE_CODE_DIR}/ge/analyzer/analyzer.cc" + "${GE_CODE_DIR}/ge/graph/passes/resource_pair_add_control_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/resource_pair_remove_control_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/pass_utils.cc" + "${GE_CODE_DIR}/ge/graph/passes/base_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/bitcast_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/constant_folding_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/aicpu_constant_folding_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/reshape_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/reshape_recovery_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transop_breadth_fusion_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transop_depth_fusion_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/same_transdata_breadth_fusion_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transop_without_reshape_fusion_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/compile_nodes_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/variable_prepare_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/variable_ref_delete_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/variable_ref_useless_control_out_delete_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/subgraph_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/data_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/net_output_pass.cc" - "${GE_CODE_DIR}/ge/graph/common/local_context.cc" + "${GE_CODE_DIR}/ge/graph/passes/replace_transshape_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/constant_fuse_same_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/fuse_data_nodes_with_common_input_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/print_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/no_use_reshape_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/iterator_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/input_output_connection_identify_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/atomic_addr_clean_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/mark_same_addr_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/mark_graph_unknown_status_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/mark_agnostic_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/dimension_compute_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/dimension_adjust_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/get_original_format_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/shape_operate_op_remove_pass.cc" + 
"${GE_CODE_DIR}/ge/graph/passes/assert_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/dropout_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_base_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infershape_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/infer_value_range_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/unused_const_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/permute_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/ctrl_edge_transfer_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/end_of_sequence_add_control_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/stop_gradient_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/prevent_gradient_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/identity_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/ref_identity_delete_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/placeholder_with_default_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/snapshot_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/guarantee_const_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/var_is_initialized_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/parallel_concat_start_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/folding_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/cast_translate_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/prune_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/merge_to_stream_merge_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/merge_input_memcpy_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/switch_to_stream_switch_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/mark_force_unknown_for_cond_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/attach_stream_label_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/multi_batch_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/multi_batch_clone_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/subexpression_migration_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/subgraph_const_migration_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/unused_args_clean_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/next_iteration_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/control_trigger_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/cond_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/cond_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/for_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/enter_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/assign_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/inplace_support_check_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/addn_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/common_subexpression_elimination_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transop_symmetry_elimination_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/save_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/switch_dead_branch_elimination.cc" + "${GE_CODE_DIR}/ge/graph/passes/switch_logic_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/switch_data_edges_bypass.cc" + "${GE_CODE_DIR}/ge/graph/passes/merge_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/variable_op_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/cast_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/hccl_continuous_memcpy_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/global_step_insert_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/link_gen_mask_nodes_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/replace_with_empty_const_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/hccl_group_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/hccl_tailing_optimization_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/memcpy_addr_async_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/set_input_output_offset_pass.cc" + 
"${GE_CODE_DIR}/ge/graph/passes/remove_same_const_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/useless_control_out_remove_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/parallel_group_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/buffer_pool_memory_pass.cc" + "${GE_CODE_DIR}/ge/graph/passes/mark_node_unknown_shape_pass.cc" ) set(KERNEL_SRC_FILES @@ -590,6 +509,7 @@ set(KERNEL_SRC_FILES ) set(SINGLE_OP_SRC_FILES + "${GE_CODE_DIR}/ge/executor/ge_executor.cc" "${GE_CODE_DIR}/ge/single_op/task/build_task_utils.cc" "${GE_CODE_DIR}/ge/single_op/task/op_task.cc" "${GE_CODE_DIR}/ge/single_op/task/tbe_task_builder.cc" @@ -623,7 +543,6 @@ set(SINGLE_OP_SRC_FILES "${GE_CODE_DIR}/ge/hybrid/node_executor/aicore/aicore_op_task.cc" "${GE_CODE_DIR}/ge/hybrid/node_executor/aicore/aicore_task_builder.cc" "${GE_CODE_DIR}/ge/hybrid/node_executor/aicore/aicore_task_compiler.cc" - "${GE_CODE_DIR}/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc" "${GE_CODE_DIR}/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc" "${GE_CODE_DIR}/ge/hybrid/node_executor/compiledsubgraph/known_node_executor.cc" "${GE_CODE_DIR}/ge/hybrid/node_executor/ge_local/ge_local_node_executor.cc" @@ -639,10 +558,6 @@ set(SINGLE_OP_SRC_FILES "${GE_CODE_DIR}/ge/hybrid/hybrid_davinci_model.cc" ) -set(GE_OPT_INFO_SRC_FILES - "${GE_CODE_DIR}/ge/ge_opt_info/ge_opt_info.cc" -) - # test files set(COMMON_TEST_FILES "graph/passes/graph_builder_utils.cc" @@ -817,7 +732,7 @@ set(MULTI_PARTS_TEST_FILES "graph/manager/hcom_util_unittest.cc" "graph/manager/graph_caching_allocator_unittest.cc" "graph/manager/host_mem_allocator_unittest.cc" - "graph/manager/memory_api_unittest.cc" + "graph/manager/memory_api_unittest.cc" "graph/manager/session_scope_mem_allocator_unittest.cc" "graph/manager/run_graph_unittest.cc" "graph/partition/dynamic_shape_partition_unittest.cc" @@ -973,57 +888,19 @@ target_link_libraries(ge_prepare_common PRIVATE json ) -# build graph optimize common -add_library(ge_optimize_common STATIC ${GRAPH_OPTIMIZE_COMMON_SRC_FILES} ${PROTO_HDRS}) - -target_compile_definitions(ge_optimize_common PRIVATE - google=ascend_private -) - -target_compile_options(ge_optimize_common PRIVATE - -g --coverage -fprofile-arcs -ftest-coverage - -Werror=format -) - -target_link_libraries(ge_optimize_common PRIVATE - $ - ascend_protobuf - c_sec - json -) - -# build graph partition common -add_library(ge_partition_common STATIC ${GRAPH_PARTITION_COMMON_SRC_FILES} ${PROTO_HDRS}) - -target_compile_definitions(ge_partition_common PRIVATE - google=ascend_private -) - -target_compile_options(ge_partition_common PRIVATE - -g --coverage -fprofile-arcs -ftest-coverage - -Werror=format -) - -target_link_libraries(ge_partition_common PRIVATE - $ - ascend_protobuf - c_sec - json -) - # build build graph load common -add_library(ge_load_common STATIC ${GRAPH_LOAD_COMMON_SRC_FILES} ${PROTO_HDRS}) +add_library(ge_davinci_model STATIC ${GRAPH_DAVINCI_MODEL_SRC_FILES} ${PROTO_HDRS}) -target_compile_definitions(ge_load_common PRIVATE +target_compile_definitions(ge_davinci_model PRIVATE google=ascend_private ) -target_compile_options(ge_load_common PRIVATE +target_compile_options(ge_davinci_model PRIVATE -g --coverage -fprofile-arcs -ftest-coverage -Werror=format ) -target_link_libraries(ge_load_common PRIVATE +target_link_libraries(ge_davinci_model PRIVATE $ c_sec ascend_protobuf @@ -1126,14 +1003,14 @@ target_compile_definitions(ut_libge_multiparts_utest PRIVATE target_link_libraries(ut_libge_multiparts_utest $ - ge_build_common ge_load_common ge_execute_common ge_optimize_common 
ge_partition_common ge_prepare_common - ge_single_op ge_ut_common_format ge_ut_common + -Wl,--whole-archive + ge_davinci_model ge_build_common ge_prepare_common ge_execute_common ge_pass_common ge_ut_common_format ge_ut_common + -Wl,--no-whole-archive gtest gtest_main gmock gmock_main ${COMMON_SHARED_LIBRARIES} -lrt -ldl -lgcov ) # libge_others_utest add_executable(ut_libge_others_utest - ${GE_OPT_INFO_SRC_FILES} ${COMMON_TEST_FILES} ${PASS_TEST_FILES} ${EXECUTE_TEST_FILES} @@ -1148,7 +1025,9 @@ target_compile_options(ut_libge_others_utest PRIVATE target_link_libraries(ut_libge_others_utest $ - ge_load_common ge_execute_common ge_ut_common ge_ut_common_format + -Wl,--whole-archive + ge_davinci_model ge_build_common ge_prepare_common ge_pass_common ge_execute_common ge_ut_common ge_ut_common_format + -Wl,--no-whole-archive gtest gtest_main gmock gmock_main ${COMMON_SHARED_LIBRARIES} -lrt -ldl -lgcov ) @@ -1166,7 +1045,9 @@ target_compile_options(ut_libge_kernel_utest PRIVATE target_link_libraries(ut_libge_kernel_utest $ - ge_load_common ge_ut_common ge_ut_common_format + -Wl,--whole-archive + ge_davinci_model ge_build_common ge_prepare_common ge_pass_common ge_execute_common ge_ut_common ge_ut_common_format + -Wl,--no-whole-archive gtest gtest_main gmock gmock_main ${COMMON_SHARED_LIBRARIES} -lrt -ldl -lgcov ) @@ -1176,7 +1057,6 @@ add_executable(ut_libge_distinct_load_utest ${GENERATOR_TEST_FILES} ${EXECUTOR_TEST_FILES} ${DISTINCT_GRAPH_LOAD_TEST_FILES} - ${DISTINCT_GRAPH_LOAD_SRC_FILES} ${SINGLE_OP_TEST_FILES} ${PROFILING_MNG_TEST_FILES} ${HYBRID_TEST_FILES} @@ -1195,9 +1075,7 @@ target_compile_definitions(ut_libge_distinct_load_utest PRIVATE target_link_libraries(ut_libge_distinct_load_utest $ -Wl,--whole-archive - ge_single_op + ge_single_op ge_davinci_model ge_build_common ge_prepare_common ge_pass_common ge_ut_common ge_ut_common_format -Wl,--no-whole-archive - ge_execute_common ge_load_common - ge_prepare_common ge_optimize_common ge_build_common ge_partition_common ge_ut_common ge_ut_common_format gtest gtest_main gmock gmock_main ${COMMON_SHARED_LIBRARIES} -lrt -ldl -lpthread -lgcov ) From 3b261ee4a453b6c62fe33fef5708985bed8594a6 Mon Sep 17 00:00:00 2001 From: wuweikang Date: Mon, 5 Jul 2021 09:59:23 +0800 Subject: [PATCH 164/226] fix dump step check --- ge/common/dump/dump_properties.cc | 14 ++++---------- tests/ut/ge/common/dump_properties_unittest.cc | 4 ++-- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc index 84bdb7bf..099920e7 100644 --- a/ge/common/dump/dump_properties.cc +++ b/ge/common/dump/dump_properties.cc @@ -204,7 +204,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties: FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() { if (enable_dump_ == kEnableFlag) { std::string dump_step; - if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS) { + if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS && !dump_step.empty()) { GE_CHK_STATUS_RET(CheckDumpStep(dump_step), "[Check][dump_step] failed."); GELOGI("Get dump step %s successfully", dump_step.c_str()); SetDumpStep(dump_step); @@ -441,16 +441,10 @@ Status DumpProperties::SetDumpDebugOptions() { if (enable_dump_debug_ == kEnableFlag) { std::string dump_debug_mode; if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) { - GELOGD("Get dump debug mode %s successfully", 
           dump_debug_mode.c_str());
+      GELOGD("Get ge.exec.dumpDebugMode %s successfully", dump_debug_mode.c_str());
     } else {
-      REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}),
-                         std::vector<std::string>({
-                             "ge.exec.dumpDebugMode",
-                             dump_debug_mode,
-                             "ge.exec.dumpDebugMode is not set."}));
-      GELOGE(PARAM_INVALID, "[Check][dump_debug_mode] failed. Dump debug mode is not set.");
-
-      return PARAM_INVALID;
+      GELOGW("ge.exec.dumpDebugMode is not set.");
+      return SUCCESS;
     }
 
     if (dump_debug_mode == OP_DEBUG_AICORE) {
diff --git a/tests/ut/ge/common/dump_properties_unittest.cc b/tests/ut/ge/common/dump_properties_unittest.cc
index 57809013..3623bc6d 100644
--- a/tests/ut/ge/common/dump_properties_unittest.cc
+++ b/tests/ut/ge/common/dump_properties_unittest.cc
@@ -115,12 +115,12 @@ TEST_F(UTEST_dump_properties, init_by_options_success_2) {
   EXPECT_EQ(st, SUCCESS);
 }
 
-TEST_F(UTEST_dump_properties, init_by_options_failed) {
+TEST_F(UTEST_dump_properties, init_by_options_success_3) {
   DumpProperties dp;
   std::map<string, string> options {{OPTION_EXEC_ENABLE_DUMP_DEBUG, "1"},
                                     {OPTION_EXEC_DUMP_PATH, "/tmp/"}};
   GetThreadLocalContext().SetGlobalOption(options);
   Status st = dp.InitByOptions();
-  EXPECT_NE(st, SUCCESS);
+  EXPECT_EQ(st, SUCCESS);
 }
 }  // namespace ge
\ No newline at end of file

From 6da309710797d305774156125857e0de0dcfd57c Mon Sep 17 00:00:00 2001
From: zhaozhixuan
Date: Tue, 6 Jul 2021 21:21:02 +0800
Subject: [PATCH 165/226] Fix review comments.

---
 ge/single_op/single_op_model.cc                 | 36 +++++++++++--------
 ge/single_op/single_op_model.h                  |  2 +-
 ge/single_op/task/op_task.cc                    | 32 ++++++++++-------
 ge/single_op/task/op_task.h                     | 14 ++++----
 ge/single_op/task/tbe_task_builder.cc           | 10 +++---
 ge/single_op/task/tbe_task_builder.h            |  6 ++--
 .../ge/single_op/single_op_task_unittest.cc    |  5 ++-
 7 files changed, 62 insertions(+), 43 deletions(-)

diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc
index a5547b39..426d3233 100755
--- a/ge/single_op/single_op_model.cc
+++ b/ge/single_op/single_op_model.cc
@@ -432,7 +432,7 @@ Status SingleOpModel::BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask *
   return SUCCESS;
 }
 
-Status SingleOpModel::BuildAtomicTask(const domi::TaskDef &task_def, AtomicOpTask **task) {
+Status SingleOpModel::BuildAtomicTask(const domi::TaskDef &task_def, AtomicAddrCleanOpTask **task) {
   GE_CHECK_NOTNULL(task);
   const auto &context = task_def.kernel().context();
   auto iter = op_list_.find(context.op_index());
@@ -442,18 +442,18 @@ Status SingleOpModel::BuildAtomicTask(const domi::TaskDef &task_def, AtomicOpTas
     return ACL_ERROR_GE_INTERNAL_ERROR;
   }
 
-  std::unique_ptr<AtomicOpTask> atomic_task(new (std::nothrow) AtomicOpTask());
+  std::unique_ptr<AtomicAddrCleanOpTask> atomic_task(new (std::nothrow) AtomicAddrCleanOpTask());
   if (atomic_task == nullptr) {
-    GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Create][AtomicOpTask]failed.");
-    REPORT_INNER_ERROR("E19999", "BuildKernelTask fail for new AtomicOpTask.");
+    GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Create][AtomicAddrCleanOpTask]failed.");
+    REPORT_INNER_ERROR("E19999", "BuildKernelTask fail for new AtomicAddrCleanOpTask.");
     return ACL_ERROR_GE_MEMORY_ALLOCATION;
   }
 
-  auto builder = AtomicTaskBuilder(model_name_, iter->second, task_def);
+  auto builder = AtomicAddrCleanTaskBuilder(model_name_, iter->second, task_def);
   auto ret = builder.BuildTask(*atomic_task, model_params_);
   if (ret != SUCCESS) {
-    GELOGE(ret, "[Build][AtomicOpTask]failed.");
-    REPORT_INNER_ERROR("E19999", "[Build][AtomicOpTask]failed.");
+    GELOGE(ret, "[Build][AtomicAddrCleanOpTask]failed.");
REPORT_INNER_ERROR("E19999", "[Build][AtomicAddrCleanOpTask]failed."); return ret; } @@ -571,13 +571,21 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, GE_CHECK_NOTNULL(compute_graph); single_op.compute_graph_ = compute_graph; - GE_CHK_BOOL_RET_STATUS(node_tasks_.size() == 1, ACL_ERROR_GE_PARAM_INVALID, - "[Check][Size]Node size must be 1, but get %zu.", node_tasks_.size()); + if (node_tasks_.size() != 1) { + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size]Node size must be 1, but get %zu.", node_tasks_.size()); + REPORT_INNER_ERROR("E19999", "[Check][Size]Node size must be 1, but get %zu.", node_tasks_.size()); + return ACL_ERROR_GE_PARAM_INVALID; + } + auto iter = node_tasks_.begin(); auto node = iter->first; - auto task_defs = iter->second; - GE_CHK_BOOL_RET_STATUS(task_defs.size() > 0 && task_defs.size() <= kNumTaskWithAtomicAddrCleanTask, - ACL_ERROR_GE_PARAM_INVALID, "[Check][Size]task_defs size must be 1 or 2, but get %zu.", task_defs.size()); + const auto &task_defs = iter->second; + if (task_defs.size() <= 0 || task_defs.size() > kNumTaskWithAtomicAddrCleanTask) { + GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Size]Node size must be 1, but get %zu.", node_tasks_.size()); + REPORT_INNER_ERROR("E19999", "[Check][Size]task_defs size must be 1 or 2, but get %zu.", task_defs.size()); + return ACL_ERROR_GE_PARAM_INVALID; + } + GE_CHECK_NOTNULL(node); auto op_desc = node->GetOpDesc(); GE_CHECK_NOTNULL(op_desc); @@ -594,10 +602,10 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource, } if (task_defs.size() == kNumTaskWithAtomicAddrCleanTask) { const auto &atomic_task_def = task_defs.front(); - AtomicOpTask *atomic_task = nullptr; + AtomicAddrCleanOpTask *atomic_task = nullptr; GE_CHK_STATUS_RET_NOLOG(BuildAtomicTask(atomic_task_def, &atomic_task)); GE_CHK_STATUS_RET_NOLOG(atomic_task->InitAtomicAddrCleanIndices()); - tbe_task->SetAtomicTask(atomic_task); + tbe_task->SetAtomicAddrCleanTask(atomic_task); } single_op.op_task_.reset(tbe_task); } else if (lib_name == kEngineNameAiCpu) { diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h index 83490f5f..b1cd161c 100755 --- a/ge/single_op/single_op_model.h +++ b/ge/single_op/single_op_model.h @@ -69,7 +69,7 @@ class SingleOpModel { Status BuildTaskList(StreamResource *stream_resource, SingleOp &single_op); Status BuildTaskListForDynamicOp(StreamResource *stream_resource, DynamicSingleOp &dynamic_single_op); Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task); - Status BuildAtomicTask(const domi::TaskDef &task_def, AtomicOpTask **task); + Status BuildAtomicTask(const domi::TaskDef &task_def, AtomicAddrCleanOpTask **task); Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id); Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index dfdec750..c6c99ab0 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -268,15 +268,6 @@ Status TbeOpTask::UpdateTensorDesc(const GeTensorDesc &src_tensor, GeTensorDesc dst_tensor.SetShape(GeShape(std::move(storage_shape))); dst_tensor.SetOriginShape(src_tensor.GetShape()); } - - int64_t size = 0; - graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(dst_tensor, size); - if (graph_status != GRAPH_SUCCESS) { - REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed!"); - GELOGE(graph_status, 
"[Get][TensorMemorySize] In Bytes failed!"); - return FAILED; - } - TensorUtils::SetSize(dst_tensor, size); return SUCCESS; } @@ -490,7 +481,12 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) { } } -Status AtomicOpTask::UpdateIoAddr(const vector &inputs, const vector &outputs) { +Status AtomicAddrCleanOpTask::UpdateNodeByShape(const vector &input_desc, + const vector &output_desc) { + return SUCCESS; +} + +Status AtomicAddrCleanOpTask::UpdateIoAddr(const vector &inputs, const vector &outputs) { uintptr_t *arg_base = reinterpret_cast(args_.get()); for (auto atomic_output_index : atomic_output_indices_) { if (atomic_output_index >= static_cast(outputs.size())) { @@ -500,11 +496,21 @@ Status AtomicOpTask::UpdateIoAddr(const vector &inputs, const vector } auto &output_buffer = outputs[atomic_output_index]; *arg_base++ = reinterpret_cast(output_buffer.data); + + auto tensor_desc = op_desc_->MutableOutputDesc(atomic_output_index); + int64_t size = 0; + graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, size); + if (graph_status != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get tensor size in bytes failed!"); + GELOGE(graph_status, "[Get][TensorMemorySize] In Bytes failed!"); + return FAILED; + } + TensorUtils::SetSize(*tensor_desc, size); } return SUCCESS; } -Status AtomicOpTask::UpdateTilingArgs(rtStream_t stream) { +Status AtomicAddrCleanOpTask::UpdateTilingArgs(rtStream_t stream) { if (tiling_buffer_ != nullptr) { GELOGD("[%s] Start to copy tiling info. size = %zu", node_->GetName().c_str(), tiling_data_.size()); GE_CHK_RT_RET(rtMemcpyAsync(tiling_buffer_, max_tiling_size_, tiling_data_.data(), tiling_data_.size(), @@ -516,7 +522,7 @@ Status AtomicOpTask::UpdateTilingArgs(rtStream_t stream) { return SUCCESS; } -Status AtomicOpTask::CalcTilingInfo(optiling::utils::OpRunInfo &run_info) { +Status AtomicAddrCleanOpTask::CalcTilingInfo(optiling::utils::OpRunInfo &run_info) { auto ret = optiling::OpAtomicCalculateV2(*node_, run_info); if (ret != GRAPH_SUCCESS) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Invoke][OpAtomicCalculate] failed, ret = %u.", ret); @@ -526,7 +532,7 @@ Status AtomicOpTask::CalcTilingInfo(optiling::utils::OpRunInfo &run_info) { return SUCCESS; } -Status AtomicOpTask::InitAtomicAddrCleanIndices() { +Status AtomicAddrCleanOpTask::InitAtomicAddrCleanIndices() { GELOGD("[%s] Start to setup AtomicAddrClean task.", op_desc_->GetName().c_str()); std::vector atomic_output_indices; (void) ge::AttrUtils::GetListInt(op_desc_, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices); diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 1e100a11..0ce4cbae 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -89,7 +89,7 @@ class TbeOpTask : public OpTask { void SetKernelArgs(std::unique_ptr &&args, size_t arg_size, uint32_t block_dim, const OpDescPtr &op_desc); void SetKernelWithHandleArgs(std::unique_ptr &&args, size_t arg_size, uint32_t block_dim, const OpDescPtr &op_desc, const domi::KernelDefWithHandle& kernel_def_with_handle); - void SetAtomicTask(OpTask *task) { atomic_task_.reset(task); } + void SetAtomicAddrCleanTask(OpTask *task) { atomic_task_.reset(task); } Status UpdateRunInfo() override; Status SetArgIndex(); @@ -108,13 +108,13 @@ class TbeOpTask : public OpTask { void *tiling_buffer_ = nullptr; uint32_t max_tiling_size_ = 0; std::string tiling_data_; + size_t input_num_; // include const input + size_t output_num_; private: friend class SingleOpModel; friend class TbeTaskBuilder; 
static Status UpdateTensorDesc(const GeTensorDesc &src_tensor, GeTensorDesc &dst_tensor); - Status UpdateNodeByShape(const vector &input_desc, - const vector &output_desc); Status AllocateWorkspaces(const std::vector &workspace_sizes); Status DoLaunchKernel(rtStream_t stream); Status CheckAndExecuteAtomic(const vector &input_desc, @@ -122,6 +122,8 @@ class TbeOpTask : public OpTask { vector &output_desc, vector &output_buffers, rtStream_t stream); + virtual Status UpdateNodeByShape(const vector &input_desc, + const vector &output_desc); virtual Status UpdateTilingArgs(rtStream_t stream); virtual Status UpdateIoAddr(const vector &inputs, const vector &outputs); virtual Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info); @@ -140,17 +142,17 @@ class TbeOpTask : public OpTask { std::string original_kernel_key_; std::string node_info_; std::vector arg_index_; // data index in args - size_t input_num_; // include const input - size_t output_num_; std::unique_ptr atomic_task_; }; -class AtomicOpTask : public TbeOpTask { +class AtomicAddrCleanOpTask : public TbeOpTask { public: Status InitAtomicAddrCleanIndices(); private: + Status UpdateNodeByShape(const vector &input_desc, + const vector &output_desc) override; Status UpdateIoAddr(const vector &inputs, const vector &outputs) override; Status UpdateTilingArgs(rtStream_t stream) override; Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override; diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc index c5579a01..017dac25 100644 --- a/ge/single_op/task/tbe_task_builder.cc +++ b/ge/single_op/task/tbe_task_builder.cc @@ -459,23 +459,23 @@ std::string TbeTaskBuilder::GetKeyForTvmMetaData() const { return TVM_ATTR_NAME_METADATA; } -Status AtomicTaskBuilder::InitKernelArgs(void *args_addr, size_t arg_size, const SingleOpModelParam ¶m) { +Status AtomicAddrCleanTaskBuilder::InitKernelArgs(void *args_addr, size_t arg_size, const SingleOpModelParam ¶m) { return SUCCESS; } -std::string AtomicTaskBuilder::GetKeyForOpParamSize() const { +std::string AtomicAddrCleanTaskBuilder::GetKeyForOpParamSize() const { return kAttrAtomicOpParamSize; } -std::string AtomicTaskBuilder::GetKeyForTvmMetaData() const { +std::string AtomicAddrCleanTaskBuilder::GetKeyForTvmMetaData() const { return ATOMIC_ATTR_TVM_METADATA; } -void AtomicTaskBuilder::GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const { +void AtomicAddrCleanTaskBuilder::GetKernelName(const OpDescPtr &op_desc, std::string &kernel_name) const { (void)AttrUtils::GetStr(op_desc, op_desc->GetName() + "_atomic_kernelname", kernel_name); } -TBEKernelPtr AtomicTaskBuilder::GetTbeKernel(const OpDescPtr &op_desc) const { +TBEKernelPtr AtomicAddrCleanTaskBuilder::GetTbeKernel(const OpDescPtr &op_desc) const { return op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_TBE_KERNEL, TBEKernelPtr()); } diff --git a/ge/single_op/task/tbe_task_builder.h b/ge/single_op/task/tbe_task_builder.h index 833ab0e0..06d17901 100755 --- a/ge/single_op/task/tbe_task_builder.h +++ b/ge/single_op/task/tbe_task_builder.h @@ -126,11 +126,11 @@ class TbeTaskBuilder { void *handle_ = nullptr; }; -class AtomicTaskBuilder : public TbeTaskBuilder { +class AtomicAddrCleanTaskBuilder : public TbeTaskBuilder { public: - AtomicTaskBuilder(const std::string &model_name, const NodePtr &node, const domi::TaskDef &task_def) + AtomicAddrCleanTaskBuilder(const std::string &model_name, const NodePtr &node, const domi::TaskDef &task_def) : TbeTaskBuilder(model_name, node, task_def) {} - 
~AtomicTaskBuilder() override = default; + ~AtomicAddrCleanTaskBuilder() override = default; protected: std::string GetKeyForOpParamSize() const override; diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index f6ae0dbf..3e3160c2 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -157,8 +157,11 @@ TEST_F(UtestSingleOpTask, test_update_ioaddr) { TEST_F(UtestSingleOpTask, test_atomic_exec) { auto graph = make_shared("graph"); auto op_desc = make_shared("Add", "Add"); + GeTensorDesc desc; + op_desc->AddInputDesc(desc); + op_desc->AddOutputDesc(desc); auto node = graph->AddNode(op_desc); - AtomicOpTask task; + AtomicAddrCleanOpTask task; task.op_desc_ = op_desc; task.node_ = node; From 6b962fdee48d35df50d286f02c3b1e9e366b96a3 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 7 Jul 2021 10:16:10 +0800 Subject: [PATCH 166/226] delete cce stub & AUX_SOURCE_DIRECTORY for graph --- CMakeLists.txt | 12 +- tests/CMakeLists.txt | 2 +- tests/depends/cce/CMakeLists.txt | 98 ---- tests/depends/cce/src/cce_stub.cc | 576 -------------------- tests/depends/cce/src/op_kernel_registry.cc | 29 - tests/ut/common/graph/CMakeLists.txt | 65 +-- tests/ut/ge/CMakeLists.txt | 86 +-- 7 files changed, 51 insertions(+), 817 deletions(-) delete mode 100644 tests/depends/cce/CMakeLists.txt delete mode 100644 tests/depends/cce/src/cce_stub.cc delete mode 100644 tests/depends/cce/src/op_kernel_registry.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 41520b14..ac0240d9 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -108,7 +108,7 @@ else () elseif(PLATFORM STREQUAL "inference") find_module(adump_server libadump_server.a ${ASCEND_ACL_DIR}) find_module(runtime libruntime.so ${ASCEND_ACL_DIR}) - find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) + find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) if(PRODUCT STREQUAL "flr3") elseif(PRODUCT STREQUAL "flr1") @@ -119,12 +119,12 @@ else () find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) endif() elseif(PLATFORM STREQUAL "all") - find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) - find_module(runtime libruntime.so ${ASCEND_RUNTIME_DIR}) + find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) + find_module(runtime libruntime.so ${ASCEND_RUNTIME_DIR}) find_module(msprofiler_fwk_ext libmsprofiler_fwk.a ${ASCEND_RUNTIME_DIR}) - find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) - find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) - find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) + find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) + find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) + find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) else() message(STATUS "PLATFORM param is invalid, should be train or inference, you choose nothing!") endif() diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3dd94051..f5dab366 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -15,7 +15,7 @@ project(tests CXX C) find_package(Threads) -add_subdirectory(depends/cce) + add_subdirectory(depends/slog) add_subdirectory(depends/mmpa) add_subdirectory(depends/runtime) diff --git a/tests/depends/cce/CMakeLists.txt b/tests/depends/cce/CMakeLists.txt deleted file mode 100644 index 05fa8133..00000000 --- 
a/tests/depends/cce/CMakeLists.txt +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -#cmake_minimum_required(VERSION 2.8) - -project(STUB_CCE) - -set(CMAKE_CXX_STANDARD 11) - -include_directories(${GE_CODE_DIR}/inc) -include_directories(${GE_CODE_DIR}/inc/framework) -include_directories(${GE_CODE_DIR}/metadef/inc/graph) -include_directories(${GE_CODE_DIR}/inc/external) -include_directories(${GE_CODE_DIR}/metadef/inc/external) -include_directories(${GE_CODE_DIR}/metadef/inc/external/graph) -include_directories(${GE_CODE_DIR}/metadef) -include_directories(${GE_CODE_DIR}/metadef/inc) -include_directories(${GE_CODE_DIR}/metadef/graph) -include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc) -include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/cce) -include_directories(${GE_CODE_DIR}/third_party/fwkacllib/inc/ops) -include_directories(${CMAKE_BINARY_DIR}) -include_directories(${CMAKE_BINARY_DIR}/proto/ge) -set(PROTO_LIST - "${GE_CODE_DIR}/metadef/proto/om.proto" - "${GE_CODE_DIR}/metadef/proto/ge_ir.proto" - "${GE_CODE_DIR}/metadef/proto/task.proto" -) - -protobuf_generate(ge PROTO_SRCS PROTO_HDRS ${PROTO_LIST}) - -set(SRCS - "${GE_CODE_DIR}/metadef/graph/ge_attr_define.cc" - "${GE_CODE_DIR}/metadef/graph/anchor.cc" - "${GE_CODE_DIR}/metadef/graph/ge_attr_value.cc" - "${GE_CODE_DIR}/metadef/graph/buffer.cc" - "${GE_CODE_DIR}/metadef/graph/aligned_ptr.cc" - "${GE_CODE_DIR}/metadef/graph/compute_graph.cc" - "${GE_CODE_DIR}/metadef/graph/graph.cc" - "${GE_CODE_DIR}/metadef/graph/model.cc" - "${GE_CODE_DIR}/metadef/graph/model_serialize.cc" - "${GE_CODE_DIR}/metadef/graph/node.cc" - "${GE_CODE_DIR}/metadef/graph/op_desc.cc" - "${GE_CODE_DIR}/metadef/graph/operator.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory_impl.cc" - "${GE_CODE_DIR}/metadef/graph/tensor.cc" - "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" - "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" - "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/type_utils.cc" - "${GE_CODE_DIR}/metadef/ops/op_imp.cpp" - "${GE_CODE_DIR}/metadef/graph/shape_refiner.cc" - "${GE_CODE_DIR}/metadef/graph/ge_tensor.cc" - "${GE_CODE_DIR}/metadef/graph/opsproto/opsproto_manager.cc" -) -add_library(cce_ge_stub SHARED src/cce_stub.cc ${PROTO_SRCS} ${PROTO_HDRS}) - -target_compile_definitions(cce_ge_stub PRIVATE - google=ascend_private -) - -target_link_libraries(cce_ge_stub - $ - -Wl,--no-as-needed - ascend_protobuf - -Wl,--as-needed - c_sec -) - -add_library(cce_stub SHARED ${SRCS} ${PROTO_SRCS} ${PROTO_HDRS}) - -target_compile_definitions(cce_stub PRIVATE - google=ascend_private -) - 
-target_link_libraries(cce_stub PRIVATE - $ - -Wl,--no-as-needed - ascend_protobuf - -Wl,--as-needed - c_sec -) diff --git a/tests/depends/cce/src/cce_stub.cc b/tests/depends/cce/src/cce_stub.cc deleted file mode 100644 index 03df3d0c..00000000 --- a/tests/depends/cce/src/cce_stub.cc +++ /dev/null @@ -1,576 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include -#include - -#include "cce/optimizer/fusion_engine.h" -#include "common/op/attr_value_util.h" -#include "graph/utils/tensor_utils.h" -#include "graph/utils/graph_utils.h" - -using namespace cce; -using namespace std; -using namespace ge; -using namespace fusion; - -uint64_t global_mem_base = 0; - -namespace cce { -#define DIM_MAX_SIZE 8 -static const uint32_t C0 = 16; -struct tagCcPad {}; -struct tagCcConvolution {}; - -struct tagCcLRN {}; - -struct tagCcFasterRcnnProposal {}; -struct tagCcRoiAlign {}; -struct tagCcBatchNorm {}; -struct tagCcDetectpostprocess {}; - -struct tagCcSsdDetectionOutput {}; - -struct tagCcRefinedetDetectionOutput {}; - -struct tagCcMsrGenerateRpnProposals {}; - -struct tagCcFilter { - vector dims; -}; - -struct tagCcTensor { - ccTensorFormat_t format; - ccDataType_t data_type; - uint32_t dim_cnt; - int32_t real_dim_cnt; - uint32_t data_size; - int32_t dim_buf[DIM_MAX_SIZE]; - int32_t stride_buf[DIM_MAX_SIZE]; -}; - -typedef struct tagCcPooling { - ccPoolingMode_t mode; - ccPaddingMode_t pad_mode; - ccNanPropagation_t max_pooling_nan_opt; - uint32_t dim_cnt; - int32_t window_dim[6]; - int32_t padding[6]; - int32_t stride[6]; -} ccPooling_t; - -struct tagCcActivation {}; - -struct tagCcFasterRcnnDetectionOutput {}; -struct tagCcSpatialTransformer {}; - -struct tagCcPower {}; -struct tagCcResizeBilinear {}; -struct tagCcSsdNormalize {}; -struct tagCcSsdPostProcessor {}; -struct tagCcSsdPriorBox {}; -struct tagCcPsRoiPooling {}; - -struct tagMsrFastRcnnPredictions {}; -struct tagCcPRelu {}; -struct tagCcStridedSlice {}; - -struct tagCcStridedSliceAttrs {}; - -struct tagCcRnn {}; - -struct tagCcArgmaxmin {}; - -typedef struct tagCcLog { - ccDataType_t data_type; - uint32_t param_cnt; -} ccLog_t; -typedef struct tagCcLog *ccLogDescriptor_t; - -struct tagCcPadV2 {}; - -ccStatus_t ccGetPadV2OutputDim(const ccTensorDescriptor_t x_desc, const ccPadV2Descriptor_t pad_desc, int32_t *dim_cnt, - int32_t dim[], int32_t dim_len) { - *dim_cnt = 4; - dim[0] = 1; - dim[1] = 2; - dim[2] = 2; - dim[3] = 3; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccPadV2Forward(ccHandle_t handle, const ccPadV2Descriptor_t pad_desc, const void *alpha, - const ccTensorDescriptor_t x_desc, const void *x, const void *beta, - const ccTensorDescriptor_t output_desc, void *output) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccCreatePadV2Descriptor(ccPadV2Descriptor_t *pad_desc) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccDestroyPadV2Descriptor(ccPadV2Descriptor_t *pad_desc) { return CC_STATUS_SUCCESS; } - -ccStatus_t 
ccSetKernelOpMap(ccHandle_t handle) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccDataDumpForward(ccHandle_t handle, const void *buffer, const uint64_t buf_len, const uint32_t task_index) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetPadV2Descriptor(ccPadV2Descriptor_t pad_desc, const int32_t pad_shape_cnt, - const int32_t pad_shape_low[], const int32_t pad_shape_high[], - const ccPadMode_t pad_mode, const void *pad_value, const ccDataType_t pad_value_type) { - return CC_STATUS_SUCCESS; -} - -struct tagCcYoloDetectionOutput { - ccYoloVersion_t yolo_version; - uint32_t net_h; - uint32_t net_w; - uint32_t post_top_k; - uint32_t classes; - float nms_threshold; - float iou_thre_decay; - float coor_scale_factor; - bool relative; - float obj_threshold; - float cls_threshold; - uint32_t bias_num; - float *bias; -}; - -struct tagCcYoloRegion {}; - -struct tagCcEltwise {}; - -struct tagCcHashTableLookup {}; - -struct tagCcEmbeddingAttnDecoder {}; -struct tagNonMaxSuppression {}; - -struct tagCcArcSinCos {}; -struct tagCcPow {}; -struct tagCcConcatFive2Four_t {}; -struct tagCcConcatFour2Five_t {}; - -ccStatus_t ccCreatePowDescriptor(ccPowDescriptor_t *pow_desc) { - *pow_desc = new tagCcPow(); - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetPowDescriptor(ccPowDescriptor_t pow_desc, ccDataType_t data_type, uint32_t param_cnt) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccDestroyPowDescriptor(ccPowDescriptor_t *pow_desc) { - if (nullptr == pow_desc) { - return CC_STATUS_BAD_PARAM; - } - - delete *pow_desc; - *pow_desc = 0; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccPowForward(ccHandle_t handle, const ccPowDescriptor_t pow_desc, const void *pow_param, const void *alpha, - const ccTensorDescriptor_t x_desc, const void *x, const ccTensorDescriptor_t y_desc, - const void *y, const void *beta, const ccTensorDescriptor_t z_desc, void *z) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccLogicalOrForward(ccHandle_t handle, const void *alpha, const ccTensorDescriptor_t x_desc, const void *x, - const ccTensorDescriptor_t y_desc, const void *y, const void *beta, - const ccTensorDescriptor_t output_desc, void *output) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccCompareForward(ccHandle_t handle, ccCompareType_t compare_type, const void *alpha, - const ccTensorDescriptor_t x_desc, const void *x, const ccTensorDescriptor_t y_desc, - const void *y, const void *beta, const ccTensorDescriptor_t output_desc, void *output) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccGetCompareOutputDim(const ccTensorDescriptor_t x_desc, const ccTensorDescriptor_t y_desc, int32_t *dim_cnt, - int32_t *dim, int32_t dim_len) { - *dim_cnt = 4; - dim[0] = 1; - dim[1] = 1; - dim[2] = 1; - dim[3] = 1; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccArcTanForward(ccHandle_t handle, const void *alpha, const ccTensorDescriptor_t x_desc, const void *x, - const void *beta, const ccTensorDescriptor_t y_desc, void *y) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccAtanhForward(ccHandle_t handle, const void *alpha, const ccTensorDescriptor_t x_desc, const void *x, - const void *beta, const ccTensorDescriptor_t y_desc, void *y) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccIsDepthwiseHighPerformance(int32_t input_n, int32_t input_c, int32_t input_h, int32_t input_w, - int32_t filter_n, int32_t filter_c, int32_t filter_h, int32_t filter_w, - int32_t dilation_h, int32_t dilation_w, int32_t pad_h_head, int32_t pad_h_tail, - int32_t pad_w_head, int32_t pad_w_tail, int32_t stride_h, int32_t stride_w, - int32_t group_num, bool 
&is_high_performance, bool is_quant, - ccDataType_t input_data_type, ccDataType_t output_data_type) { - is_high_performance = true; - return CC_STATUS_SUCCESS; -} - -struct tagCcSpaceToBatch {}; - -struct tagCcBatchToSpace {}; - -struct tagCcResizeNearestNeighbor {}; - -ccStatus_t ccGetStream(ccHandle_t handle, rtStream_t *stream_id) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccGetRtVersion(uint32_t *count) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccDestroyTensorDescriptor(ccTensorDescriptor_t *tensor_desc) { - if (nullptr == tensor_desc) { - return CC_STATUS_BAD_PARAM; - } - delete *tensor_desc; - *tensor_desc = 0; - return CC_STATUS_SUCCESS; -} -ccStatus_t ccDestroyFilterDescriptor(ccFilterDescriptor_t *filter_desc) { - delete *filter_desc; - *filter_desc = 0; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccGetFilterSizeInBytes(const ccFilterDescriptor_t filter_desc, uint32_t *size) { - *size = filter_desc->dims[0] * filter_desc->dims[1] * filter_desc->dims[2] * filter_desc->dims[3] * sizeof(float); - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccTransFilter(const ccFilterDescriptor_t w_desc, const void *w, ccFilterDescriptor_t y_desc, void *y, - uint32_t y_size_in_bytes) { - y = const_cast(w); - - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccCreateTensorDescriptor(ccTensorDescriptor_t *tensor_desc) { - *tensor_desc = new tagCcTensor(); - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetTensor4dDescriptor(ccTensorDescriptor_t tensor_desc, ccTensorFormat_t format, ccDataType_t data_type, - int32_t n, int32_t c, int32_t h, int32_t w) { - if (CC_TENSOR_NHWC == format) { - tensor_desc->dim_buf[0] = n; - tensor_desc->dim_buf[1] = h; - tensor_desc->dim_buf[2] = w; - tensor_desc->dim_buf[3] = c; - } else { - tensor_desc->dim_buf[0] = n; - tensor_desc->dim_buf[1] = c; - tensor_desc->dim_buf[2] = h; - tensor_desc->dim_buf[3] = w; - } - tensor_desc->dim_cnt = 4; - tensor_desc->data_type = data_type; - tensor_desc->format = format; - tensor_desc->data_size = n * c * h * w * sizeof(data_type); - return CC_STATUS_SUCCESS; -} -ccStatus_t ccGetTensorSizeInBytes(const ccTensorDescriptor_t tensor_desc, uint32_t *size) { - if ((NULL == tensor_desc) || (NULL == size)) { - return CC_STATUS_BAD_PARAM; - } - *size = tensor_desc->data_size; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccGetTensorMemorySizeInBytes(const ccTensorDescriptor_t tensor_desc, uint32_t *size) { - *size = tensor_desc->data_size; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccCreateFilterDescriptor(ccFilterDescriptor_t *filter_desc) { - *filter_desc = new tagCcFilter(); - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetFilter4dDescriptor(ccFilterDescriptor_t filter_desc, ccTensorFormat_t format, ccDataType_t data_type, - int32_t k, int32_t c, int32_t h, int32_t w) { - filter_desc->dims.push_back(k); - filter_desc->dims.push_back(c); - filter_desc->dims.push_back(h); - filter_desc->dims.push_back(w); - - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetFilterFractalDescriptor(ccFilterDescriptor_t filter_desc, ccTensorFormat_t format, - ccDataType_t data_type, int32_t k, int32_t c, int32_t h, int32_t w) { - filter_desc->dims.push_back(k); - filter_desc->dims.push_back(c); - filter_desc->dims.push_back(h); - filter_desc->dims.push_back(w); - - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetStream(ccHandle_t handle, rtStream_t stream_id) { return CC_STATUS_SUCCESS; } -ccStatus_t ccCreatePoolingMaskDescriptor(ccTensorDescriptor_t *pooling_mask_desc) { - *pooling_mask_desc = new tagCcTensor(); - return CC_STATUS_SUCCESS; -} - -ccStatus_t 
ccSetPoolingMaskTensorDescriptor(ccTensorDescriptor_t tensor_desc, ccTensorFormat_t format, - ccDataType_t data_type, int32_t n, int32_t c, int32_t h, int32_t w, - int32_t window_h, int32_t window_w) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetFilter6dDescriptor(ccTensorDescriptor_t filter_desc, ccTensorFormat_t format, ccDataType_t data_type, - int32_t c1, int32_t h, int32_t w, int32_t n, int32_t co, int32_t c0) { - return CC_STATUS_SUCCESS; -} - -/// @ingroup dnn -/// @brief get the format and dimcnt of GeTensor -/// @param [in] tensor_desc descriptor of tensor -/// @param [in|out] format point to format -/// @return ccStatus_t -ccStatus_t ccGetTensorFormat(const ccTensorDescriptor_t tensor_desc, ccTensorFormat_t *format) { - *format = tensor_desc->format; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccTransTensor(const ccTensorDescriptor_t x_desc, const void *x, const ccTensorDescriptor_t y_desc, void *y, - uint32_t y_size_in_bytes) { - return CC_STATUS_SUCCESS; -} -void cceSysInit() {} - -bool compilerStubFree() { return true; } - -bool compilerStubInit() { return true; } - -ccStatus_t ccSetInt8Filter4dDescriptor(ccFilterDescriptor_t filter_desc, ccTensorFormat_t format, - ccDataType_t data_type, int32_t k, int32_t c, int32_t h, int32_t w, - ccDataType_t output_data_type) { - filter_desc->dims.push_back(k); - filter_desc->dims.push_back(c); - filter_desc->dims.push_back(h); - filter_desc->dims.push_back(w); - - return CC_STATUS_SUCCESS; -} -ccStatus_t ccSetTensorNdDescriptor(ccTensorDescriptor_t tensor_desc, ccDataType_t data_type, int32_t dim_cnt, - int32_t dimA[]) { - tensor_desc->data_type = data_type; - tensor_desc->data_size = sizeof(data_type); - for (int32_t i = 0; i < dim_cnt; i++) { - tensor_desc->data_size = tensor_desc->data_size * dimA[i]; - } - tensor_desc->format = CC_TENSOR_ND; - return CC_STATUS_SUCCESS; -} - -ccStatus_t CceProfilingConfig(const char *target, const char *job_ctx, uint32_t flag) { return CC_STATUS_SUCCESS; } -ccStatus_t ccSetTensorRealDimCnt(ccTensorDescriptor_t tensor_desc, int32_t real_dim_cnt) { - if (tensor_desc != NULL && tensor_desc != nullptr) { - tensor_desc->real_dim_cnt = real_dim_cnt; - } - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccGetTensorRealDimCnt(ccTensorDescriptor_t tensor_desc, int32_t *real_dim_cnt) { - *real_dim_cnt = tensor_desc->real_dim_cnt; - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetQuantizeFactors(ccQuantizeDescriptor_t quantize_info, ccScaleValueMode_t scale_val_mode, - const uint16_t *scale, const uint16_t *offset, const uint8_t *offset_pad) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetReQuantizeFactors(ccQuantizeDescriptor_t quantize_info, ccScaleValueMode_t scale_val_mode, - const uint16_t *scale_rq, const uint16_t *next_layer_offset, - const int32_t *offset_w) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetDeQuantizeFactors(ccQuantizeDescriptor_t quantize_info, ccScaleValueMode_t scale_val_mode, - const uint16_t *scale_dq, const int32_t *offset_w) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetQuantizeAlgoAndScaleType(ccQuantizeDescriptor_t quantize_info, ccQuantizeAlgo_t quant_algo, - ccScaleType_t scale_type, bool relu_flag) { - return CC_STATUS_SUCCESS; -} -ccStatus_t ccPrintTimeStat() { return CC_STATUS_SUCCESS; } -ccStatus_t ccSetModelId(ccHandle_t handle, uint32_t model_id) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccGetKernelContext(rtStream_t stream_id, ccOpContext &op_context) { - if (stream_id == nullptr) { - op_context.kernelType = ccKernelType::TE; - } else { - 
op_context.kernelType = ccKernelType::CCE_AI_CORE; - op_context.opId = 1; - op_context.kernelFuncId = 1; - op_context.isFlowtable = true; - op_context.opCount = 1; - op_context.opIndex2[0] = 0; - } - - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccUpdateKernelArgs(ccOpContext &op_context, uint64_t data_base_addr, uint64_t weight_base_addr, - uint64_t variable_base_addr, void *args_addr, uint64_t args_size, void *l2ctrl_addr) { - return CC_STATUS_SUCCESS; -} -ccStatus_t ccGetKernelArgsAddrs(ccOpContext &op_context, void *args_addr, uint64_t args_size, void *l2ctrl_addr, - std::vector &op_addrs_info) { - // cce - ccOpAddrsInfo tmp_op_addrs_info; - uint64_t tmp_input = (uint64_t)global_mem_base; - tmp_op_addrs_info.addrPos = &tmp_input; - tmp_op_addrs_info.addrData = tmp_input; - op_addrs_info.push_back(tmp_op_addrs_info); - - uint64_t tmp_output = (uint64_t)(global_mem_base + 5476352); - tmp_op_addrs_info.addrPos = &tmp_output; - tmp_op_addrs_info.addrData = tmp_output; - op_addrs_info.push_back(tmp_op_addrs_info); - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccSetKernelArgs(std::vector &date_info) { return CC_STATUS_SUCCESS; } -} // namespace cce -// ccFusion no namespace -ccStatus_t ccFusionStart(ccHandle_t handle, uint32_t graph_id, uint32_t init_flag, CceFusionMemCfg_t mem_cfg) { - return CC_STATUS_SUCCESS; -} - -//???ccFusion ????namespace cce?? -ccStatus_t ccFusionStart(ccHandle_t handle, uint32_t graph_id, uint32_t init_flag, uint32_t addr_change_flag) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t ccFusionEnd(ccHandle_t handle, uint32_t graph_id) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccFusionTaskEnd(ccHandle_t handle, uint32_t graph_id) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccKernelLaunchRepeat(ccHandle_t handle) { return CC_STATUS_SUCCESS; } - -ccStatus_t ccKernelDelete(ccHandle_t handle) { return CC_STATUS_SUCCESS; } - -ccStatus_t cce::ccSetTensorFormat(cce::tagCcTensor *, cce::tagCcTensorFormat) { return CC_STATUS_SUCCESS; } - -namespace fusion { -uint32_t BufferFusion(std::shared_ptr, std::shared_ptr, bool) { return 0; } - -uint32_t BufferFusionTrain(std::shared_ptr, std::shared_ptr) { return 0; } - -uint32_t GraphFusionTrain(ge::ComputeGraphPtr orig_graph, ge::ComputeGraphPtr fusion_graph) { return 0; } -} // namespace fusion -namespace fusion { -using namespace ge; - -uint32_t Fusion(ComputeGraphPtr model_graph, ComputeGraphPtr fusion_graph, kScopeNodeMap_t &te_fusion_map) { - OpDescPtr op_def_a = std::make_shared(); - op_def_a->SetName("reduction_nd"); - op_def_a->SetType("reduction_nd"); - - GeTensorDescPtr v_input_desc = std::make_shared(); - op_def_a->AddInputDesc(*v_input_desc); - - vector v_input; - v_input.push_back(0); - op_def_a->SetInputOffset(v_input); - - GeTensorDesc input_desc = op_def_a->GetInputDesc(0); - input_desc.SetFormat(FORMAT_NCHW); - input_desc.SetDataType(DT_FLOAT); - input_desc.SetShape(GeShape({1, 3, 5, 5})); - ge::TensorUtils::SetSize(input_desc, 192); - ge::TensorUtils::SetRealDimCnt(input_desc, 4); - - GeTensorDescPtr output_desc = std::make_shared(); - op_def_a->AddOutputDesc(*output_desc); - - output_desc->SetFormat(FORMAT_NCHW); - output_desc->SetDataType(DT_FLOAT); - output_desc->SetShape(GeShape({1, 3, 5})); - ge::TensorUtils::SetSize(*output_desc, 96); - ge::TensorUtils::SetRealDimCnt(*output_desc, 3); - - OpDescPtr op_def_b = std::make_shared(); - op_def_b->SetName("transdata_1"); - op_def_b->SetType("TransData"); - - int stream_num = 1; - int flag = 0; - - NodePtr node_a = fusion_graph->AddNode(op_def_a); - NodePtr node_b = 
fusion_graph->AddNode(op_def_b); - - GraphUtils::AddEdge(node_a->GetOutDataAnchor(0), node_b->GetInDataAnchor(0)); - int32_t a = 1; - int32_t b = 2; - - AttrUtils::SetInt(op_def_a, "fusion_scope", a); - AttrUtils::SetInt(op_def_b, "fusion_scope", b); - - vector node_list1; - node_list1.push_back(node_a); - vector node_list2; - node_list2.push_back(node_b); - te_fusion_map[1] = node_list1; - te_fusion_map[2] = node_list2; - - return FUSION_STATUS_SUCCESS; -} - -uint32_t FusionTaskBuild(cce::ccHandle_t cc_handle, ge::ComputeGraphPtr fusion_graph, ge::Buffer &buffer, - ModelRes &model_res, std::vector &task_def_list_) { - TaskDef task_def_temp; - task_def_list_.push_back(task_def_temp); - - return FUSION_STATUS_SUCCESS; -} -uint32_t GraphFusion(ge::ComputeGraphPtr orig_graph, ge::ComputeGraphPtr fusion_graph) { - *fusion_graph = *orig_graph; - return FUSION_STATUS_SUCCESS; -} - -void FusionTaskBuildComplete(std::vector cc_handle_list) { return; } - -} // namespace fusion - -ccStatus_t cce::ccSetTensorDescriptorQuantizeParam(ccTensorDescriptor_t tensor_desc, - const ccVecQuantizePara_t *vec_quantize_para) { - return CC_STATUS_SUCCESS; -} - -ccStatus_t cce::ccSetAllOffsetQuantizeFactors(ccQuantizeDescriptor_t quantize_info, const uint8_t *offset_w, - const uint8_t *offset_d, const uint16_t *scale_req, - const uint16_t *offset_d_next) { - return CC_STATUS_SUCCESS; -} diff --git a/tests/depends/cce/src/op_kernel_registry.cc b/tests/depends/cce/src/op_kernel_registry.cc deleted file mode 100644 index 5ccd1391..00000000 --- a/tests/depends/cce/src/op_kernel_registry.cc +++ /dev/null @@ -1,29 +0,0 @@ -#include "register/op_kernel_registry.h" - -namespace ge { -class OpKernelRegistry::OpKernelRegistryImpl { - -}; - -OpKernelRegistry::OpKernelRegistry() { -} - -OpKernelRegistry::~OpKernelRegistry() { - -} - -bool OpKernelRegistry::IsRegistered(const std::string &op_type) { - return false; -} - -std::unique_ptr OpKernelRegistry::CreateHostCpuOp(const std::string &op_type) { - return nullptr; -} - -void OpKernelRegistry::RegisterHostCpuOp(const std::string &op_type, CreateFn create_fn) { -} - -HostCpuOpRegistrar::HostCpuOpRegistrar(const char *op_type, HostCpuOp *(*create_fn)()) { - -} -} // namespace ge \ No newline at end of file diff --git a/tests/ut/common/graph/CMakeLists.txt b/tests/ut/common/graph/CMakeLists.txt index ccf9ce5e..a8092705 100644 --- a/tests/ut/common/graph/CMakeLists.txt +++ b/tests/ut/common/graph/CMakeLists.txt @@ -61,53 +61,26 @@ set(UT_FILES "testcase/ge_graph/ge_model_unittest.cc" ) -set(SRC_FILES - "${GE_CODE_DIR}/metadef/graph/option/ge_local_context.cc" - "${GE_CODE_DIR}/metadef/graph/option/ge_context.cc" - "${GE_CODE_DIR}/metadef/graph/anchor.cc" - "${GE_CODE_DIR}/metadef/graph/ge_attr_value.cc" - "${GE_CODE_DIR}/metadef/graph/attr_value.cc" - "${GE_CODE_DIR}/metadef/graph/buffer.cc" - "${GE_CODE_DIR}/metadef/graph/aligned_ptr.cc" - "${GE_CODE_DIR}/metadef/graph/compute_graph.cc" - "${GE_CODE_DIR}/metadef/graph/ge_attr_define.cc" - "${GE_CODE_DIR}/metadef/graph/graph.cc" - "${GE_CODE_DIR}/metadef/graph/gnode.cc" - "${GE_CODE_DIR}/metadef/graph/ascend_string.cc" - "${GE_CODE_DIR}/metadef/graph/model.cc" - "${GE_CODE_DIR}/metadef/graph/model_serialize.cc" - "${GE_CODE_DIR}/metadef/graph/node.cc" - "${GE_CODE_DIR}/metadef/graph/op_desc.cc" - "${GE_CODE_DIR}/metadef/graph/operator.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory_impl.cc" - "${GE_CODE_DIR}/metadef/graph/tensor.cc" - 
"${GE_CODE_DIR}/metadef/graph/types.cc" - "${GE_CODE_DIR}/metadef/graph/ge_tensor.cc" - "${GE_CODE_DIR}/metadef/graph/shape_refiner.cc" - "${GE_CODE_DIR}/metadef/graph/format_refiner.cc" - "${GE_CODE_DIR}/metadef/graph/inference_context.cc" - "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" - "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" - "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/type_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/ge_ir_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/tensor_utils.cc" - "${GE_CODE_DIR}/metadef/ops/op_imp.cpp" - "${GE_CODE_DIR}/metadef/graph/opsproto/opsproto_manager.cc" - "${GE_CODE_DIR}/metadef/graph/runtime_inference_context.cc" - "${GE_CODE_DIR}/metadef/graph/ref_relation.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/transfer_shape_according_to_format.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/axis_util.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/expand_dimension.cc" - "${GE_CODE_DIR}/metadef/graph/utils/transformer_utils.cc" -) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph GRAPH_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/detail GRAPH_DETAIL_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/opsproto GRAPH_OPSPROTO_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/option GRAPH_OPTION_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils GRAPH_UTILS_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils/dumper GRAPH_DUMPER_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/ops GRAPH_OPS_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/third_party/transformer/src TRANSFORMER_SRC_FILES) -#add_executable(ut_libgraph ${UT_FILES} ${SRC_FILES} ${PROTO_SRCS} ${PROTO_HDRS}) -add_executable(ut_libgraph ${UT_FILES} ${SRC_FILES} ${PROTO_SRCS} ${PROTO_HDRS}) +add_executable(ut_libgraph ${UT_FILES} + ${GRAPH_SRC_FILES} + ${GRAPH_UTILS_SRC_FILES} + ${GRAPH_OPSPROTO_SRC_FILES} + ${GRAPH_OPTION_SRC_FILES} + ${GRAPH_DETAIL_SRC_FILES} + ${GRAPH_DUMPER_SRC_FILES} + ${GRAPH_OPS_SRC_FILES} + ${TRANSFORMER_SRC_FILES} + ${PROTO_SRCS} ${PROTO_HDRS} +) target_compile_options(ut_libgraph PRIVATE -g --coverage -fprofile-arcs -ftest-coverage diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index a1abdfff..ed06bfe6 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -20,6 +20,7 @@ set(CMAKE_CXX_STANDARD 11) set(PROTO_LIST "${GE_CODE_DIR}/metadef/proto/om.proto" "${GE_CODE_DIR}/metadef/proto/ge_ir.proto" + "${GE_CODE_DIR}/metadef/proto/task.proto" "${GE_CODE_DIR}/metadef/proto/ge_api.proto" "${GE_CODE_DIR}/metadef/proto/insert_op.proto" "${GE_CODE_DIR}/metadef/proto/dump_task.proto" @@ -69,62 +70,19 @@ include_directories(${CMAKE_BINARY_DIR}) include_directories(${CMAKE_BINARY_DIR}/proto/ge) include_directories(${CMAKE_BINARY_DIR}/proto/ge/proto) -set(GRAPH_SRC_FILES - "${GE_CODE_DIR}/metadef/graph/option/ge_local_context.cc" - "${GE_CODE_DIR}/metadef/graph/option/ge_context.cc" - "${GE_CODE_DIR}/metadef/graph/ge_attr_define.cc" - "${GE_CODE_DIR}/metadef/graph/anchor.cc" - "${GE_CODE_DIR}/metadef/graph/ge_attr_value.cc" - "${GE_CODE_DIR}/metadef/graph/attr_value.cc" - "${GE_CODE_DIR}/metadef/graph/buffer.cc" - "${GE_CODE_DIR}/metadef/graph/aligned_ptr.cc" - 
"${GE_CODE_DIR}/metadef/graph/compute_graph.cc" - "${GE_CODE_DIR}/metadef/graph/graph.cc" - "${GE_CODE_DIR}/metadef/graph/gnode.cc" - "${GE_CODE_DIR}/metadef/graph/ascend_string.cc" - "${GE_CODE_DIR}/metadef/graph/inference_context.cc" - "${GE_CODE_DIR}/metadef/graph/shape_refiner.cc" - "${GE_CODE_DIR}/metadef/graph/model.cc" - "${GE_CODE_DIR}/metadef/graph/model_serialize.cc" - "${GE_CODE_DIR}/metadef/graph/node.cc" - "${GE_CODE_DIR}/metadef/graph/runtime_inference_context.cc" - "${GE_CODE_DIR}/metadef/graph/op_desc.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/transfer_shape_according_to_format.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/axis_util.cc" - "${GE_CODE_DIR}/metadef/third_party/transformer/src/expand_dimension.cc" - "${GE_CODE_DIR}/metadef/graph/operator.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory.cc" - "${GE_CODE_DIR}/metadef/graph/operator_factory_impl.cc" - "${GE_CODE_DIR}/metadef/graph/ge_tensor.cc" - "${GE_CODE_DIR}/metadef/graph/ref_relation.cc" - "${GE_CODE_DIR}/metadef/graph/tensor.cc" - "${GE_CODE_DIR}/metadef/graph/types.cc" - "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" - "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/dumper/ge_graph_dumper.cc" - "${GE_CODE_DIR}/metadef/graph/utils/ge_ir_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/node_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/op_desc_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/tensor_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/type_utils.cc" - "${GE_CODE_DIR}/metadef/graph/utils/transformer_utils.cc" - "${GE_CODE_DIR}/metadef/graph/debug/graph_debug.cc" - "${GE_CODE_DIR}/metadef/graph/opsproto/opsproto_manager.cc" - "${GE_CODE_DIR}/metadef/ops/op_imp.cpp" - "${GE_CODE_DIR}/metadef/register/register.cpp" - "${GE_CODE_DIR}/metadef/register/register_pass.cpp" - "${GE_CODE_DIR}/metadef/register/op_kernel_registry.cpp" - "${GE_CODE_DIR}/metadef/register/auto_mapping_util.cpp" - "${GE_CODE_DIR}/metadef/register/tensor_assign.cpp" - "${GE_CODE_DIR}/metadef/register/register_format_transfer.cc" - "${GE_CODE_DIR}/metadef/graph/format_refiner.cc" - "${GE_CODE_DIR}/metadef/register/ops_kernel_builder_registry.cc" - "${GE_CODE_DIR}/metadef/register/op_tiling.cpp" - "${GE_CODE_DIR}/metadef/graph/utils/tuning_utils.cc" - "${GE_CODE_DIR}/metadef/register/op_tiling_registry.cpp" - "${GE_CODE_DIR}/metadef/register/op_tiling_registry_impl.cpp" -) + +#### GRAPH_SRC_FILES #### +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph GRAPH_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/debug GRAPH_DEBUG_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/detail GRAPH_DETAIL_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/opsproto GRAPH_OPSPROTO_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/option GRAPH_OPTION_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils GRAPH_UTILS_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils/dumper GRAPH_DUMPER_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/ops GRAPH_OPS_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/register GRAPH_REGISTER_SRC_FILES) +AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/third_party/transformer/src TRANSFORMER_SRC_FILES) + set(PARSER_SRC_FILES "${GE_CODE_DIR}/parser/parser/common/op_map.cc" @@ -259,7 +217,6 @@ set(GRAPH_PREPARE_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/preprocess/insert_op/util_insert_aipp_op.cc" 
"${GE_CODE_DIR}/ge/graph/preprocess/insert_op/ge_aipp_op.cc" "${GE_CODE_DIR}/ge/graph/preprocess/multi_batch_options.cc" - #"${GE_CODE_DIR}/ge/graph/preprocess/insert_op/base_insert_op.cc" ) set(GRAPH_DAVINCI_MODEL_SRC_FILES @@ -360,7 +317,6 @@ set(GRAPH_PASS_COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/passes/compile_nodes_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/flow_ctrl_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/parallel_group_pass.cc" - #"${GE_CODE_DIR}/ge/graph/optimize/optimizer/allreduce_fusion_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/folding_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/variable_op_pass.cc" "${GE_CODE_DIR}/ge/graph/passes/transpose_transdata_pass.cc" @@ -570,7 +526,6 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES #"graph/load/new_model_manager_data_inputer_unittest.cc" #"graph/load/new_model_manager_davinci_model_unittest.cc" "graph/load/model_manager_unittest.cc" - #"graph/load/new_model_manager_task_build_unittest.cc" "graph/load/new_model_manager_model_manager_aicpu_unittest.cc" "graph/load/end_graph_task_unittest.cc" "graph/load/new_model_manager_event_manager_unittest.cc" @@ -795,7 +750,6 @@ set(OTHERS_TEST_FILES list(APPEND COMMON_SHARED_LIBRARIES c_sec slog_stub - cce_ge_stub runtime_stub profiler_stub mmpa_stub @@ -808,7 +762,17 @@ list(APPEND COMMON_SHARED_LIBRARIES # build graph add_library(ge_ut_graph STATIC - ${GRAPH_SRC_FILES} ${PARSER_SRC_FILES} ${PROTO_SRCS} ${PROTO_HDRS} + ${GRAPH_SRC_FILES} + ${GRAPH_DEBUG_SRC_FILES} + ${GRAPH_DETAIL_SRC_FILES} + ${GRAPH_OPSPROTO_SRC_FILES} + ${GRAPH_OPTION_SRC_FILES} + ${GRAPH_UTILS_SRC_FILES} + ${GRAPH_DUMPER_SRC_FILES} + ${GRAPH_OPS_SRC_FILES} + ${GRAPH_REGISTER_SRC_FILES} + ${TRANSFORMER_SRC_FILES} + ${PARSER_SRC_FILES} ${PROTO_SRCS} ${PROTO_HDRS} ) target_compile_definitions(ge_ut_graph PRIVATE From db53f82c78dd8d90de48b7f576011f3b27db6138 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 7 Jul 2021 10:50:20 +0800 Subject: [PATCH 167/226] Use FILE GLOB_RECURSE for AUX_SOURCE_DIRECTORY --- tests/ut/common/graph/CMakeLists.txt | 19 +++++++------------ tests/ut/ge/CMakeLists.txt | 21 +++++++-------------- 2 files changed, 14 insertions(+), 26 deletions(-) diff --git a/tests/ut/common/graph/CMakeLists.txt b/tests/ut/common/graph/CMakeLists.txt index a8092705..8da69c14 100644 --- a/tests/ut/common/graph/CMakeLists.txt +++ b/tests/ut/common/graph/CMakeLists.txt @@ -61,22 +61,17 @@ set(UT_FILES "testcase/ge_graph/ge_model_unittest.cc" ) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph GRAPH_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/detail GRAPH_DETAIL_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/opsproto GRAPH_OPSPROTO_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/option GRAPH_OPTION_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils GRAPH_UTILS_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils/dumper GRAPH_DUMPER_SRC_FILES) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH0 ${GE_CODE_DIR}/metadef/graph/*.cc) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH1 ${GE_CODE_DIR}/metadef/graph/*/*.cc) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH2 ${GE_CODE_DIR}/metadef/graph/*/*/*.cc) + AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/ops GRAPH_OPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/third_party/transformer/src TRANSFORMER_SRC_FILES) add_executable(ut_libgraph ${UT_FILES} - ${GRAPH_SRC_FILES} - ${GRAPH_UTILS_SRC_FILES} - ${GRAPH_OPSPROTO_SRC_FILES} - ${GRAPH_OPTION_SRC_FILES} - ${GRAPH_DETAIL_SRC_FILES} - ${GRAPH_DUMPER_SRC_FILES} + 
${GRAPH_SRC_FILES_DEPTH0} + ${GRAPH_SRC_FILES_DEPTH1} + ${GRAPH_SRC_FILES_DEPTH2} ${GRAPH_OPS_SRC_FILES} ${TRANSFORMER_SRC_FILES} ${PROTO_SRCS} ${PROTO_HDRS} diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index ed06bfe6..42fa6128 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -72,13 +72,10 @@ include_directories(${CMAKE_BINARY_DIR}/proto/ge/proto) #### GRAPH_SRC_FILES #### -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph GRAPH_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/debug GRAPH_DEBUG_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/detail GRAPH_DETAIL_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/opsproto GRAPH_OPSPROTO_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/option GRAPH_OPTION_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils GRAPH_UTILS_SRC_FILES) -AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/graph/utils/dumper GRAPH_DUMPER_SRC_FILES) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH0 ${GE_CODE_DIR}/metadef/graph/*.cc) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH1 ${GE_CODE_DIR}/metadef/graph/*/*.cc) +FILE(GLOB_RECURSE GRAPH_SRC_FILES_DEPTH2 ${GE_CODE_DIR}/metadef/graph/*/*/*.cc) + AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/ops GRAPH_OPS_SRC_FILES) AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/register GRAPH_REGISTER_SRC_FILES) AUX_SOURCE_DIRECTORY(${GE_CODE_DIR}/metadef/third_party/transformer/src TRANSFORMER_SRC_FILES) @@ -762,13 +759,9 @@ list(APPEND COMMON_SHARED_LIBRARIES # build graph add_library(ge_ut_graph STATIC - ${GRAPH_SRC_FILES} - ${GRAPH_DEBUG_SRC_FILES} - ${GRAPH_DETAIL_SRC_FILES} - ${GRAPH_OPSPROTO_SRC_FILES} - ${GRAPH_OPTION_SRC_FILES} - ${GRAPH_UTILS_SRC_FILES} - ${GRAPH_DUMPER_SRC_FILES} + ${GRAPH_SRC_FILES_DEPTH0} + ${GRAPH_SRC_FILES_DEPTH1} + ${GRAPH_SRC_FILES_DEPTH2} ${GRAPH_OPS_SRC_FILES} ${GRAPH_REGISTER_SRC_FILES} ${TRANSFORMER_SRC_FILES} From e9a9a9b88e8de86cec86497f90344e49c6dcbbeb Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Wed, 7 Jul 2021 10:59:01 +0800 Subject: [PATCH 168/226] Fix bug of tiling workspace --- ge/hybrid/node_executor/aicore/aicore_op_task.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ge/hybrid/node_executor/aicore/aicore_op_task.cc b/ge/hybrid/node_executor/aicore/aicore_op_task.cc index b34cc0c6..fe9bba9a 100644 --- a/ge/hybrid/node_executor/aicore/aicore_op_task.cc +++ b/ge/hybrid/node_executor/aicore/aicore_op_task.cc @@ -372,9 +372,6 @@ Status AiCoreOpTask::UpdateTilingInfo(TaskContext &context) { // update op args by tiling info block_dim_ = tiling_info.GetBlockDim(); clear_atomic_ = tiling_info.GetClearAtomic(); - std::vector workspaces; - tiling_info.GetAllWorkspaces(workspaces); - op_desc->SetWorkspaceBytes(workspaces); tiling_data_ = tiling_info.GetAllTilingData().str(); tiling_key_ = tiling_info.GetTilingKey(); @@ -417,6 +414,11 @@ Status AiCoreOpTask::CalcTilingInfo(const NodePtr &node, OpRunInfo &tiling_info) GE_CHK_STATUS_RET(optiling::OpParaCalculateV2(*node, tiling_info), "[Invoke][OpParaCalculate]Failed calc tiling data of node %s.", node->GetName().c_str()); + // Only non atomic task need update workspace + auto op_desc = node->GetOpDesc(); + std::vector workspaces; + tiling_info.GetAllWorkspaces(workspaces); + op_desc->SetWorkspaceBytes(workspaces); GELOGD("[%s] Done invoking OpParaCalculate successfully.", node->GetName().c_str()); return SUCCESS; } From 43cdd9d732a53220f24ae5738df1118ba780f2a4 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 7 Jul 2021 
18:20:50 +0800
Subject: [PATCH 169/226] Detach SessionManager from GELib

---
 ge/CMakeLists.txt                  |  9 +++--
 ge/client/ge_api.cc                | 57 +++++++++++++++++++++---------
 ge/graph/execute/model_executor.cc | 12 ++++++-
 ge/graph/execute/model_executor.h  |  3 +-
 ge/init/gelib.cc                   | 28 ---------------
 ge/init/gelib.h                    | 12 ++++---
 ge/session/inner_session.cc        |  4 +--
 ge/session/session_manager.cc      |  5 ---
 ge/session/session_manager.h       | 40 ++++++++++-----------
 9 files changed, 86 insertions(+), 84 deletions(-)

diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt
index 8fcf97ef..9fff30f7 100755
--- a/ge/CMakeLists.txt
+++ b/ge/CMakeLists.txt
@@ -474,9 +474,6 @@ set(INFER_SRC_LIST
     "common/ge/plugin_manager.cc"
     "common/ge/op_tiling_manager.cc"
     "init/gelib.cc"
-    "session/inner_session.cc"
-    "session/session_manager.cc"
-    "graph/execute/model_executor.cc"
     "engine_manager/dnnengine_manager.cc"
     "opskernel_manager/ops_kernel_manager.cc"
     "opskernel_manager/ops_kernel_builder_manager.cc"
@@ -721,6 +718,12 @@ set(INFER_SRC_LIST
     "ge_opt_info/ge_opt_info.cc"
 )
 
+set(RUNNER_SRC_LIST
+    "client/ge_api.cc"
+    "session/inner_session.cc"
+    "session/session_manager.cc"
+)
+
 if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES)
 message("CMAKE_CXX_COMPILER_VERSION = ${CMAKE_CXX_COMPILER_VERSION}")
 ############ libge_runner.so ############
diff --git a/ge/client/ge_api.cc b/ge/client/ge_api.cc
index aa88cfb4..e62965c9 100644
--- a/ge/client/ge_api.cc
+++ b/ge/client/ge_api.cc
@@ -47,6 +47,7 @@ const int32_t kMaxStrLen = 128;
 static bool g_ge_initialized = false;
 static std::mutex g_ge_release_mutex; // GEFinalize and ~Session use
+static std::shared_ptr<SessionManager> g_session_manager;
 
 namespace ge {
 void GetOpsProtoPath(std::string &opsproto_path) {
@@ -148,6 +149,22 @@ Status GEInitializeImpl(const std::map<std::string, std::string> &options) {
     return FAILED;
   }
 
+  ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther);
+  GELOGI("SessionManager initialization.");
+  GE_TIMESTAMP_START(SessionManagerInitialize);
+  g_session_manager = MakeShared<SessionManager>();
+  if (g_session_manager == nullptr) {
+    GELOGE(GE_CLI_INIT_FAILED, "[Init][Create]SessionManager failed");
+    return FAILED;
+  }
+  ret = g_session_manager->Initialize(options);
+  GE_TIMESTAMP_END(SessionManagerInitialize, "InnerInitialize::SessionManagerInitialize");
+  if (ret != SUCCESS) {
+    GELOGE(ret, "[Init][SessionManager] GE session manager initialization failed.");
+    REPORT_CALL_ERROR("E19999", "SessionManager initialize failed.");
+    return ret;
+  }
+
   // 7.check return status, return
   if (!g_ge_initialized) {
     // Initialize success, first time calling initialize
@@ -217,6 +234,12 @@ Status GEFinalize() {
       ret = middle_ret;
     }
   }
+
+  GELOGI("SessionManager finalization.");
+  if (g_session_manager != nullptr) {
+    (void)g_session_manager->Finalize();  // always success. 
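  // Ownership note: the session manager now lives in this translation unit as
  // a file-scope shared_ptr instead of inside GELib, so GEFinalize tears it
  // down explicitly here; the Session entry points below likewise call
  // g_session_manager directly rather than GELib::SessionManagerObj().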
+ } + middle_ret = TBEPluginManager::Instance().Finalize(); if (middle_ret != SUCCESS) { ret = middle_ret; @@ -272,7 +295,7 @@ Session::Session(const std::map &options) { GELOGT(TRACE_RUNNING, "Creating session"); uint64_t session_id = 0; - Status ret = instance_ptr->SessionManagerObj().CreateSession(options, session_id); + Status ret = g_session_manager->CreateSession(options, session_id); GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); // check return status, return, update session id if success @@ -321,7 +344,7 @@ Session::Session(const std::map &options) { str_options[key] = val; } uint64_t session_id = 0; - Status ret = instance_ptr->SessionManagerObj().CreateSession(str_options, session_id); + Status ret = g_session_manager->CreateSession(str_options, session_id); GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); // check return status, return, update session id if success @@ -359,7 +382,7 @@ Session::~Session() { GELOGT(TRACE_RUNNING, "Destroying session"); - ret = instance_ptr->SessionManagerObj().DestroySession(session_id); + ret = g_session_manager->DestroySession(session_id); } catch (google::protobuf::FatalException &e) { GELOGE(GE_CLI_SESS_DESTROY_FAILED, "[Destruct][Session]Failed " "because get fatalException."); @@ -397,7 +420,7 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::mapSessionManagerObj().AddGraph(sessionId_, graph_id, graph, options); + Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, options); if (ret != SUCCESS) { GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", @@ -435,7 +458,7 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, std::string val = option.second.GetString(); str_options[key] = val; } - Status ret = instance_ptr->SessionManagerObj().AddGraph(sessionId_, graph_id, graph, str_options); + Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, str_options); if (ret != SUCCESS) { GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", @@ -472,7 +495,7 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, str_options.insert({it->first.GetString(), it->second.GetString()}); } GELOGD("Adding graph to session"); - Status ret = instance_ptr->SessionManagerObj().AddGraphWithCopy(sessionId_, graph_id, graph, str_options); + Status ret = g_session_manager->AddGraphWithCopy(sessionId_, graph_id, graph, str_options); if (ret != SUCCESS) { GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", @@ -502,7 +525,7 @@ Status Session::RemoveGraph(uint32_t graph_id) { } GELOGT(TRACE_RUNNING, "Removing Graph from session"); - Status ret = instance_ptr->SessionManagerObj().RemoveGraph(sessionId_, graph_id); + Status ret = g_session_manager->RemoveGraph(sessionId_, graph_id); // check return status, return if (ret != SUCCESS) { GELOGE(ret, @@ -583,7 +606,7 @@ Status Session::RunGraph(uint32_t graph_id, const std::vector &inputs, s return FAILED; } GELOGT(TRACE_RUNNING, "Running Graph"); - Status ret = instance_ptr->SessionManagerObj().RunGraph(sessionId_, graph_id, graph_inputs, outputs); + Status ret = g_session_manager->RunGraph(sessionId_, graph_id, graph_inputs, outputs); // check return status if (ret != SUCCESS) { GELOGE(ret, @@ -631,7 +654,7 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s return FAILED; } GELOGT(TRACE_RUNNING, "Run Graph Run graph with stream asyn."); - Status ret = 
instance_ptr->SessionManagerObj().RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, + Status ret = g_session_manager->RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, outputs); if (ret != SUCCESS) { GELOGE(ret, "[Run][Graph]Run graph with stream asyn Failed," @@ -648,7 +671,7 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s // Register Call Back Status Session::RegisterCallBackFunc(const std::string &key, const pCallBackFunc &callback) { ErrorManager::GetInstance().GenWorkStreamIdDefault(); - return ge::GELib::GetInstance()->SessionManagerObj().RegisterCallBackFunc(sessionId_, key, callback); + return g_session_manager->RegisterCallBackFunc(sessionId_, key, callback); } Status Session::RegisterCallBackFunc(const char *key, const session::pCallBackFunc &callback) { @@ -657,7 +680,7 @@ Status Session::RegisterCallBackFunc(const char *key, const session::pCallBackFu if (key != nullptr) { str_key = key; } - return ge::GELib::GetInstance()->SessionManagerObj().RegisterCallBackFunc(sessionId_, str_key, callback); + return g_session_manager->RegisterCallBackFunc(sessionId_, str_key, callback); } // Build Graph @@ -675,7 +698,7 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector return FAILED; } GELOGT(TRACE_RUNNING, "Building Graph"); - Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs); + Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); if (ret != SUCCESS) { GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", @@ -702,7 +725,7 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector &inp return FAILED; } GELOGT(TRACE_RUNNING, "Building Graph"); - Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs); + Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); if (ret != SUCCESS) { GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", @@ -733,7 +756,7 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector & GELOGW( "The callback function will not be checked. 
Please ensure that the implementation of the function is trusted."); - Status ret = ge::GELib::GetInstance()->SessionManagerObj().RunGraphAsync(sessionId_, graph_id, inputs, callback); + Status ret = g_session_manager->RunGraphAsync(sessionId_, graph_id, inputs, callback); if (ret != SUCCESS) { GELOGE(ret, "[Run][Graph]RunGraphAsync Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); @@ -757,7 +780,7 @@ Status Session::GetVariables(const std::vector &var_names, std::vec return FAILED; } GELOGT(TRACE_RUNNING, "Get Variables"); - Status ret = ge::GELib::GetInstance()->SessionManagerObj().GetVariables(sessionId_, var_names, var_values); + Status ret = g_session_manager->GetVariables(sessionId_, var_names, var_values); if (ret != SUCCESS) { GELOGE(ret, "[Get][Variables]Failed, error code:%u, session_id:%lu.", ret, sessionId_); return FAILED; @@ -787,7 +810,7 @@ Status Session::GetVariables(const std::vector &var_names, std::ve } str_var_names.emplace_back(var_name.GetString()); } - Status ret = ge::GELib::GetInstance()->SessionManagerObj().GetVariables(sessionId_, str_var_names, var_values); + Status ret = g_session_manager->GetVariables(sessionId_, str_var_names, var_values); if (ret != SUCCESS) { GELOGE(ret, "[Get][Variables]Failed, error code:%u, session_id:%lu.", ret, sessionId_); REPORT_CALL_ERROR("E19999", "Get variables failed, error code:%u, session_id:%lu.", @@ -798,6 +821,6 @@ Status Session::GetVariables(const std::vector &var_names, std::ve } bool Session::IsGraphNeedRebuild(uint32_t graph_id) { - return ge::GELib::GetInstance()->SessionManagerObj().IsGraphNeedRebuild(sessionId_, graph_id); + return g_session_manager->IsGraphNeedRebuild(sessionId_, graph_id); } } // namespace ge diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index 50e8a5a5..7c31614d 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -23,6 +23,7 @@ #include "graph/manager/graph_var_manager.h" #include "graph/utils/tensor_adapter.h" #include "graph/load/graph_loader.h" +#include "graph/load/model_manager/model_manager.h" #include "common/math/math_util.h" #include "common/formats/utils/formats_trans_utils.h" @@ -38,7 +39,7 @@ namespace ge { /// @param [in] options user config params /// @return Status result of function /// -Status ModelExecutor::Initialize(const map &options) { +Status ModelExecutor::Initialize(const map &options, uint64_t session_id) { graph_run_listener_ = MakeShared(sync_run_mutex_, condition_); if (graph_run_listener_ == nullptr) { REPORT_CALL_ERROR("E19999", "New GraphModelListener fail"); @@ -46,6 +47,14 @@ Status ModelExecutor::Initialize(const map &options) { return MEMALLOC_FAILED; } + auto model_manager = ModelManager::GetInstance(); + GE_CHECK_NOTNULL(model_manager); + GE_IF_BOOL_EXEC(model_manager->EnableExceptionDump(options) != SUCCESS, + REPORT_CALL_ERROR("E19999", "ModelManager EnableExceptionDump failed."); + GELOGE(FAILED, "[Enable][ExceptionDump] failed."); + return FAILED); + + session_id_ = session_id; train_graph_flag_ = ParseTrainGraphFlag(); thread_run_flag_.store(true); run_thread_ = std::thread(&ModelExecutor::RunThread, this); @@ -74,6 +83,7 @@ Status ModelExecutor::Finalize() { GELOGW("Graph executor FreeExecuteMemory failed, resources may not be released correctly."); } + ModelManager::GetInstance()->DestroyAicpuSession(session_id_); return SUCCESS; } diff --git a/ge/graph/execute/model_executor.h b/ge/graph/execute/model_executor.h index f8e717a1..f11441e9 100644 
--- a/ge/graph/execute/model_executor.h +++ b/ge/graph/execute/model_executor.h @@ -30,7 +30,7 @@ class ModelExecutor : public Executor { /// @param [in] options user config params /// @return Status result of function /// - Status Initialize(const map &options); + Status Initialize(const map &options, uint64_t session_id); /// /// @ingroup ge @@ -120,6 +120,7 @@ class ModelExecutor : public Executor { bool init_flag_{false}; bool train_graph_flag_{false}; + uint64_t session_id_{0}; GraphExecutor graph_executor_; std::mutex mutex_; diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index b34871a9..f7296144 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -38,7 +38,6 @@ #include "graph/common/ge_call_wrapper.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" -#include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_mem_manager.h" #include "graph/manager/host_mem_manager.h" #include "graph/manager/graph_var_manager.h" @@ -160,18 +159,6 @@ Status GELib::InnerInitialize(const map &options) { return initOpsBuilderStatus; } - ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); - GELOGI("sessionManager initial."); - GE_TIMESTAMP_START(SessionManagerInitialize); - Status initSmStatus = sessionManager_.Initialize(options); - GE_TIMESTAMP_END(SessionManagerInitialize, "InnerInitialize::SessionManagerInitialize"); - if (initSmStatus != SUCCESS) { - GELOGE(initSmStatus, "[Init][SessionManager] GE session manager initial failed."); - REPORT_CALL_ERROR("E19999", "SessionManager initialize failed."); - RollbackInit(); - return initSmStatus; - } - GELOGI("Start to initialize HostCpuEngine"); GE_TIMESTAMP_START(HostCpuEngineInitialize); Status initHostCpuEngineStatus = HostCpuEngine::GetInstance().Initialize(); @@ -209,12 +196,6 @@ Status GELib::SystemInitialize(const map &options) { // In train and infer, profiling is always needed. 
InitProfiling(this->options_); - auto model_manager = ModelManager::GetInstance(); - GE_CHECK_NOTNULL(model_manager); - GE_IF_BOOL_EXEC(model_manager->EnableExceptionDump(options) != SUCCESS, - REPORT_CALL_ERROR("E19999", "ModelManager EnableExceptionDump failed."); - GELOGE(FAILED, "[Enable][ExceptionDump] failed."); - return FAILED); // 1.`is_train_mode_` means case: train // 2.`(!is_train_mode_) && (options_.device_id != kDefaultDeviceIdForInfer)` means case: online infer // these two case with logical device id @@ -454,12 +435,6 @@ Status GELib::Finalize() { GELOGW("engineManager finalize failed"); final_state = mid_state; } - GELOGI("sessionManager finalization."); - mid_state = sessionManager_.Finalize(); - if (mid_state != SUCCESS) { - GELOGW("sessionManager finalize failed"); - final_state = mid_state; - } GELOGI("opsBuilderManager finalization."); mid_state = OpsKernelBuilderManager::Instance().Finalize(); @@ -539,9 +514,6 @@ void GELib::RollbackInit() { if (opsManager_.init_flag_) { (void)opsManager_.Finalize(); } - if (sessionManager_.init_flag_) { - (void)sessionManager_.Finalize(); - } MemManager::Instance().Finalize(); HostMemManager::Instance().Finalize(); VarManagerPool::Instance().Destory(); diff --git a/ge/init/gelib.h b/ge/init/gelib.h index eb367578..5e66be51 100644 --- a/ge/init/gelib.h +++ b/ge/init/gelib.h @@ -22,7 +22,13 @@ #include #include "engine_manager/dnnengine_manager.h" #include "opskernel_manager/ops_kernel_manager.h" -#include "session/session_manager.h" +#include "graph/tuning_utils.h" +#include "graph/operator_factory.h" +#include "graph/ge_local_context.h" +#include "graph/debug/ge_attr_define.h" +#include "graph/utils/graph_utils.h" +#include "graph/utils/anchor_utils.h" +#include "graph/manager/graph_var_manager.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/ge_types.h" @@ -53,9 +59,6 @@ class GE_FUNC_VISIBILITY GELib { // get OpsKernelManager object OpsKernelManager &OpsKernelManagerObj() { return opsManager_; } - // get SessionManager object - SessionManager &SessionManagerObj() { return sessionManager_; } - // get Initial flag bool InitFlag() const { return init_flag_; } @@ -90,7 +93,6 @@ class GE_FUNC_VISIBILITY GELib { DNNEngineManager engineManager_; OpsKernelManager opsManager_; - SessionManager sessionManager_; std::mutex status_mutex_; bool init_flag_ = false; Options options_; diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index 236ec783..fcb9d233 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -30,7 +30,6 @@ #include "graph/ge_global_options.h" #include "graph/ge_local_context.h" #include "graph/common/local_context.h" -#include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_var_manager.h" #include "graph/manager/graph_mem_manager.h" #include "graph/utils/tensor_adapter.h" @@ -169,7 +168,6 @@ Status InnerSession::Finalize() { REPORT_CALL_ERROR("E19999", "GraphManager Finalize failed, InnerSession:%lu.", session_id_); } - ModelManager::GetInstance()->DestroyAicpuSession(session_id_); init_flag_ = false; // release var memory GELOGI("VarManager free var memory."); @@ -189,7 +187,7 @@ Status InnerSession::Finalize() { } Status InnerSession::InnerInitialize() { - Status ret = model_executor_.Initialize(options_); + Status ret = model_executor_.Initialize(options_, session_id_); if (ret != SUCCESS) { GELOGE(ret, "[Init][GraphExecutor] failed, InnerSession:%lu.", session_id_); REPORT_CALL_ERROR("E19999", "GraphExecutor initialize 
failed, InnerSession:%lu.", session_id_); diff --git a/ge/session/session_manager.cc b/ge/session/session_manager.cc index fdf37d06..486dfd58 100755 --- a/ge/session/session_manager.cc +++ b/ge/session/session_manager.cc @@ -20,7 +20,6 @@ #include "common/ge/ge_util.h" #include "framework/common/debug/ge_log.h" #include "graph/ge_context.h" -#include "graph/load/model_manager/model_manager.h" #include "graph/manager/util/rt_context_util.h" using std::map; @@ -105,10 +104,6 @@ Status SessionManager::DestroySession(SessionId session_id) { return GE_SESSION_NOT_EXIST; } - if (ModelManager::GetInstance() != nullptr) { - ModelManager::GetInstance()->DestroyAicpuSession(session_id); - } - // Unified destruct rt_context RtContextUtil::GetInstance().DestroyRtContexts(session_id); diff --git a/ge/session/session_manager.h b/ge/session/session_manager.h index 4c3429c2..4a0b9d66 100644 --- a/ge/session/session_manager.h +++ b/ge/session/session_manager.h @@ -31,9 +31,26 @@ namespace ge { using SessionPtr = std::shared_ptr; class SessionManager { - friend class GELib; - public: + SessionManager() = default; + + ~SessionManager() = default; + + /// + /// @ingroup ge_session + /// @brief initialize session manager + /// @param [in] options session manager config options + /// @return Status result of function + /// + Status Initialize(const std::map &options); + + /// + /// @ingroup ge_session + /// @brief finalize session manager + /// @return Status result of function + /// + Status Finalize(); + /// /// @ingroup ge_session /// @brief create session @@ -181,25 +198,6 @@ class SessionManager { bool IsGraphNeedRebuild(SessionId session_id, uint32_t graph_id); private: - SessionManager() = default; - - ~SessionManager() = default; - - /// - /// @ingroup ge_session - /// @brief initialize session manager - /// @param [in] options session manager config options - /// @return Status result of function - /// - Status Initialize(const std::map &options); - - /// - /// @ingroup ge_session - /// @brief finalize session manager - /// @return Status result of function - /// - Status Finalize(); - bool HasSession(SessionId session_id); Status GetNextSessionId(SessionId &next_session_id); From 91d15cdc13a7acf67151afa990b6bdefaec84221 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 7 Jul 2021 18:43:12 +0800 Subject: [PATCH 170/226] Fix UT --- .../graph/execute/model_executor_unittest.cc | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/ut/ge/graph/execute/model_executor_unittest.cc b/tests/ut/ge/graph/execute/model_executor_unittest.cc index 33643993..d4e0e3a4 100644 --- a/tests/ut/ge/graph/execute/model_executor_unittest.cc +++ b/tests/ut/ge/graph/execute/model_executor_unittest.cc @@ -63,7 +63,7 @@ static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string TEST_F(UtestModelExecutorTest, test_load_graph_sync) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); auto compute_graph = MakeShared("test_graph"); GeRootModelPtr ge_root_model = MakeShared(compute_graph); @@ -86,7 +86,7 @@ TEST_F(UtestModelExecutorTest, test_load_graph_sync) { TEST_F(UtestModelExecutorTest, test_load_graph_async) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); Graph graph("test_graph"); auto compute_graph = MakeShared("test_graph"); @@ -111,7 +111,7 @@ TEST_F(UtestModelExecutorTest, 
test_load_graph_async) { TEST_F(UtestModelExecutorTest, test_load_graph_failed) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); Graph graph("test_graph"); auto compute_graph = MakeShared("test_graph"); @@ -144,7 +144,7 @@ TEST_F(UtestModelExecutorTest, test_check_and_release_memory) { } ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); GeModelPtr ge_model = make_shared(); int64_t memory_size = 25 * 1024UL * 1024UL * 1024UL; @@ -171,7 +171,7 @@ TEST_F(UtestModelExecutorTest, test_check_and_release_memory) { TEST_F(UtestModelExecutorTest, parse_inputs_dims_data) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); OmeContext context; SetLocalOmeContext(context); @@ -195,7 +195,7 @@ TEST_F(UtestModelExecutorTest, parse_inputs_dims_data) { TEST_F(UtestModelExecutorTest, parse_inputs_dims_getnext) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); OmeContext context; SetLocalOmeContext(context); @@ -223,7 +223,7 @@ TEST_F(UtestModelExecutorTest, parse_inputs_dims_getnext) { TEST_F(UtestModelExecutorTest, test_run_thread) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); GraphId graph_id = 1; uint64_t session_id = 0; @@ -281,7 +281,7 @@ static void test_run_graph(ModelExecutor &model_executor) { TEST_F(UtestModelExecutorTest, test_run_graph_train) { GetThreadLocalContext().SetGlobalOption({{OPTION_GRAPH_RUN_MODE, "1"}}); ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); test_run_graph(model_executor); EXPECT_EQ(model_executor.Finalize(), SUCCESS); } @@ -291,14 +291,14 @@ TEST_F(UtestModelExecutorTest, test_run_graph_infer) { GetThreadLocalContext().SetSessionOption({}); GetThreadLocalContext().SetGraphOption({}); ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); test_run_graph(model_executor); EXPECT_EQ(model_executor.Finalize(), SUCCESS); } TEST_F(UtestModelExecutorTest, test_run_graph_with_stream) { ModelExecutor model_executor; - EXPECT_EQ(model_executor.Initialize({}), SUCCESS); + EXPECT_EQ(model_executor.Initialize({}, 0), SUCCESS); GraphId graph_id = 1; auto compute_graph = MakeShared("test_graph"); From cd27cbf7e2573a16b653be83381c4ab85fd1f6eb Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 7 Jul 2021 21:12:36 +0800 Subject: [PATCH 171/226] Add UT for Session --- ge/client/ge_api.cc | 275 +++++++++---------------- ge/graph/execute/model_executor.cc | 4 +- ge/graph/manager/graph_manager.cc | 4 +- tests/ut/ge/session/ge_api_unittest.cc | 112 +++++++++- 4 files changed, 211 insertions(+), 184 deletions(-) diff --git a/ge/client/ge_api.cc b/ge/client/ge_api.cc index e62965c9..3cf7c3c4 100644 --- a/ge/client/ge_api.cc +++ b/ge/client/ge_api.cc @@ -71,8 +71,7 @@ Status CheckOptionsValid(const std::map &options) { auto job_id_iter = options.find(OPTION_EXEC_JOB_ID); if (job_id_iter != options.end()) { if (job_id_iter->second.length() > kMaxStrLen) { - GELOGE(PARAM_INVALID, "[Check][JobId]Failed," - "the job_id [%s] string length: %zu > max string length: %d", + 
GELOGE(PARAM_INVALID, "[Check][JobId]Failed, the job_id [%s] string length: %zu > max string length: %d", job_id_iter->second.c_str(), job_id_iter->second.length(), kMaxStrLen); REPORT_INPUT_ERROR("E10051", std::vector({"id", "length"}), std::vector({job_id_iter->second, @@ -96,8 +95,7 @@ Status GEInitializeImpl(const std::map &options) { std::string path_base = ge::GELib::GetPath(); auto ret = ErrorManager::GetInstance().Init(path_base); if (ret != SUCCESS) { - GELOGE(GE_CLI_INIT_FAILED, - "[Init][PathBase]Init failed when pass param path_base:%s", path_base.c_str()); + GELOGE(GE_CLI_INIT_FAILED, "[Init][PathBase]Init failed when pass param path_base:%s", path_base.c_str()); REPORT_CALL_ERROR("E19999", "Init failed when pass param path_base:%s", path_base.c_str()); return ret; } @@ -118,11 +116,9 @@ Status GEInitializeImpl(const std::map &options) { bool is_proto_init = manager->Initialize(option_tmp); GE_TIMESTAMP_END(GEInitialize, "GEInitialize::ManagerInitialize"); if (!is_proto_init) { - GELOGE(GE_CLI_INIT_FAILED, - "[Init][OpsProtoPath]Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid.", + GELOGE(GE_CLI_INIT_FAILED, "[Init][OpsProtoPath]Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid.", opsproto_path.c_str()); - REPORT_CALL_ERROR("E19999", "Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid", - opsproto_path.c_str()); + REPORT_CALL_ERROR("E19999", "Loading OpsProto lib plugin failed, OpsProtoPath:%s invalid", opsproto_path.c_str()); return FAILED; } @@ -190,8 +186,7 @@ Status GEInitialize(const std::map &options) { for (auto &option : options) { if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { GELOGE(FAILED, "[Check][Param]Options invalid, first or second option is nullptr."); - REPORT_INNER_ERROR("E19999", "Check parameter's options invalid," - "the first or second option is nullptr."); + REPORT_INNER_ERROR("E19999", "Check parameter's options invalid, the first or second option is nullptr."); return FAILED; } std::string key = option.first.GetString(); @@ -274,22 +269,12 @@ std::string GEGetWarningMsg() { Session::Session(const std::map &options) { ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); GELOGT(TRACE_INIT, "Start to construct session."); - ErrorManager::GetInstance().GenWorkStreamIdDefault(); // check init status sessionId_ = 0; if (!g_ge_initialized) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Construct][Session]Failed because lack GEInitialize call before."); - REPORT_INNER_ERROR("E19999", - "Creating session failed because lack GEInitialize call before."); - return; - } - // call Initialize - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Construct][Session]Failed, GELib instance is nullptr or it is not InitFlag"); + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return; } @@ -311,32 +296,21 @@ Session::Session(const std::map &options) { Session::Session(const std::map &options) { ErrorManager::GetInstance().SetStage(error_message::kInitialize, error_message::kOther); GELOGT(TRACE_INIT, "Session Constructor start"); - ErrorManager::GetInstance().GenWorkStreamIdDefault(); // check init status sessionId_ = 0; if (!g_ge_initialized) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Construct][Session]Failed because 
lack GEInitialize call before."); - REPORT_INNER_ERROR("E19999", - "Creating session failed because lack GEInitialize call before."); + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return; } // call Initialize - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Construct][Session]Failed, the GELib instance is nullptr or is not InitFlag"); - return; - } - GELOGT(TRACE_RUNNING, "Creating session"); std::map str_options; for (auto &option : options) { if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { GELOGE(FAILED, "[Construct][Session]Failed, the first or second option is nullptr."); - REPORT_INNER_ERROR("E19999", "Creating session's options invalid," - "the first or second option is nullptr."); + REPORT_INNER_ERROR("E19999", "Creating session's options invalid, the first or second option is nullptr."); return; } std::string key = option.first.GetString(); @@ -373,19 +347,12 @@ Session::~Session() { try { uint64_t session_id = sessionId_; // call DestroySession - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGW("GE is not yet initialized or is finalized."); - return; - } GELOGT(TRACE_RUNNING, "Session id is %lu", session_id); - GELOGT(TRACE_RUNNING, "Destroying session"); ret = g_session_manager->DestroySession(session_id); } catch (google::protobuf::FatalException &e) { - GELOGE(GE_CLI_SESS_DESTROY_FAILED, "[Destruct][Session]Failed " - "because get fatalException."); + GELOGE(GE_CLI_SESS_DESTROY_FAILED, "[Destruct][Session]Failed because get fatalException."); REPORT_CALL_ERROR("E19999", "Destruct session failed, get fatal exception"); } @@ -400,9 +367,7 @@ Session::~Session() { // Add Graph Status Session::AddGraph(uint32_t graph_id, const Graph &graph) { - ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); std::map options; - ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); return AddGraph(graph_id, graph, options); } @@ -411,20 +376,16 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::map instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Add][Graph]Failed because GELib instance is nullptr or it is not InitFlag."); - REPORT_INNER_ERROR("E19999", - "AddGraph Failed, GELib instance is nullptr or it is not InitFlag."); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGD("Adding graph to session"); Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, options); if (ret != SUCCESS) { - GELOGE(ret, - "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); + GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); return FAILED; } GELOGD("AddGraph finished in Session."); @@ -432,26 +393,22 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, const std::map &options) { +Status Session::AddGraph(uint32_t graph_id, 
const Graph &graph, const std::map &options) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GELOGT(TRACE_INIT, "Start to add graph in Session. graph_id: %u, session_id: %lu.", graph_id, sessionId_); ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Add][Graph]Failed, the GELib instance is nullptr or is not InitFlag."); - REPORT_INNER_ERROR("E19999", - "AddGraph Failed, GELib instance is nullptr or it is not InitFlag."); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGD("Adding graph to session"); std::map str_options; for (auto &option : options) { if (option.first.GetString() == nullptr || option.second.GetString() == nullptr) { GELOGE(FAILED, "[Add][Graph]Failed, the first or second option is nullptr."); - REPORT_INNER_ERROR("E19999", - "Add Graph Failed, the first or second option is nullptr."); + REPORT_INNER_ERROR("E19999", "Add Graph Failed, the first or second option is nullptr."); return FAILED; } std::string key = option.first.GetString(); @@ -460,9 +417,7 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, } Status ret = g_session_manager->AddGraph(sessionId_, graph_id, graph, str_options); if (ret != SUCCESS) { - GELOGE(ret, - "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); + GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); return FAILED; } GELOGD("AddGraph finished in Session."); @@ -470,8 +425,6 @@ Status Session::AddGraph(uint32_t graph_id, const Graph &graph, } Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph) { - ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); - ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); std::map options; return AddGraphWithCopy(graph_id, graph, options); } @@ -482,14 +435,12 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GELOGT(TRACE_INIT, "Start to add graph in Session. 
graph_id: %u, session_id: %lu.", graph_id, sessionId_); ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Add][Graph]Failed, the GELib instance is nullptr or is not InitFlag."); - REPORT_INNER_ERROR("E19999", - "AddGraph Failed, GELib instance is nullptr or is not InitFlag."); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + std::map str_options; for (auto it = options.begin(); it != options.end(); ++it) { str_options.insert({it->first.GetString(), it->second.GetString()}); @@ -497,9 +448,7 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, GELOGD("Adding graph to session"); Status ret = g_session_manager->AddGraphWithCopy(sessionId_, graph_id, graph, str_options); if (ret != SUCCESS) { - GELOGE(ret, - "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); + GELOGE(ret, "[Add][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); return FAILED; } GELOGD("AddGraph finished in Session."); @@ -510,17 +459,11 @@ Status Session::AddGraphWithCopy(uint32_t graph_id, const Graph &graph, Status Session::RemoveGraph(uint32_t graph_id) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GELOGT(TRACE_INIT, "Session RemoveGraph start"); - ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); // call RemoveGraph - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (!instance_ptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Remove][Graph]Failed, GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); - REPORT_INNER_ERROR("E19999", - "RemoveGraph Failed, GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } @@ -528,11 +471,9 @@ Status Session::RemoveGraph(uint32_t graph_id) { Status ret = g_session_manager->RemoveGraph(sessionId_, graph_id); // check return status, return if (ret != SUCCESS) { - GELOGE(ret, - "[Remove][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); - REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, " - "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); + GELOGE(ret, "[Remove][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); + REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, session_id:%lu, graph_id:%u", + ret, sessionId_, graph_id); return FAILED; } GELOGT(TRACE_STOP, "Session RemoveGraph finished"); @@ -591,29 +532,21 @@ void PrintOutputResult(std::vector &outputs) { Status Session::RunGraph(uint32_t graph_id, const std::vector &inputs, std::vector &outputs) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GELOGT(TRACE_INIT, "Session RunGraph start"); - 
ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::vector graph_inputs = inputs; - // call RunGraph - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Run][Graph]Failed, GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); - REPORT_INNER_ERROR("E19999", - "RunGraph Failed, GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + + // call RunGraph GELOGT(TRACE_RUNNING, "Running Graph"); - Status ret = g_session_manager->RunGraph(sessionId_, graph_id, graph_inputs, outputs); + Status ret = g_session_manager->RunGraph(sessionId_, graph_id, inputs, outputs); // check return status if (ret != SUCCESS) { - GELOGE(ret, - "[Run][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); - REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, " - "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); + GELOGE(ret, "[Run][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); + REPORT_CALL_ERROR("E19999", "Remove graph failed, error code:%u, session_id:%lu, graph_id:%u", + ret, sessionId_, graph_id); return FAILED; } @@ -632,30 +565,15 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s std::vector &outputs) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); GELOGT(TRACE_INIT, "Start to run graph with stream async."); - ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Run][Graph]Run graph with stream async failed, the GELib instance is nullptr," - "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); - REPORT_INNER_ERROR("E19999", - "Run graph with stream async failed, the GELib instance is nullptr" - "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); - return FAILED; - } - if (!instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Run][Graph]Run graph with stream asyn failed, the GELib instance is not init," - "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); - REPORT_INNER_ERROR("E19999", - "Run graph with stream asyn failed, the GELib instance is not init," - "session id = %lu, graph id = %u, stream = %p.", sessionId_, graph_id, stream); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Run Graph Run graph with stream asyn."); - Status ret = g_session_manager->RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, - outputs); + Status ret = g_session_manager->RunGraphWithStreamAsync(sessionId_, graph_id, stream, inputs, outputs); if (ret != SUCCESS) { GELOGE(ret, "[Run][Graph]Run graph with stream asyn Failed," "error code = %u, session id = %lu, graph id = %u, stream = %p.", 
ret, sessionId_, graph_id, stream); @@ -671,11 +589,23 @@ Status Session::RunGraphWithStreamAsync(uint32_t graph_id, void *stream, const s // Register Call Back Status Session::RegisterCallBackFunc(const std::string &key, const pCallBackFunc &callback) { ErrorManager::GetInstance().GenWorkStreamIdDefault(); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); + return FAILED; + } + return g_session_manager->RegisterCallBackFunc(sessionId_, key, callback); } Status Session::RegisterCallBackFunc(const char *key, const session::pCallBackFunc &callback) { ErrorManager::GetInstance().GenWorkStreamIdDefault(); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); + return FAILED; + } + std::string str_key; if (key != nullptr) { str_key = key; @@ -687,24 +617,18 @@ Status Session::RegisterCallBackFunc(const char *key, const session::pCallBackFu Status Session::BuildGraph(uint32_t graph_id, const std::vector &inputs) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Build][Graph]Failed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); - REPORT_INNER_ERROR("E19999", - "Build graph failed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Building Graph"); Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); if (ret != SUCCESS) { - GELOGE(ret, - "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); - REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, " - "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); + GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); + REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, session_id:%lu, graph_id:%u", + ret, sessionId_, graph_id); return FAILED; } return SUCCESS; @@ -714,24 +638,18 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector Status Session::BuildGraph(uint32_t graph_id, const std::vector &inputs) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Build][Graph]Failed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); - REPORT_INNER_ERROR("E19999", - "Build graph failed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, 
graph_id %u", sessionId_, graph_id); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Building Graph"); Status ret = g_session_manager->BuildGraph(sessionId_, graph_id, inputs); if (ret != SUCCESS) { - GELOGE(ret, - "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", - ret, sessionId_, graph_id); - REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, " - "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id); + GELOGE(ret, "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); + REPORT_CALL_ERROR("E19999", "Build graph failed , error code:%u, session_id:%lu, graph_id:%u", + ret, sessionId_, graph_id); return FAILED; } return SUCCESS; @@ -742,16 +660,12 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector & RunAsyncCallback callback) { ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id); - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Run][Graph]RunGraphAsyncFailed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); - REPORT_INNER_ERROR("E19999", - "RunGraphAsync Failed, the GELib instance is nullptr or is not InitFlag, " - "session_id %lu, graph_id %u", sessionId_, graph_id); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Run Graph Asynchronously"); GELOGW( "The callback function will not be checked. 
Please ensure that the implementation of the function is trusted."); @@ -760,8 +674,8 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector & if (ret != SUCCESS) { GELOGE(ret, "[Run][Graph]RunGraphAsync Failed, error code:%u, session_id:%lu, graph_id:%u.", ret, sessionId_, graph_id); - REPORT_CALL_ERROR("E19999", "RunGraphAsync Failed, error code:%u, session_id:%lu, " - "graph_id:%u", ret, sessionId_, graph_id); + REPORT_CALL_ERROR("E19999", "RunGraphAsync Failed, error code:%u, session_id:%lu, graph_id:%u", + ret, sessionId_, graph_id); return FAILED; } return SUCCESS; @@ -771,14 +685,12 @@ Status Session::RunGraphAsync(uint32_t graph_id, const std::vector & Status Session::GetVariables(const std::vector &var_names, std::vector &var_values) { ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); ErrorManager::GetInstance().GenWorkStreamIdDefault(); - auto instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Get][Variables]Failed, the GELib instance is nullptr or is not InitFlag."); - REPORT_INNER_ERROR("E19999", - "GetVariables failed, the GELib instance is nullptr or is not InitFlag."); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Get Variables"); Status ret = g_session_manager->GetVariables(sessionId_, var_names, var_values); if (ret != SUCCESS) { @@ -792,14 +704,12 @@ Status Session::GetVariables(const std::vector &var_names, std::vec Status Session::GetVariables(const std::vector &var_names, std::vector &var_values) { ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); ErrorManager::GetInstance().GenWorkStreamIdDefault(); - auto instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->InitFlag()) { - GELOGE(GE_CLI_GE_NOT_INITIALIZED, - "[Get][Variables]Failed, the GELib instance is nullptr or is not InitFlag."); - REPORT_INNER_ERROR("E19999", - "GetVariables failed, the GELib instance is nullptr or is not InitFlag."); + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); return FAILED; } + GELOGT(TRACE_RUNNING, "Get Variables"); std::vector str_var_names; for (auto &var_name : var_names) { @@ -813,14 +723,19 @@ Status Session::GetVariables(const std::vector &var_names, std::ve Status ret = g_session_manager->GetVariables(sessionId_, str_var_names, var_values); if (ret != SUCCESS) { GELOGE(ret, "[Get][Variables]Failed, error code:%u, session_id:%lu.", ret, sessionId_); - REPORT_CALL_ERROR("E19999", "Get variables failed, error code:%u, session_id:%lu.", - ret, sessionId_); + REPORT_CALL_ERROR("E19999", "Get variables failed, error code:%u, session_id:%lu.", ret, sessionId_); return FAILED; } return SUCCESS; } bool Session::IsGraphNeedRebuild(uint32_t graph_id) { + if (!g_ge_initialized) { + GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Construct][Session]Failed because lack GEInitialize call before."); + REPORT_INNER_ERROR("E19999", "Creating session failed because lack GEInitialize call before."); + return false; + } + return 
g_session_manager->IsGraphNeedRebuild(sessionId_, graph_id); } } // namespace ge diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index 7c31614d..36d21e1c 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -178,7 +178,9 @@ void ModelExecutor::ReturnError(RunAsyncCallback callback, Status ret, const str StopQueue(); GELOGE(ret, "%s.", log.c_str()); std::vector outputs; - callback(ret, outputs); + if (callback != nullptr) { + callback(ret, outputs); + } } void ModelExecutor::UpdateLocalOmeContext(const GraphNodePtr &graph_node) { diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 96dc59c5..a3605ec2 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -2939,7 +2939,9 @@ void GraphManager::ReturnError(RunAsyncCallback callback, Status ret, const stri StopQueue(); GELOGE(ret, "%s.", log.c_str()); std::vector outputs; - callback(ret, outputs); + if (callback != nullptr) { + callback(ret, outputs); + } } bool GraphManager::IsGraphNeedRebuild(uint32_t graph_id) { diff --git a/tests/ut/ge/session/ge_api_unittest.cc b/tests/ut/ge/session/ge_api_unittest.cc index 9a7058f3..93e6a52c 100644 --- a/tests/ut/ge/session/ge_api_unittest.cc +++ b/tests/ut/ge/session/ge_api_unittest.cc @@ -26,8 +26,6 @@ #include "proto/ge_ir.pb.h" #include "inc/external/ge/ge_api.h" #include "session/session_manager.h" -#undef protected -#undef private using namespace std; @@ -71,4 +69,114 @@ TEST_F(UtestGeApi, ge_initialize_modify_mixlist) { auto ret = GEInitialize(options); ASSERT_NE(ret, SUCCESS); } + +TEST_F(UtestGeApi, ge_not_initialized) { + EXPECT_EQ(GEFinalize(), SUCCESS); + + std::map options; + std::map ascend_options; + Session session(options); + + GraphId graph_id = 1; + const auto compute_graph = MakeShared("test_graph"); + Graph graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph); + + EXPECT_EQ(session.AddGraph(graph_id, graph), FAILED); + EXPECT_EQ(session.AddGraph(graph_id, graph, ascend_options), FAILED); + + EXPECT_EQ(session.AddGraphWithCopy(graph_id, graph), FAILED); + EXPECT_EQ(session.AddGraphWithCopy(graph_id, graph, ascend_options), FAILED); + + vector inputs; + vector tensors; + EXPECT_EQ(session.BuildGraph(graph_id, inputs), FAILED); + EXPECT_EQ(session.BuildGraph(graph_id, tensors), FAILED); + + vector outputs; + EXPECT_EQ(session.RunGraph(graph_id, inputs, outputs), FAILED); + EXPECT_EQ(session.RunGraphWithStreamAsync(graph_id, nullptr, inputs, outputs), FAILED); + EXPECT_EQ(session.RunGraphAsync(graph_id, inputs, nullptr), FAILED); + + vector var_inputs; + EXPECT_EQ(session.GetVariables(var_inputs, outputs), FAILED); + + vector var_names; + EXPECT_EQ(session.GetVariables(var_names, outputs), FAILED); + + std::string key; + pCallBackFunc ge_callback; + EXPECT_EQ(session.RegisterCallBackFunc(key, ge_callback), FAILED); + + session::pCallBackFunc session_callback; + EXPECT_EQ(session.RegisterCallBackFunc(key.c_str(), session_callback), FAILED); + + EXPECT_FALSE(session.IsGraphNeedRebuild(graph_id)); + + EXPECT_EQ(session.RemoveGraph(graph_id), FAILED); + EXPECT_EQ(GEFinalize(), SUCCESS); +} + +TEST_F(UtestGeApi, ge_session_ascend_string) { + std::map options; + EXPECT_EQ(GEInitialize(options), SUCCESS); + + Session session(options); + + GraphId graph_id = 1; + const auto compute_graph = MakeShared("test_graph"); + EXPECT_EQ(session.AddGraph(graph_id, GraphUtils::CreateGraphFromComputeGraph(compute_graph)), SUCCESS); + + 
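+  // The graph above was only added, never built, so it must still report that it needs (re)building.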
EXPECT_TRUE(session.IsGraphNeedRebuild(graph_id)); + + EXPECT_EQ(session.RemoveGraph(graph_id), SUCCESS); + + EXPECT_EQ(GEFinalize(), SUCCESS); +} + +TEST_F(UtestGeApi, ge_session_test) { + std::map options; + EXPECT_EQ(GEInitialize(options), SUCCESS); + + std::map ascend_options; + Session session(options); + + GraphId graph_id = 1; + const auto compute_graph = MakeShared("test_graph"); + Graph graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph); + + EXPECT_EQ(session.AddGraph(graph_id, graph), SUCCESS); + EXPECT_EQ(session.AddGraph(graph_id, graph, ascend_options), SUCCESS); + + EXPECT_EQ(session.AddGraphWithCopy(graph_id, graph), FAILED); + EXPECT_EQ(session.AddGraphWithCopy(graph_id, graph, ascend_options), FAILED); + + vector inputs; + vector tensors; + EXPECT_EQ(session.BuildGraph(graph_id, inputs), FAILED); + EXPECT_EQ(session.BuildGraph(graph_id, tensors), FAILED); + + vector outputs; + EXPECT_EQ(session.RunGraph(graph_id, inputs, outputs), FAILED); + EXPECT_EQ(session.RunGraphWithStreamAsync(graph_id, nullptr, inputs, outputs), FAILED); + EXPECT_EQ(session.RunGraphAsync(graph_id, inputs, nullptr), SUCCESS); // Push to queue. + + vector var_inputs; + EXPECT_EQ(session.GetVariables(var_inputs, outputs), FAILED); + + vector var_names; + EXPECT_EQ(session.GetVariables(var_names, outputs), FAILED); + + std::string key; + pCallBackFunc ge_callback; + EXPECT_EQ(session.RegisterCallBackFunc(key, ge_callback), SUCCESS); + + session::pCallBackFunc session_callback; + EXPECT_EQ(session.RegisterCallBackFunc(key.c_str(), session_callback), SUCCESS); + + EXPECT_TRUE(session.IsGraphNeedRebuild(graph_id)); + + EXPECT_EQ(session.RemoveGraph(graph_id), SUCCESS); + EXPECT_EQ(GEFinalize(), SUCCESS); +} + } // namespace ge From ea95be37a7116eea96a48511519aaaa5f5e59a2f Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 8 Jul 2021 13:50:00 +0800 Subject: [PATCH 172/226] Revert EnableExceptionDump --- ge/graph/execute/model_executor.cc | 7 ------- ge/init/gelib.cc | 7 +++++++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index 36d21e1c..bcbc08e6 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -47,13 +47,6 @@ Status ModelExecutor::Initialize(const map &options, uint64_t se return MEMALLOC_FAILED; } - auto model_manager = ModelManager::GetInstance(); - GE_CHECK_NOTNULL(model_manager); - GE_IF_BOOL_EXEC(model_manager->EnableExceptionDump(options) != SUCCESS, - REPORT_CALL_ERROR("E19999", "ModelManager EnableExceptionDump failed."); - GELOGE(FAILED, "[Enable][ExceptionDump] failed."); - return FAILED); - session_id_ = session_id; train_graph_flag_ = ParseTrainGraphFlag(); thread_run_flag_.store(true); diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index f7296144..132d4680 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -38,6 +38,7 @@ #include "graph/common/ge_call_wrapper.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" +#include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_mem_manager.h" #include "graph/manager/host_mem_manager.h" #include "graph/manager/graph_var_manager.h" @@ -196,6 +197,12 @@ Status GELib::SystemInitialize(const map &options) { // In train and infer, profiling is always needed. 
InitProfiling(this->options_); + auto model_manager = ModelManager::GetInstance(); + GE_CHECK_NOTNULL(model_manager); + GE_IF_BOOL_EXEC(model_manager->EnableExceptionDump(options) != SUCCESS, + REPORT_CALL_ERROR("E19999", "ModelManager EnableExceptionDump failed."); + GELOGE(FAILED, "[Enable][ExceptionDump] failed."); + return FAILED); // 1.`is_train_mode_` means case: train // 2.`(!is_train_mode_) && (options_.device_id != kDefaultDeviceIdForInfer)` means case: online infer // these two case with logical device id From cca94f97fb4a8c21fc85bf21c08d146804965a59 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 8 Jul 2021 13:52:19 +0800 Subject: [PATCH 173/226] Fix case_load_model_encypt_type_unsupported --- ge/hybrid/model/node_item.cc | 4 ++-- tests/ut/ge/graph/load/model_manager_unittest.cc | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ge/hybrid/model/node_item.cc b/ge/hybrid/model/node_item.cc index 77bd8efd..f66d4638 100644 --- a/ge/hybrid/model/node_item.cc +++ b/ge/hybrid/model/node_item.cc @@ -25,7 +25,7 @@ namespace ge { namespace hybrid { namespace { const uint8_t kMaxTransCount = 3; -const uint32_t kTransOpIoSize = 1; +const uint8_t kTransOpIoSize = 1; const char *const kAttrNameOriginalFusionGraph = "_original_fusion_graph"; const char *const kNodeTypeRetVal = "_RetVal"; const std::set kControlOpTypes{ @@ -47,7 +47,7 @@ bool IsEnterFeedNode(NodePtr node) { // For: Enter -> TransData -> Cast -> node for (uint8_t i = 0; i < kMaxTransCount; ++i) { if (kEnterOpTypes.count(NodeUtils::GetNodeType(node)) > 0) { - GELOGD("Node[%u] is Enter feed node.", node->GetName().c_str()); + GELOGD("Node[%s] is Enter feed node.", node->GetName().c_str()); return true; } diff --git a/tests/ut/ge/graph/load/model_manager_unittest.cc b/tests/ut/ge/graph/load/model_manager_unittest.cc index d9e4eabd..a0644510 100644 --- a/tests/ut/ge/graph/load/model_manager_unittest.cc +++ b/tests/ut/ge/graph/load/model_manager_unittest.cc @@ -78,7 +78,7 @@ class UtestModelManagerModelManager : public testing::Test { const int model_len = 10; data.model_len = sizeof(ModelFileHeader) + model_len; data.model_data = new uint8_t[data.model_len]; - memset((uint8_t *)data.model_data + sizeof(ModelFileHeader), 10, model_len); + memset((uint8_t *)data.model_data + sizeof(ModelFileHeader), 0, model_len); ModelFileHeader *header = (ModelFileHeader *)data.model_data; header->magic = MODEL_FILE_MAGIC_NUM; @@ -93,7 +93,7 @@ class UtestModelManagerModelManager : public testing::Test { data.key = ENC_KEY; data.model_data = new uint8_t[data.model_len]; uint8_t data_ori[model_len]; - memset(data_ori, 10, model_len); + memset(data_ori, 0, model_len); ModelFileHeader *header = (ModelFileHeader *)data.model_data; header->magic = MODEL_FILE_MAGIC_NUM; header->version = MODEL_VERSION; @@ -224,6 +224,7 @@ TEST_F(UtestModelManagerModelManager, case_load_model_encypt_type_unsupported) { ModelFileHeader *header = (ModelFileHeader *)data.model_data; header->is_encrypt = 255; uint32_t model_id = 1; + // Error for: LoadModelPartitionTable: Invalid partition_table->num:0 EXPECT_EQ(mm.LoadModelOffline(model_id, data, nullptr, nullptr), ACL_ERROR_GE_PARAM_INVALID); delete[](uint8_t *) data.model_data; } From 4077301b37a6852c2a116335195b529e2bdd63e4 Mon Sep 17 00:00:00 2001 From: zhupuxu Date: Thu, 8 Jul 2021 15:33:19 +0800 Subject: [PATCH 174/226] fix bug for step info Signed-off-by: zhupuxu --- ge/common/profiling/ge_profiling.cc | 14 ++++---------- ge/common/profiling/profiling_manager.cc | 2 ++ 
ge/common/profiling/profiling_manager.h | 3 +++ ge/single_op/single_op.cc | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc index c00af058..fd104e90 100644 --- a/ge/common/profiling/ge_profiling.cc +++ b/ge/common/profiling/ge_profiling.cc @@ -230,21 +230,15 @@ ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream REPORT_CALL_ERROR("E19999", "Get logic device id failed, ret 0x%X", rt_ret); return ge::FAILED; } + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetStepInfoIndex(index_id); if (is_first_run && tag_id == kStepStart) { - GE_CHK_STATUS_RET_NOLOG(ge::ProfilingManager::Instance().ProfileStepInfo(index_id, - kModelId, - tag_id, - stream, - device_id)); + GE_CHK_STATUS_RET_NOLOG(profiling_manager.ProfileStepInfo(index_id, kModelId, tag_id, stream, device_id)); is_first_run = false; return ge::SUCCESS; } if (!is_first_run && tag_id == kStepEnd) { - GE_CHK_STATUS_RET_NOLOG(ge::ProfilingManager::Instance().ProfileStepInfo(index_id, - kModelId, - tag_id, - stream, - device_id)); + GE_CHK_STATUS_RET_NOLOG(profiling_manager.ProfileStepInfo(index_id, kModelId, tag_id, stream, device_id)); is_first_run = true; return ge::SUCCESS; } diff --git a/ge/common/profiling/profiling_manager.cc b/ge/common/profiling/profiling_manager.cc index 6707d78e..7fd63d7e 100644 --- a/ge/common/profiling/profiling_manager.cc +++ b/ge/common/profiling/profiling_manager.cc @@ -69,6 +69,7 @@ ProfilingManager::ProfilingManager() : is_load_profiling_(false), is_execute_profiling_(false), is_training_trace_(false), subscribe_count_(0) { prof_cb_.msprofCtrlCallback = nullptr; prof_cb_.msprofReporterCallback = nullptr; + index_id_ = UINT64_MAX; } ProfilingManager::~ProfilingManager() {} @@ -604,6 +605,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi is_load_profiling_ = false; is_training_trace_ = false; is_execute_profiling_ = false; + index_id_ = UINT64_MAX; // profiling plugin uninit PluginUnInit(); diff --git a/ge/common/profiling/profiling_manager.h b/ge/common/profiling/profiling_manager.h index 049a4df4..25929895 100755 --- a/ge/common/profiling/profiling_manager.h +++ b/ge/common/profiling/profiling_manager.h @@ -101,6 +101,8 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { void GetOpInputOutputInfo(const OpDescPtr &op, TaskDescInfo &task_desc_info) const; void ReportData(const int32_t &device_id, const std::string &data, const std::string &tag_name); Status ProfileStepInfo(uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, int32_t device_id); + void SetStepInfoIndex(uint64_t index_id) { index_id_ = index_id; } + uint64_t GetStepInfoIndex() { return index_id_; } private: Status InitFromOptions(const Options &options, MsprofGeOptions &prof_conf); Status ParseOptions(const std::string &options); @@ -127,6 +129,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { std::string fp_point_; std::string bp_point_; uint32_t reporter_max_len_ = 0; + uint64_t index_id_; }; } // namespace ge #endif // GE_COMMON_PROFILING_PROFILING_MANAGER_H_ diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc index 763d0fa4..a82c30ba 100755 --- a/ge/single_op/single_op.cc +++ b/ge/single_op/single_op.cc @@ -58,7 +58,7 @@ Status ProfilingTaskInfo(OpTask *op_task, const string &shape_type) { tmp_task_desc_info.op_name.c_str(), tmp_task_desc_info.model_name.c_str()); 
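The fix above threads the step index recorded by ProfSetStepInfo through ProfilingManager into the single-op task records. A minimal standalone model of that pattern, under hypothetical names (StepTracker/Report are illustrative, not GE API):

#include <cstdint>
#include <iostream>

// A process-wide tracker records the current step index at step start;
// reporting code reads it back later, mirroring Set/GetStepInfoIndex.
class StepTracker {
 public:
  static StepTracker &Instance() {
    static StepTracker inst;
    return inst;
  }
  void SetStepInfoIndex(uint64_t index_id) { index_id_ = index_id; }
  uint64_t GetStepInfoIndex() const { return index_id_; }

 private:
  uint64_t index_id_ = UINT64_MAX;  // sentinel: no step recorded yet
};

// Plays the role of ProfilingTaskInfo(): stamp the record with the active step.
void Report() {
  std::cout << "cur_iter_num = " << StepTracker::Instance().GetStepInfoIndex() << "\n";
}

int main() {
  StepTracker::Instance().SetStepInfoIndex(42);  // like ProfSetStepInfo at step start
  Report();  // prints 42 instead of the previously hard-coded 0
  return 0;
}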
tmp_task_desc_info.shape_type = shape_type; - tmp_task_desc_info.cur_iter_num = 0; + tmp_task_desc_info.cur_iter_num = ProfilingManager::Instance().GetStepInfoIndex(); tmp_task_desc_info.task_type = op_task->GetTaskType(); std::vector task_desc_info; From d642976cb69fbd5b296372209d1becfeebacdb40 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Fri, 9 Jul 2021 16:43:43 +0800 Subject: [PATCH 175/226] Sort CMakeLists.txt SRC_LIST --- ge/CMakeLists.txt | 1102 ++++++++++------- ge/graph/manager/graph_manager.h | 1 - .../ge/graph/load/model_manager_unittest.cc | 33 +- 3 files changed, 624 insertions(+), 512 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 9fff30f7..eec992c8 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -2,7 +2,6 @@ if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) add_subdirectory(common) add_subdirectory(plugin/engine) add_subdirectory(ge_local_engine) - add_subdirectory(executor) add_subdirectory(offline) elseif (ENABLE_D) add_subdirectory(common) @@ -109,7 +108,346 @@ target_link_libraries(ge_proto_client PRIVATE endif () ################################################################## -set(TRAIN_SRC_LIST +set(EXECUTOR_SRC_LIST + #"analyzer/analyzer.cc" + #"client/ge_api.cc" + "common/dump/dump_manager.cc" + "common/dump/dump_op.cc" + "common/dump/dump_properties.cc" + "common/dump/exception_dumper.cc" + "common/dump/opdebug_register.cc" + #"common/formats/format_transfers/datatype_transfer.cc" + #"common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" + #"common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" + #"common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" + #"common/formats/format_transfers/format_transfer_fractal_nz.cc" + #"common/formats/format_transfers/format_transfer_fractal_z.cc" + #"common/formats/format_transfers/format_transfer_fractal_zz.cc" + #"common/formats/format_transfers/format_transfer_fracz_hwcn.cc" + #"common/formats/format_transfers/format_transfer_fracz_nchw.cc" + #"common/formats/format_transfers/format_transfer_fracz_nhwc.cc" + #"common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" + #"common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" + #"common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" + #"common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" + #"common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" + "common/formats/format_transfers/format_transfer_transpose.cc" + #"common/formats/formats.cc" + "common/formats/utils/formats_trans_utils.cc" + "common/fp16_t.cc" + "common/ge/op_tiling_manager.cc" + "common/ge/plugin_manager.cc" + #"common/helper/model_cache_helper.cc" + "common/profiling/ge_profiling.cc" + #"common/profiling/ge_runner_profiling.cc" + "common/profiling/profiling_manager.cc" + #"engine_manager/dnnengine_manager.cc" + "executor/ge_executor.cc" + "ge_local_engine/engine/host_cpu_engine.cc" + #"ge_opt_info/ge_opt_info.cc" + #"generator/ge_generator.cc" + #"generator/generator_api.cc" + #"graph/build/graph_builder.cc" + #"graph/build/label_allocator.cc" + #"graph/build/logical_stream_allocator.cc" + #"graph/build/memory/binary_block_mem_assigner.cc" + #"graph/build/memory/block_mem_assigner.cc" + #"graph/build/memory/buffer_pool_mem_assigner.cc" + #"graph/build/memory/graph_mem_assigner.cc" + #"graph/build/memory/hybrid_mem_assigner.cc" + #"graph/build/memory/max_block_mem_assigner.cc" + #"graph/build/memory/memory_assigner.cc" + 
"graph/build/memory/var_mem_assign_util.cc" + #"graph/build/model_builder.cc" + #"graph/build/run_context.cc" + #"graph/build/stream_allocator.cc" + #"graph/build/stream_graph_optimizer.cc" + #"graph/build/task_generator.cc" + "graph/common/bcast.cc" + "graph/common/local_context.cc" + "graph/common/omg_util.cc" + #"graph/common/transop_util.cc" + "graph/execute/graph_execute.cc" + "graph/execute/model_executor.cc" + #"graph/label/case_label_maker.cc" + #"graph/label/if_label_maker.cc" + #"graph/label/label_maker.cc" + #"graph/label/partitioned_call_label_maker.cc" + #"graph/label/while_label_maker.cc" + "graph/load/graph_loader.cc" + "graph/load/model_manager/aipp_utils.cc" + "graph/load/model_manager/cpu_queue_schedule.cc" + "graph/load/model_manager/data_dumper.cc" + "graph/load/model_manager/data_inputer.cc" + "graph/load/model_manager/davinci_model.cc" + "graph/load/model_manager/model_manager.cc" + "graph/load/model_manager/model_utils.cc" + "graph/load/model_manager/task_info/end_graph_task_info.cc" + "graph/load/model_manager/task_info/event_record_task_info.cc" + "graph/load/model_manager/task_info/event_wait_task_info.cc" + "graph/load/model_manager/task_info/ffts_task_info.cc" + "graph/load/model_manager/task_info/fusion_start_task_info.cc" + "graph/load/model_manager/task_info/fusion_stop_task_info.cc" + #"graph/load/model_manager/task_info/hccl_task_info.cc" # Just for runner. + "graph/load/model_manager/task_info/kernel_ex_task_info.cc" + "graph/load/model_manager/task_info/kernel_task_info.cc" + "graph/load/model_manager/task_info/label_goto_ex_task_info.cc" + "graph/load/model_manager/task_info/label_set_task_info.cc" + "graph/load/model_manager/task_info/label_switch_by_index_task_info.cc" + "graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc" + "graph/load/model_manager/task_info/memcpy_async_task_info.cc" + "graph/load/model_manager/task_info/model_exit_task_info.cc" + "graph/load/model_manager/task_info/profiler_trace_task_info.cc" + "graph/load/model_manager/task_info/stream_active_task_info.cc" + "graph/load/model_manager/task_info/stream_switch_task_info.cc" + "graph/load/model_manager/task_info/stream_switchn_task_info.cc" + "graph/load/model_manager/task_info/super_kernel/super_kernel.cc" + "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" + "graph/load/model_manager/task_info/task_info.cc" + "graph/load/model_manager/tbe_handle_store.cc" + "graph/load/model_manager/zero_copy_offset.cc" + "graph/load/model_manager/zero_copy_task.cc" + "graph/manager/graph_caching_allocator.cc" + #"graph/manager/graph_context.cc" + #"graph/manager/graph_manager.cc" + "graph/manager/graph_manager_utils.cc" + "graph/manager/graph_mem_allocator.cc" + "graph/manager/graph_mem_manager.cc" + "graph/manager/graph_var_manager.cc" + "graph/manager/host_mem_allocator.cc" + "graph/manager/host_mem_manager.cc" + #"graph/manager/memory_api.cc" # Just for runner. + #"graph/manager/model_manager/event_manager.cc" + "graph/manager/rdma_pool_allocator.cc" + "graph/manager/session_scope_mem_allocator.cc" + "graph/manager/trans_var_data_utils.cc" + "graph/manager/util/debug.cc" + #"graph/manager/util/hcom_util.cc" # Just for runner. 
+ #"graph/manager/util/rt_context_util.cc" + #"graph/manager/util/variable_accelerate_ctrl.cc" + #"graph/optimize/graph_optimize.cc" + #"graph/optimize/mem_rw_conflict_optimize.cc" + #"graph/optimize/summary_optimize.cc" + #"graph/partition/dynamic_shape_partition.cc" + #"graph/partition/engine_place.cc" + #"graph/partition/graph_partition.cc" + #"graph/partition/stage_partition.cc" + #"graph/passes/addn_pass.cc" + #"graph/passes/aicpu_constant_folding_pass.cc" + #"graph/passes/assert_pass.cc" + #"graph/passes/assign_remove_pass.cc" + #"graph/passes/atomic_addr_clean_pass.cc" + #"graph/passes/attach_stream_label_pass.cc" + #"graph/passes/base_pass.cc" + #"graph/passes/bitcast_pass.cc" + #"graph/passes/buffer_pool_memory_pass.cc" + #"graph/passes/cast_remove_pass.cc" + #"graph/passes/cast_translate_pass.cc" + #"graph/passes/common_subexpression_elimination_pass.cc" + #"graph/passes/compile_nodes_pass.cc" + #"graph/passes/cond_pass.cc" + #"graph/passes/cond_remove_pass.cc" + #"graph/passes/constant_folding_pass.cc" + #"graph/passes/constant_fuse_same_pass.cc" + #"graph/passes/control_trigger_pass.cc" + #"graph/passes/ctrl_edge_transfer_pass.cc" + #"graph/passes/data_pass.cc" + #"graph/passes/dimension_adjust_pass.cc" + #"graph/passes/dimension_compute_pass.cc" + #"graph/passes/dropout_pass.cc" + #"graph/passes/end_of_sequence_add_control_pass.cc" + #"graph/passes/enter_pass.cc" + #"graph/passes/flow_ctrl_pass.cc" + #"graph/passes/folding_pass.cc" + #"graph/passes/for_pass.cc" + #"graph/passes/fuse_data_nodes_with_common_input_pass.cc" + #"graph/passes/get_original_format_pass.cc" + #"graph/passes/global_step_insert_pass.cc" + #"graph/passes/guarantee_const_pass.cc" + #"graph/passes/hccl_continuous_memcpy_pass.cc" + #"graph/passes/hccl_group_pass.cc" + #"graph/passes/hccl_memcpy_pass.cc" + #"graph/passes/hccl_tailing_optimization_pass.cc" + #"graph/passes/identity_pass.cc" + #"graph/passes/infer_base_pass.cc" + #"graph/passes/infer_value_range_pass.cc" + #"graph/passes/infershape_pass.cc" + #"graph/passes/inplace_support_check_pass.cc" + #"graph/passes/input_output_connection_identify_pass.cc" + #"graph/passes/iterator_op_pass.cc" + #"graph/passes/link_gen_mask_nodes_pass.cc" + #"graph/passes/mark_agnostic_pass.cc" + #"graph/passes/mark_force_unknown_for_cond_pass.cc" + #"graph/passes/mark_graph_unknown_status_pass.cc" + #"graph/passes/mark_node_unknown_shape_pass.cc" + #"graph/passes/mark_same_addr_pass.cc" + #"graph/passes/memcpy_addr_async_pass.cc" + #"graph/passes/merge_input_memcpy_pass.cc" + #"graph/passes/merge_pass.cc" + #"graph/passes/merge_to_stream_merge_pass.cc" + #"graph/passes/multi_batch_clone_pass.cc" + #"graph/passes/multi_batch_pass.cc" + #"graph/passes/net_output_pass.cc" + #"graph/passes/next_iteration_pass.cc" + #"graph/passes/no_use_reshape_remove_pass.cc" + #"graph/passes/parallel_concat_start_op_pass.cc" + #"graph/passes/parallel_group_pass.cc" + #"graph/passes/pass_manager.cc" + "graph/passes/pass_utils.cc" + #"graph/passes/permute_pass.cc" + #"graph/passes/placeholder_with_default_pass.cc" + #"graph/passes/prevent_gradient_pass.cc" + #"graph/passes/print_op_pass.cc" + #"graph/passes/prune_pass.cc" + #"graph/passes/ref_identity_delete_op_pass.cc" + #"graph/passes/remove_same_const_pass.cc" + #"graph/passes/replace_transshape_pass.cc" + #"graph/passes/replace_with_empty_const_pass.cc" + #"graph/passes/reshape_recovery_pass.cc" + #"graph/passes/reshape_remove_pass.cc" + #"graph/passes/resource_pair_add_control_pass.cc" + 
#"graph/passes/resource_pair_remove_control_pass.cc" + #"graph/passes/same_transdata_breadth_fusion_pass.cc" + #"graph/passes/save_pass.cc" + #"graph/passes/set_input_output_offset_pass.cc" + #"graph/passes/shape_operate_op_remove_pass.cc" + #"graph/passes/snapshot_pass.cc" + #"graph/passes/stop_gradient_pass.cc" + #"graph/passes/subexpression_migration_pass.cc" + #"graph/passes/subgraph_const_migration_pass.cc" + #"graph/passes/subgraph_pass.cc" + #"graph/passes/switch_data_edges_bypass.cc" + #"graph/passes/switch_dead_branch_elimination.cc" + #"graph/passes/switch_logic_remove_pass.cc" + #"graph/passes/switch_to_stream_switch_pass.cc" + #"graph/passes/transop_breadth_fusion_pass.cc" + #"graph/passes/transop_depth_fusion_pass.cc" + #"graph/passes/transop_nearby_allreduce_fusion_pass.cc" + #"graph/passes/transop_symmetry_elimination_pass.cc" + #"graph/passes/transop_without_reshape_fusion_pass.cc" + #"graph/passes/transpose_transdata_pass.cc" + #"graph/passes/unused_args_clean_pass.cc" + #"graph/passes/unused_const_pass.cc" + #"graph/passes/useless_control_out_remove_pass.cc" + #"graph/passes/var_is_initialized_op_pass.cc" + #"graph/passes/variable_op_pass.cc" + #"graph/passes/variable_prepare_op_pass.cc" + #"graph/passes/variable_ref_delete_op_pass.cc" + #"graph/passes/variable_ref_useless_control_out_delete_pass.cc" + #"graph/preprocess/graph_preprocess.cc" + #"graph/preprocess/insert_op/ge_aipp_op.cc" + #"graph/preprocess/insert_op/util_insert_aipp_op.cc" + #"graph/preprocess/multi_batch_copy_graph.cc" + #"graph/preprocess/multi_batch_options.cc" + "host_kernels/add_kernel.cc" + "host_kernels/broadcast_args_kernel.cc" + "host_kernels/broadcast_gradient_args_kernel.cc" + "host_kernels/cast_kernel.cc" + "host_kernels/concat_offset_kernel.cc" + "host_kernels/concat_v2_kernel.cc" + "host_kernels/dynamic_stitch_kernel.cc" + "host_kernels/empty_kernel.cc" + "host_kernels/expanddims_kernel.cc" + "host_kernels/fill_kernel.cc" + "host_kernels/floordiv_kernel.cc" + "host_kernels/floormod_kernel.cc" + "host_kernels/gather_v2_kernel.cc" + "host_kernels/greater_kernel.cc" + "host_kernels/identity_kernel.cc" + "host_kernels/kernel_utils.cc" + "host_kernels/maximum_kernel.cc" + "host_kernels/mul_kernel.cc" + "host_kernels/pack_kernel.cc" + "host_kernels/permute_kernel.cc" + "host_kernels/range_kernel.cc" + "host_kernels/rank_kernel.cc" + "host_kernels/reduce_prod_kernel.cc" + "host_kernels/reformat_kernel.cc" + "host_kernels/reshape_kernel.cc" + "host_kernels/rsqrt_kernel.cc" + "host_kernels/shape_kernel.cc" + "host_kernels/shape_n_kernel.cc" + "host_kernels/size_kernel.cc" + "host_kernels/slice_d_kernel.cc" + "host_kernels/slice_kernel.cc" + "host_kernels/squeeze_kernel.cc" + "host_kernels/ssd_prior_box_kernel.cc" + "host_kernels/strided_slice_kernel.cc" + "host_kernels/sub_kernel.cc" + "host_kernels/transdata_kernel.cc" + "host_kernels/transpose_kernel.cc" + "host_kernels/unpack_kernel.cc" + "host_kernels/unsqueeze_kernel.cc" + "hybrid/common/npu_memory_allocator.cc" + "hybrid/common/tensor_value.cc" + "hybrid/executor/hybrid_execution_context.cc" + "hybrid/executor/hybrid_model_async_executor.cc" + "hybrid/executor/hybrid_model_executor.cc" + "hybrid/executor/hybrid_model_pipeline_executor.cc" + "hybrid/executor/hybrid_profiler.cc" + "hybrid/executor/node_done_manager.cc" + "hybrid/executor/node_state.cc" + "hybrid/executor/rt_callback_manager.cc" + "hybrid/executor/subgraph_context.cc" + "hybrid/executor/subgraph_executor.cc" + "hybrid/executor/worker/execution_engine.cc" + 
"hybrid/executor/worker/shape_inference_engine.cc" + "hybrid/executor/worker/task_compile_engine.cc" + "hybrid/hybrid_davinci_model.cc" + "hybrid/model/graph_item.cc" + "hybrid/model/hybrid_model.cc" + "hybrid/model/hybrid_model_builder.cc" + "hybrid/model/node_item.cc" + "hybrid/node_executor/aicore/aicore_node_executor.cc" + "hybrid/node_executor/aicore/aicore_op_task.cc" + "hybrid/node_executor/aicore/aicore_task_builder.cc" + #"hybrid/node_executor/aicore/aicore_task_compiler.cc" + "hybrid/node_executor/aicpu/aicpu_ext_info.cc" + "hybrid/node_executor/aicpu/aicpu_node_executor.cc" + "hybrid/node_executor/compiledsubgraph/known_node_executor.cc" + "hybrid/node_executor/controlop/control_op_executor.cc" + "hybrid/node_executor/ge_local/ge_local_node_executor.cc" + #"hybrid/node_executor/hccl/hccl_node_executor.cc" # Just for runner. + "hybrid/node_executor/host_cpu/host_cpu_node_executor.cc" + "hybrid/node_executor/node_executor.cc" + "hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc" + "hybrid/node_executor/rts/rts_node_executor.cc" + "hybrid/node_executor/rts/rts_node_task.cc" + "hybrid/node_executor/rts/rts_task_factory.cc" + "hybrid/node_executor/task_context.cc" + #"init/gelib.cc" + #"ir_build/attr_options/keep_dtype_option.cc" + #"ir_build/attr_options/utils.cc" + #"ir_build/attr_options/weight_compress_option.cc" + #"ir_build/ge_ir_build.cc" + #"ir_build/option_utils.cc" + "model/ge_model.cc" + "model/ge_root_model.cc" + "opskernel_manager/ops_kernel_builder_manager.cc" + #"opskernel_manager/ops_kernel_manager.cc" + #"session/inner_session.cc" + #"session/session_manager.cc" + "single_op/single_op.cc" + "single_op/single_op_manager.cc" + "single_op/single_op_model.cc" + "single_op/stream_resource.cc" + "single_op/task/aicpu_kernel_task_builder.cc" + "single_op/task/aicpu_task_builder.cc" + "single_op/task/build_task_utils.cc" + "single_op/task/op_task.cc" + "single_op/task/rts_kernel_task_builder.cc" + "single_op/task/tbe_task_builder.cc" +) + +################################################################## +set(COMPILER_SRC_LIST + "analyzer/analyzer.cc" + "common/dump/dump_manager.cc" + "common/dump/dump_op.cc" + "common/dump/dump_properties.cc" + "common/dump/dump_server.cc" + "common/dump/exception_dumper.cc" + "common/dump/opdebug_register.cc" "common/formats/format_transfers/datatype_transfer.cc" "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" @@ -123,30 +461,33 @@ set(TRAIN_SRC_LIST "common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" "common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" "common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" + "common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" "common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" "common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" "common/formats/format_transfers/format_transfer_transpose.cc" "common/formats/formats.cc" "common/formats/utils/formats_trans_utils.cc" "common/fp16_t.cc" - "common/ge/plugin_manager.cc" "common/ge/op_tiling_manager.cc" + "common/ge/plugin_manager.cc" "common/helper/model_cache_helper.cc" "common/profiling/profiling_manager.cc" - "common/dump/dump_manager.cc" - "common/dump/exception_dumper.cc" - "common/dump/dump_properties.cc" - "common/dump/opdebug_register.cc" - "common/dump/dump_op.cc" - "common/profiling/ge_profiling.cc" - "common/profiling/ge_runner_profiling.cc" 
"engine_manager/dnnengine_manager.cc" "ge_local_engine/engine/host_cpu_engine.cc" + "ge_opt_info/ge_opt_info.cc" "generator/ge_generator.cc" "generator/generator_api.cc" "graph/build/graph_builder.cc" "graph/build/label_allocator.cc" "graph/build/logical_stream_allocator.cc" + "graph/build/memory/binary_block_mem_assigner.cc" + "graph/build/memory/block_mem_assigner.cc" + "graph/build/memory/buffer_pool_mem_assigner.cc" + "graph/build/memory/graph_mem_assigner.cc" + "graph/build/memory/hybrid_mem_assigner.cc" + "graph/build/memory/max_block_mem_assigner.cc" + "graph/build/memory/memory_assigner.cc" + "graph/build/memory/var_mem_assign_util.cc" "graph/build/model_builder.cc" "graph/build/run_context.cc" "graph/build/stream_allocator.cc" @@ -156,35 +497,34 @@ set(TRAIN_SRC_LIST "graph/common/local_context.cc" "graph/common/omg_util.cc" "graph/common/transop_util.cc" - "graph/execute/graph_execute.cc" + #"graph/execute/graph_execute.cc" "graph/label/case_label_maker.cc" "graph/label/if_label_maker.cc" "graph/label/label_maker.cc" "graph/label/partitioned_call_label_maker.cc" "graph/label/while_label_maker.cc" "graph/load/graph_loader.cc" + "graph/load/model_manager/aipp_utils.cc" "graph/load/model_manager/cpu_queue_schedule.cc" "graph/load/model_manager/data_dumper.cc" "graph/load/model_manager/data_inputer.cc" "graph/load/model_manager/davinci_model.cc" "graph/load/model_manager/model_manager.cc" "graph/load/model_manager/model_utils.cc" - "graph/load/model_manager/aipp_utils.cc" "graph/load/model_manager/task_info/end_graph_task_info.cc" - "graph/load/model_manager/task_info/model_exit_task_info.cc" "graph/load/model_manager/task_info/event_record_task_info.cc" "graph/load/model_manager/task_info/event_wait_task_info.cc" "graph/load/model_manager/task_info/ffts_task_info.cc" "graph/load/model_manager/task_info/fusion_start_task_info.cc" "graph/load/model_manager/task_info/fusion_stop_task_info.cc" - "graph/load/model_manager/task_info/hccl_task_info.cc" "graph/load/model_manager/task_info/kernel_ex_task_info.cc" "graph/load/model_manager/task_info/kernel_task_info.cc" + "graph/load/model_manager/task_info/label_goto_ex_task_info.cc" "graph/load/model_manager/task_info/label_set_task_info.cc" "graph/load/model_manager/task_info/label_switch_by_index_task_info.cc" - "graph/load/model_manager/task_info/label_goto_ex_task_info.cc" "graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc" "graph/load/model_manager/task_info/memcpy_async_task_info.cc" + "graph/load/model_manager/task_info/model_exit_task_info.cc" "graph/load/model_manager/task_info/profiler_trace_task_info.cc" "graph/load/model_manager/task_info/stream_active_task_info.cc" "graph/load/model_manager/task_info/stream_switch_task_info.cc" @@ -193,542 +533,209 @@ set(TRAIN_SRC_LIST "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" "graph/load/model_manager/task_info/task_info.cc" "graph/load/model_manager/tbe_handle_store.cc" - "graph/load/model_manager/zero_copy_task.cc" "graph/load/model_manager/zero_copy_offset.cc" + "graph/load/model_manager/zero_copy_task.cc" + "graph/manager/graph_caching_allocator.cc" "graph/manager/graph_context.cc" "graph/manager/graph_manager.cc" "graph/manager/graph_manager_utils.cc" "graph/manager/graph_mem_allocator.cc" - "graph/manager/graph_caching_allocator.cc" - "graph/manager/session_scope_mem_allocator.cc" + "graph/manager/graph_mem_manager.cc" "graph/manager/graph_var_manager.cc" - "graph/manager/host_mem_manager.cc" - "graph/manager/rdma_pool_allocator.cc" 
"graph/manager/host_mem_allocator.cc" - "graph/manager/graph_mem_manager.cc" - "graph/manager/memory_api.cc" + "graph/manager/host_mem_manager.cc" "graph/manager/model_manager/event_manager.cc" + "graph/manager/rdma_pool_allocator.cc" + "graph/manager/session_scope_mem_allocator.cc" "graph/manager/trans_var_data_utils.cc" "graph/manager/util/debug.cc" - "graph/manager/util/hcom_util.cc" "graph/manager/util/rt_context_util.cc" "graph/manager/util/variable_accelerate_ctrl.cc" "graph/optimize/graph_optimize.cc" "graph/optimize/mem_rw_conflict_optimize.cc" "graph/optimize/summary_optimize.cc" + "graph/partition/dynamic_shape_partition.cc" "graph/partition/engine_place.cc" "graph/partition/graph_partition.cc" + "graph/partition/stage_partition.cc" "graph/passes/addn_pass.cc" "graph/passes/aicpu_constant_folding_pass.cc" "graph/passes/assert_pass.cc" - "graph/passes/input_output_connection_identify_pass.cc" + "graph/passes/assign_remove_pass.cc" "graph/passes/atomic_addr_clean_pass.cc" - "graph/passes/mark_same_addr_pass.cc" - "graph/passes/mark_graph_unknown_status_pass.cc" - "graph/passes/mark_node_unknown_shape_pass.cc" - "graph/passes/mark_agnostic_pass.cc" - "graph/partition/dynamic_shape_partition.cc" - "graph/partition/stage_partition.cc" + "graph/passes/attach_stream_label_pass.cc" "graph/passes/base_pass.cc" "graph/passes/bitcast_pass.cc" + "graph/passes/buffer_pool_memory_pass.cc" "graph/passes/cast_remove_pass.cc" "graph/passes/cast_translate_pass.cc" "graph/passes/common_subexpression_elimination_pass.cc" - "graph/passes/transop_symmetry_elimination_pass.cc" "graph/passes/compile_nodes_pass.cc" + "graph/passes/cond_pass.cc" + "graph/passes/cond_remove_pass.cc" "graph/passes/constant_folding_pass.cc" "graph/passes/constant_fuse_same_pass.cc" - "graph/passes/fuse_data_nodes_with_common_input_pass.cc" - "graph/passes/remove_same_const_pass.cc" - "graph/passes/useless_control_out_remove_pass.cc" "graph/passes/control_trigger_pass.cc" + "graph/passes/ctrl_edge_transfer_pass.cc" + "graph/passes/data_pass.cc" "graph/passes/dimension_adjust_pass.cc" "graph/passes/dimension_compute_pass.cc" "graph/passes/dropout_pass.cc" - "graph/passes/hccl_group_pass.cc" - "graph/passes/hccl_tailing_optimization_pass.cc" + "graph/passes/end_of_sequence_add_control_pass.cc" "graph/passes/enter_pass.cc" - "graph/passes/assign_remove_pass.cc" - "graph/passes/inplace_support_check_pass.cc" "graph/passes/flow_ctrl_pass.cc" + "graph/passes/folding_pass.cc" + "graph/passes/for_pass.cc" + "graph/passes/fuse_data_nodes_with_common_input_pass.cc" + "graph/passes/get_original_format_pass.cc" "graph/passes/global_step_insert_pass.cc" - "host_kernels/transpose_kernel.cc" - "host_kernels/add_kernel.cc" - "host_kernels/broadcast_args_kernel.cc" - "host_kernels/broadcast_gradient_args_kernel.cc" - "host_kernels/cast_kernel.cc" - "host_kernels/concat_offset_kernel.cc" - "host_kernels/concat_v2_kernel.cc" - "host_kernels/dynamic_stitch_kernel.cc" - "host_kernels/identity_kernel.cc" - "host_kernels/empty_kernel.cc" - "host_kernels/expanddims_kernel.cc" - "host_kernels/fill_kernel.cc" - "host_kernels/floordiv_kernel.cc" - "host_kernels/floormod_kernel.cc" - "host_kernels/gather_v2_kernel.cc" - "host_kernels/greater_kernel.cc" - "host_kernels/kernel_utils.cc" - "host_kernels/maximum_kernel.cc" - "host_kernels/mul_kernel.cc" - "host_kernels/pack_kernel.cc" - "host_kernels/permute_kernel.cc" - "host_kernels/range_kernel.cc" - "host_kernels/rank_kernel.cc" - "host_kernels/reduce_prod_kernel.cc" - "host_kernels/reshape_kernel.cc" - 
"host_kernels/rsqrt_kernel.cc" - "host_kernels/shape_kernel.cc" - "host_kernels/shape_n_kernel.cc" - "host_kernels/size_kernel.cc" - "host_kernels/slice_d_kernel.cc" - "host_kernels/slice_kernel.cc" - "host_kernels/squeeze_kernel.cc" - "host_kernels/unsqueeze_kernel.cc" - "host_kernels/ssd_prior_box_kernel.cc" - "host_kernels/strided_slice_kernel.cc" - "host_kernels/sub_kernel.cc" - "host_kernels/transdata_kernel.cc" - "host_kernels/unpack_kernel.cc" - "host_kernels/reformat_kernel.cc" - "graph/passes/folding_pass.cc" - "graph/passes/get_original_format_pass.cc" "graph/passes/guarantee_const_pass.cc" - "graph/passes/hccl_memcpy_pass.cc" "graph/passes/hccl_continuous_memcpy_pass.cc" + "graph/passes/hccl_group_pass.cc" + "graph/passes/hccl_memcpy_pass.cc" + "graph/passes/hccl_tailing_optimization_pass.cc" "graph/passes/identity_pass.cc" - "graph/passes/ref_identity_delete_op_pass.cc" "graph/passes/infer_base_pass.cc" - "graph/passes/infershape_pass.cc" "graph/passes/infer_value_range_pass.cc" + "graph/passes/infershape_pass.cc" + "graph/passes/inplace_support_check_pass.cc" + "graph/passes/input_output_connection_identify_pass.cc" "graph/passes/iterator_op_pass.cc" "graph/passes/link_gen_mask_nodes_pass.cc" + "graph/passes/mark_agnostic_pass.cc" + "graph/passes/mark_force_unknown_for_cond_pass.cc" + "graph/passes/mark_graph_unknown_status_pass.cc" + "graph/passes/mark_node_unknown_shape_pass.cc" + "graph/passes/mark_same_addr_pass.cc" + "graph/passes/memcpy_addr_async_pass.cc" + "graph/passes/merge_input_memcpy_pass.cc" "graph/passes/merge_pass.cc" - "graph/passes/multi_batch_pass.cc" + "graph/passes/merge_to_stream_merge_pass.cc" "graph/passes/multi_batch_clone_pass.cc" - "graph/passes/subexpression_migration_pass.cc" - "graph/passes/subgraph_const_migration_pass.cc" - "graph/passes/unused_args_clean_pass.cc" + "graph/passes/multi_batch_pass.cc" "graph/passes/net_output_pass.cc" "graph/passes/next_iteration_pass.cc" "graph/passes/no_use_reshape_remove_pass.cc" - "graph/passes/pass_manager.cc" - "graph/passes/pass_utils.cc" - "graph/passes/permute_pass.cc" - "graph/passes/placeholder_with_default_pass.cc" - "graph/passes/prevent_gradient_pass.cc" - "graph/passes/print_op_pass.cc" - "graph/passes/prune_pass.cc" - "graph/passes/ctrl_edge_transfer_pass.cc" - "graph/passes/replace_with_empty_const_pass.cc" - "graph/passes/reshape_remove_pass.cc" - "graph/passes/reshape_recovery_pass.cc" - "graph/passes/resource_pair_add_control_pass.cc" - "graph/passes/resource_pair_remove_control_pass.cc" - "graph/passes/same_transdata_breadth_fusion_pass.cc" - "graph/passes/save_pass.cc" - "graph/passes/shape_operate_op_remove_pass.cc" - "graph/passes/snapshot_pass.cc" - "graph/passes/stop_gradient_pass.cc" - "graph/passes/subgraph_pass.cc" - "graph/passes/data_pass.cc" - "graph/passes/switch_data_edges_bypass.cc" - "graph/passes/switch_logic_remove_pass.cc" - "graph/passes/merge_to_stream_merge_pass.cc" - "graph/passes/merge_input_memcpy_pass.cc" - "graph/passes/switch_to_stream_switch_pass.cc" - "graph/passes/mark_force_unknown_for_cond_pass.cc" - "graph/passes/attach_stream_label_pass.cc" - "graph/passes/switch_dead_branch_elimination.cc" - "graph/passes/replace_transshape_pass.cc" - "graph/passes/transop_breadth_fusion_pass.cc" - "graph/passes/transop_depth_fusion_pass.cc" - "graph/passes/transop_nearby_allreduce_fusion_pass.cc" - "graph/passes/transop_without_reshape_fusion_pass.cc" - "graph/passes/transpose_transdata_pass.cc" - "graph/passes/unused_const_pass.cc" - 
"graph/passes/var_is_initialized_op_pass.cc" "graph/passes/parallel_concat_start_op_pass.cc" - "graph/passes/cond_pass.cc" - "graph/passes/cond_remove_pass.cc" - "graph/passes/for_pass.cc" - "graph/passes/variable_op_pass.cc" - "graph/passes/variable_prepare_op_pass.cc" - "graph/passes/variable_ref_delete_op_pass.cc" - "graph/passes/variable_ref_useless_control_out_delete_pass.cc" - "graph/passes/end_of_sequence_add_control_pass.cc" - "graph/passes/memcpy_addr_async_pass.cc" "graph/passes/parallel_group_pass.cc" - "graph/passes/set_input_output_offset_pass.cc" - "graph/passes/buffer_pool_memory_pass.cc" - "graph/preprocess/graph_preprocess.cc" - "graph/preprocess/insert_op/ge_aipp_op.cc" - "graph/preprocess/insert_op/util_insert_aipp_op.cc" - "graph/preprocess/multi_batch_options.cc" - "graph/preprocess/multi_batch_copy_graph.cc" - "init/gelib.cc" - "model/ge_model.cc" - "model/ge_root_model.cc" - "opskernel_manager/ops_kernel_manager.cc" - "opskernel_manager/ops_kernel_builder_manager.cc" - "session/inner_session.cc" - "session/session_manager.cc" - "graph/execute/model_executor.cc" - "single_op/single_op.cc" - "single_op/single_op_manager.cc" - "single_op/single_op_model.cc" - "single_op/stream_resource.cc" - "single_op/task/build_task_utils.cc" - "single_op/task/op_task.cc" - "single_op/task/tbe_task_builder.cc" - "single_op/task/aicpu_task_builder.cc" - "single_op/task/aicpu_kernel_task_builder.cc" - "single_op/task/rts_kernel_task_builder.cc" - "hybrid/common/tensor_value.cc" - "hybrid/common/npu_memory_allocator.cc" - "hybrid/executor/rt_callback_manager.cc" - "hybrid/executor/node_state.cc" - "hybrid/executor/node_done_manager.cc" - "hybrid/executor/hybrid_profiler.cc" - "hybrid/executor/hybrid_model_executor.cc" - "hybrid/executor/hybrid_model_pipeline_executor.cc" - "hybrid/executor/hybrid_model_async_executor.cc" - "hybrid/executor/hybrid_execution_context.cc" - "hybrid/executor/subgraph_context.cc" - "hybrid/executor/subgraph_executor.cc" - "hybrid/executor/worker/task_compile_engine.cc" - "hybrid/executor/worker/shape_inference_engine.cc" - "hybrid/executor/worker/execution_engine.cc" - "hybrid/model/hybrid_model.cc" - "hybrid/model/hybrid_model_builder.cc" - "hybrid/model/node_item.cc" - "hybrid/model/graph_item.cc" - "hybrid/node_executor/aicore/aicore_node_executor.cc" - "hybrid/node_executor/aicore/aicore_op_task.cc" - "hybrid/node_executor/aicore/aicore_task_builder.cc" - "hybrid/node_executor/aicore/aicore_task_compiler.cc" - "hybrid/node_executor/aicpu/aicpu_ext_info.cc" - "hybrid/node_executor/aicpu/aicpu_node_executor.cc" - "hybrid/node_executor/compiledsubgraph/known_node_executor.cc" - "hybrid/node_executor/ge_local/ge_local_node_executor.cc" - "hybrid/node_executor/host_cpu/host_cpu_node_executor.cc" - "hybrid/node_executor/controlop/control_op_executor.cc" - "hybrid/node_executor/partitioned_call/partitioned_call_node_executor.cc" - "hybrid/node_executor/hccl/hccl_node_executor.cc" - "hybrid/node_executor/rts/rts_node_executor.cc" - "hybrid/node_executor/rts/rts_node_task.cc" - "hybrid/node_executor/rts/rts_task_factory.cc" - "hybrid/node_executor/node_executor.cc" - "hybrid/node_executor/task_context.cc" - "hybrid/hybrid_davinci_model.cc" - "executor/ge_executor.cc" - "client/ge_api.cc" - "analyzer/analyzer.cc" - "ir_build/ge_ir_build.cc" - "ir_build/attr_options/utils.cc" - "ir_build/attr_options/keep_dtype_option.cc" - "ir_build/attr_options/weight_compress_option.cc" - "ir_build/option_utils.cc" - "graph/build/memory/memory_assigner.cc" - 
"graph/build/memory/graph_mem_assigner.cc" - "graph/build/memory/binary_block_mem_assigner.cc" - "graph/build/memory/block_mem_assigner.cc" - "graph/build/memory/hybrid_mem_assigner.cc" - "graph/build/memory/max_block_mem_assigner.cc" - "graph/build/memory/var_mem_assign_util.cc" - "graph/build/memory/buffer_pool_mem_assigner.cc" - "ge_opt_info/ge_opt_info.cc" -) - -set(INFER_SRC_LIST - "graph/manager/trans_var_data_utils.cc" - "common/fp16_t.cc" - "common/formats/utils/formats_trans_utils.cc" - "common/formats/format_transfers/datatype_transfer.cc" - "common/formats/format_transfers/format_transfer_transpose.cc" - "common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_fractal_z.cc" - "common/formats/format_transfers/format_transfer_fractal_nz.cc" - "common/formats/format_transfers/format_transfer_fractal_zz.cc" - "common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" - "common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" - "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" - "common/formats/format_transfers/format_transfer_fracz_nchw.cc" - "common/formats/format_transfers/format_transfer_fracz_nhwc.cc" - "common/formats/format_transfers/format_transfer_fracz_hwcn.cc" - "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" - "common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" - "common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" - "common/formats/formats.cc" - "common/profiling/profiling_manager.cc" - "common/dump/dump_properties.cc" - "common/dump/exception_dumper.cc" - "common/dump/dump_manager.cc" - "common/dump/dump_op.cc" - "common/dump/opdebug_register.cc" - "common/dump/dump_server.cc" - "common/helper/model_cache_helper.cc" - "ge_local_engine/engine/host_cpu_engine.cc" - "common/ge/plugin_manager.cc" - "common/ge/op_tiling_manager.cc" - "init/gelib.cc" - "engine_manager/dnnengine_manager.cc" - "opskernel_manager/ops_kernel_manager.cc" - "opskernel_manager/ops_kernel_builder_manager.cc" - "graph/manager/graph_manager.cc" - "graph/manager/graph_manager_utils.cc" - "graph/manager/graph_context.cc" - "graph/preprocess/graph_preprocess.cc" - "graph/preprocess/multi_batch_options.cc" - "graph/preprocess/multi_batch_copy_graph.cc" - "graph/execute/graph_execute.cc" - "graph/load/graph_loader.cc" - "graph/optimize/graph_optimize.cc" - "graph/optimize/mem_rw_conflict_optimize.cc" - "graph/optimize/summary_optimize.cc" - "graph/build/graph_builder.cc" - "graph/partition/engine_place.cc" - "graph/partition/graph_partition.cc" - "graph/partition/dynamic_shape_partition.cc" - "graph/partition/stage_partition.cc" - "generator/ge_generator.cc" - "generator/generator_api.cc" - "graph/manager/graph_var_manager.cc" - "graph/manager/host_mem_manager.cc" - "graph/manager/rdma_pool_allocator.cc" - "graph/manager/host_mem_allocator.cc" - "graph/manager/graph_mem_allocator.cc" - "graph/manager/graph_caching_allocator.cc" - "graph/manager/session_scope_mem_allocator.cc" - "graph/manager/graph_mem_manager.cc" - "model/ge_model.cc" - "model/ge_root_model.cc" - "graph/common/transop_util.cc" "graph/passes/pass_manager.cc" - "graph/passes/resource_pair_add_control_pass.cc" - "graph/passes/resource_pair_remove_control_pass.cc" "graph/passes/pass_utils.cc" - "graph/passes/base_pass.cc" - "graph/passes/bitcast_pass.cc" - 
"graph/passes/constant_folding_pass.cc" - "graph/passes/aicpu_constant_folding_pass.cc" - "graph/passes/reshape_remove_pass.cc" - "graph/passes/reshape_recovery_pass.cc" - "graph/passes/transop_breadth_fusion_pass.cc" - "graph/passes/transop_depth_fusion_pass.cc" - "graph/passes/transop_nearby_allreduce_fusion_pass.cc" - "graph/passes/same_transdata_breadth_fusion_pass.cc" - "graph/passes/transop_without_reshape_fusion_pass.cc" - "graph/passes/compile_nodes_pass.cc" - "graph/passes/variable_prepare_op_pass.cc" - "graph/passes/variable_ref_delete_op_pass.cc" - "graph/passes/variable_ref_useless_control_out_delete_pass.cc" - "graph/passes/subgraph_pass.cc" - "graph/passes/data_pass.cc" - "graph/passes/net_output_pass.cc" - "graph/passes/replace_transshape_pass.cc" - "graph/passes/constant_fuse_same_pass.cc" - "graph/passes/fuse_data_nodes_with_common_input_pass.cc" - "graph/passes/print_op_pass.cc" - "graph/passes/no_use_reshape_remove_pass.cc" - "graph/passes/iterator_op_pass.cc" - "graph/passes/input_output_connection_identify_pass.cc" - "graph/passes/atomic_addr_clean_pass.cc" - "graph/passes/mark_same_addr_pass.cc" - "graph/passes/mark_graph_unknown_status_pass.cc" - "graph/passes/mark_node_unknown_shape_pass.cc" - "graph/passes/mark_agnostic_pass.cc" - "graph/common/omg_util.cc" - "graph/common/bcast.cc" - "graph/common/local_context.cc" - "graph/passes/dimension_compute_pass.cc" - "graph/passes/dimension_adjust_pass.cc" - "graph/passes/get_original_format_pass.cc" - "graph/passes/shape_operate_op_remove_pass.cc" - "graph/passes/assert_pass.cc" - "graph/passes/dropout_pass.cc" - "graph/passes/infer_base_pass.cc" - "graph/passes/infershape_pass.cc" - "graph/passes/infer_value_range_pass.cc" - "graph/passes/unused_const_pass.cc" "graph/passes/permute_pass.cc" - "graph/passes/ctrl_edge_transfer_pass.cc" - "graph/passes/end_of_sequence_add_control_pass.cc" - "host_kernels/broadcast_gradient_args_kernel.cc" - "host_kernels/greater_kernel.cc" - "host_kernels/gather_v2_kernel.cc" - "host_kernels/maximum_kernel.cc" - "host_kernels/floormod_kernel.cc" - "host_kernels/floordiv_kernel.cc" - "host_kernels/range_kernel.cc" - "host_kernels/shape_kernel.cc" - "host_kernels/size_kernel.cc" - "host_kernels/shape_n_kernel.cc" - "host_kernels/rank_kernel.cc" - "host_kernels/broadcast_args_kernel.cc" - "host_kernels/fill_kernel.cc" - "host_kernels/empty_kernel.cc" - "host_kernels/expanddims_kernel.cc" - "host_kernels/reshape_kernel.cc" - "host_kernels/squeeze_kernel.cc" - "host_kernels/unsqueeze_kernel.cc" - "host_kernels/kernel_utils.cc" - "host_kernels/cast_kernel.cc" - "host_kernels/transdata_kernel.cc" - "host_kernels/unpack_kernel.cc" - "host_kernels/transpose_kernel.cc" - "host_kernels/permute_kernel.cc" - "host_kernels/pack_kernel.cc" - "host_kernels/concat_v2_kernel.cc" - "host_kernels/concat_offset_kernel.cc" - "host_kernels/strided_slice_kernel.cc" - "host_kernels/ssd_prior_box_kernel.cc" - "host_kernels/add_kernel.cc" - "host_kernels/sub_kernel.cc" - "host_kernels/mul_kernel.cc" - "host_kernels/reduce_prod_kernel.cc" - "host_kernels/rsqrt_kernel.cc" - "host_kernels/slice_kernel.cc" - "host_kernels/slice_d_kernel.cc" - "host_kernels/dynamic_stitch_kernel.cc" - "host_kernels/identity_kernel.cc" - "host_kernels/reformat_kernel.cc" - "graph/passes/stop_gradient_pass.cc" - "graph/passes/prevent_gradient_pass.cc" - "graph/passes/identity_pass.cc" - "graph/passes/ref_identity_delete_op_pass.cc" "graph/passes/placeholder_with_default_pass.cc" - "graph/passes/snapshot_pass.cc" - 
"graph/passes/guarantee_const_pass.cc" - "graph/passes/var_is_initialized_op_pass.cc" - "graph/passes/parallel_concat_start_op_pass.cc" - "graph/passes/folding_pass.cc" - "graph/passes/cast_translate_pass.cc" - "graph/passes/prune_pass.cc" - "graph/passes/merge_to_stream_merge_pass.cc" - "graph/passes/merge_input_memcpy_pass.cc" - "graph/passes/switch_to_stream_switch_pass.cc" - "graph/passes/mark_force_unknown_for_cond_pass.cc" - "graph/passes/attach_stream_label_pass.cc" - "graph/passes/multi_batch_pass.cc" - "graph/passes/multi_batch_clone_pass.cc" - "graph/passes/subexpression_migration_pass.cc" - "graph/passes/subgraph_const_migration_pass.cc" - "graph/passes/unused_args_clean_pass.cc" - "graph/passes/next_iteration_pass.cc" - "graph/passes/control_trigger_pass.cc" - "graph/passes/cond_pass.cc" - "graph/passes/cond_remove_pass.cc" - "graph/passes/for_pass.cc" - "graph/passes/enter_pass.cc" - "graph/passes/assign_remove_pass.cc" - "graph/passes/inplace_support_check_pass.cc" - "graph/passes/addn_pass.cc" - "graph/passes/common_subexpression_elimination_pass.cc" + "graph/passes/prevent_gradient_pass.cc" + "graph/passes/print_op_pass.cc" + "graph/passes/prune_pass.cc" + "graph/passes/ref_identity_delete_op_pass.cc" "graph/passes/remove_same_const_pass.cc" - "graph/passes/useless_control_out_remove_pass.cc" - "graph/passes/transop_symmetry_elimination_pass.cc" + "graph/passes/replace_transshape_pass.cc" + "graph/passes/replace_with_empty_const_pass.cc" + "graph/passes/reshape_recovery_pass.cc" + "graph/passes/reshape_remove_pass.cc" + "graph/passes/resource_pair_add_control_pass.cc" + "graph/passes/resource_pair_remove_control_pass.cc" + "graph/passes/same_transdata_breadth_fusion_pass.cc" "graph/passes/save_pass.cc" + "graph/passes/set_input_output_offset_pass.cc" + "graph/passes/shape_operate_op_remove_pass.cc" + "graph/passes/snapshot_pass.cc" + "graph/passes/stop_gradient_pass.cc" + "graph/passes/subexpression_migration_pass.cc" + "graph/passes/subgraph_const_migration_pass.cc" + "graph/passes/subgraph_pass.cc" + "graph/passes/switch_data_edges_bypass.cc" "graph/passes/switch_dead_branch_elimination.cc" "graph/passes/switch_logic_remove_pass.cc" - "graph/passes/switch_data_edges_bypass.cc" - "graph/passes/merge_pass.cc" - "graph/passes/variable_op_pass.cc" - "graph/passes/cast_remove_pass.cc" + "graph/passes/switch_to_stream_switch_pass.cc" + "graph/passes/transop_breadth_fusion_pass.cc" + "graph/passes/transop_depth_fusion_pass.cc" + "graph/passes/transop_nearby_allreduce_fusion_pass.cc" + "graph/passes/transop_symmetry_elimination_pass.cc" + "graph/passes/transop_without_reshape_fusion_pass.cc" "graph/passes/transpose_transdata_pass.cc" - "graph/passes/hccl_memcpy_pass.cc" - "graph/passes/hccl_continuous_memcpy_pass.cc" - "graph/passes/flow_ctrl_pass.cc" - "graph/passes/global_step_insert_pass.cc" - "graph/passes/link_gen_mask_nodes_pass.cc" - "graph/passes/replace_with_empty_const_pass.cc" - "graph/passes/hccl_group_pass.cc" - "graph/passes/hccl_tailing_optimization_pass.cc" - "graph/passes/memcpy_addr_async_pass.cc" - "graph/passes/set_input_output_offset_pass.cc" - "graph/passes/parallel_group_pass.cc" - "graph/passes/buffer_pool_memory_pass.cc" - "graph/manager/model_manager/event_manager.cc" - "graph/manager/util/rt_context_util.cc" - "graph/manager/util/variable_accelerate_ctrl.cc" - "graph/manager/util/debug.cc" - "graph/load/model_manager/model_manager.cc" - "graph/load/model_manager/data_inputer.cc" - "graph/load/model_manager/davinci_model.cc" - 
"graph/load/model_manager/model_utils.cc" - "graph/load/model_manager/aipp_utils.cc" - "graph/load/model_manager/tbe_handle_store.cc" - "graph/load/model_manager/cpu_queue_schedule.cc" - "graph/load/model_manager/zero_copy_task.cc" - "graph/load/model_manager/zero_copy_offset.cc" - "graph/load/model_manager/data_dumper.cc" - "graph/load/model_manager/task_info/task_info.cc" - "graph/load/model_manager/task_info/event_record_task_info.cc" - "graph/load/model_manager/task_info/event_wait_task_info.cc" - "graph/load/model_manager/task_info/ffts_task_info.cc" - "graph/load/model_manager/task_info/fusion_start_task_info.cc" - "graph/load/model_manager/task_info/fusion_stop_task_info.cc" - "graph/load/model_manager/task_info/kernel_ex_task_info.cc" - "graph/load/model_manager/task_info/kernel_task_info.cc" - "graph/load/model_manager/task_info/label_set_task_info.cc" - "graph/load/model_manager/task_info/label_switch_by_index_task_info.cc" - "graph/load/model_manager/task_info/label_goto_ex_task_info.cc" - "graph/load/model_manager/task_info/memcpy_async_task_info.cc" - "graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc" - "graph/load/model_manager/task_info/profiler_trace_task_info.cc" - "graph/load/model_manager/task_info/stream_active_task_info.cc" - "graph/load/model_manager/task_info/stream_switch_task_info.cc" - "graph/load/model_manager/task_info/stream_switchn_task_info.cc" - "graph/load/model_manager/task_info/end_graph_task_info.cc" - "graph/load/model_manager/task_info/model_exit_task_info.cc" - "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" - "graph/load/model_manager/task_info/super_kernel/super_kernel.cc" - "hybrid/hybrid_davinci_model_stub.cc" - "ir_build/ge_ir_build.cc" - "ir_build/attr_options/utils.cc" - "ir_build/attr_options/keep_dtype_option.cc" - "ir_build/attr_options/weight_compress_option.cc" - "ir_build/option_utils.cc" + "graph/passes/unused_args_clean_pass.cc" + "graph/passes/unused_const_pass.cc" + "graph/passes/useless_control_out_remove_pass.cc" + "graph/passes/var_is_initialized_op_pass.cc" + "graph/passes/variable_op_pass.cc" + "graph/passes/variable_prepare_op_pass.cc" + "graph/passes/variable_ref_delete_op_pass.cc" + "graph/passes/variable_ref_useless_control_out_delete_pass.cc" + "graph/preprocess/graph_preprocess.cc" "graph/preprocess/insert_op/ge_aipp_op.cc" "graph/preprocess/insert_op/util_insert_aipp_op.cc" + "graph/preprocess/multi_batch_copy_graph.cc" + "graph/preprocess/multi_batch_options.cc" + "host_kernels/add_kernel.cc" + "host_kernels/broadcast_args_kernel.cc" + "host_kernels/broadcast_gradient_args_kernel.cc" + "host_kernels/cast_kernel.cc" + "host_kernels/concat_offset_kernel.cc" + "host_kernels/concat_v2_kernel.cc" + "host_kernels/dynamic_stitch_kernel.cc" + "host_kernels/empty_kernel.cc" + "host_kernels/expanddims_kernel.cc" + "host_kernels/fill_kernel.cc" + "host_kernels/floordiv_kernel.cc" + "host_kernels/floormod_kernel.cc" + "host_kernels/gather_v2_kernel.cc" + "host_kernels/greater_kernel.cc" + "host_kernels/identity_kernel.cc" + "host_kernels/kernel_utils.cc" + "host_kernels/maximum_kernel.cc" + "host_kernels/mul_kernel.cc" + "host_kernels/pack_kernel.cc" + "host_kernels/permute_kernel.cc" + "host_kernels/range_kernel.cc" + "host_kernels/rank_kernel.cc" + "host_kernels/reduce_prod_kernel.cc" + "host_kernels/reformat_kernel.cc" + "host_kernels/reshape_kernel.cc" + "host_kernels/rsqrt_kernel.cc" + "host_kernels/shape_kernel.cc" + "host_kernels/shape_n_kernel.cc" + "host_kernels/size_kernel.cc" + 
"host_kernels/slice_d_kernel.cc" + "host_kernels/slice_kernel.cc" + "host_kernels/squeeze_kernel.cc" + "host_kernels/ssd_prior_box_kernel.cc" + "host_kernels/strided_slice_kernel.cc" + "host_kernels/sub_kernel.cc" + "host_kernels/transdata_kernel.cc" + "host_kernels/transpose_kernel.cc" + "host_kernels/unpack_kernel.cc" + "host_kernels/unsqueeze_kernel.cc" + #"hybrid/hybrid_davinci_model_stub.cc" "hybrid/node_executor/aicpu/aicpu_ext_info.cc" - "graph/build/model_builder.cc" - "graph/build/task_generator.cc" - "graph/build/stream_allocator.cc" - "graph/build/logical_stream_allocator.cc" - "graph/build/stream_graph_optimizer.cc" - "graph/build/run_context.cc" - "graph/build/label_allocator.cc" - "graph/label/label_maker.cc" - "graph/label/if_label_maker.cc" - "graph/label/case_label_maker.cc" - "graph/label/while_label_maker.cc" - "graph/label/partitioned_call_label_maker.cc" - "analyzer/analyzer.cc" - "graph/build/memory/memory_assigner.cc" - "graph/build/memory/graph_mem_assigner.cc" - "graph/build/memory/binary_block_mem_assigner.cc" - "graph/build/memory/block_mem_assigner.cc" - "graph/build/memory/hybrid_mem_assigner.cc" - "graph/build/memory/max_block_mem_assigner.cc" - "graph/build/memory/var_mem_assign_util.cc" - "graph/build/memory/buffer_pool_mem_assigner.cc" - "ge_opt_info/ge_opt_info.cc" + "init/gelib.cc" + "ir_build/attr_options/keep_dtype_option.cc" + "ir_build/attr_options/utils.cc" + "ir_build/attr_options/weight_compress_option.cc" + "ir_build/ge_ir_build.cc" + "ir_build/option_utils.cc" + "model/ge_model.cc" + "model/ge_root_model.cc" + "opskernel_manager/ops_kernel_builder_manager.cc" + "opskernel_manager/ops_kernel_manager.cc" ) set(RUNNER_SRC_LIST "client/ge_api.cc" "session/inner_session.cc" "session/session_manager.cc" + "common/profiling/ge_runner_profiling.cc" + "graph/manager/memory_api.cc" + "graph/manager/util/hcom_util.cc" + "graph/load/model_manager/task_info/hccl_task_info.cc" + "hybrid/node_executor/hccl/hccl_node_executor.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) message("CMAKE_CXX_COMPILER_VERSION = ${CMAKE_CXX_COMPILER_VERSION}") ############ libge_runner.so ############ add_library(ge_runner SHARED - ${TRAIN_SRC_LIST} + ${EXECUTOR_SRC_LIST} + ${COMPILER_SRC_LIST} + ${RUNNER_SRC_LIST} $,msprofiler_fwk,msprofiler_fwk_object>> ) @@ -777,6 +784,8 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external ${GE_CODE_DIR}/../abl/licctrl + ${GE_CODE_DIR}/../ace/comop/inc + ${GE_CODE_DIR}/../ace/comop/inc/external #### blue zone ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include @@ -814,7 +823,8 @@ target_link_libraries(ge_runner PRIVATE ############ libge_compiler.so ############ add_library(ge_compiler SHARED - ${INFER_SRC_LIST} + "hybrid/hybrid_davinci_model_stub.cc" + ${COMPILER_SRC_LIST} ) add_dependencies(ge_compiler @@ -854,6 +864,8 @@ target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/../toolchain/ide/ide-daemon/external ${GE_CODE_DIR}/../abl/adump/external ${GE_CODE_DIR}/../abl/licctrl + ${GE_CODE_DIR}/../ace/comop/inc + ${GE_CODE_DIR}/../ace/comop/inc/external #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include @@ -886,6 +898,138 @@ target_link_libraries(ge_compiler PRIVATE -ldl ) +######## libge_executor.a ######## +add_library(ge_executor STATIC + ${EXECUTOR_SRC_LIST} +) + +add_dependencies(ge_executor + graphengine_protos +) + +target_compile_options(ge_executor PRIVATE + 
$<$,$>:-fvisibility=hidden -O2 -Werror -Wno-deprecated-declarations -fno-common> + $<$,$>:/MTd> + $<$,$>:/MT> + $<$:-Werror=unused-variable> + $<$:-Werror=unused-const-variable -Werror=format> +) + +target_compile_definitions(ge_executor PRIVATE + PROTOBUF_INLINE_NOT_IN_HEADERS=0 + DAVINCI_SUPPORT_PROFILING + google=ascend_private + $,OS_TYPE=WIN,OS_TYPE=0> + $<$:SECUREC_USING_STD_SECURE_LIB=0 NOMINMAX> + $<$:ONLY_COMPILE_OPEN_SRC> + LOG_CPP +) + +target_include_directories(ge_executor SYSTEM PRIVATE + ${GE_CODE_DIR}/ge + ${GE_CODE_DIR}/inc + ${GE_CODE_DIR}/inc/external + ${GE_CODE_DIR}/inc/framework + ${METADEF_DIR}/inc + ${METADEF_DIR}/inc/external + ${CMAKE_BINARY_DIR} + ${CMAKE_BINARY_DIR}/proto/graphengine_protos + #### yellow zone #### + ${GE_CODE_DIR}/../ace/comop/inc + ${GE_CODE_DIR}/../ace/comop/inc/external + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:$> + $<$>:$> + #### blue zone #### + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> +) + +target_link_libraries(ge_executor PRIVATE + $ + $<$>:$> + $<$>:$> + $<$>:$> + json + ascend_protobuf_static + c_sec + $<$>:-lrt> + -ldl +) + +######## libge_executor.so ######## +add_library(ge_executor_shared SHARED + ${EXECUTOR_SRC_LIST} +) + +add_dependencies(ge_executor_shared + graphengine_protos +) + +target_compile_options(ge_executor_shared PRIVATE + -fno-common + -Werror + -O2 + -Wno-deprecated-declarations + -fvisibility=hidden +) + +target_compile_definitions(ge_executor_shared PRIVATE + PROTOBUF_INLINE_NOT_IN_HEADERS=0 + DAVINCI_SUPPORT_PROFILING + google=ascend_private + FUNC_VISIBILITY + $<$:ONLY_COMPILE_OPEN_SRC> +) + +target_include_directories(ge_executor_shared PRIVATE + ${GE_CODE_DIR}/ge + ${GE_CODE_DIR}/inc + ${GE_CODE_DIR}/inc/external + ${GE_CODE_DIR}/inc/framework + ${METADEF_DIR}/inc + ${METADEF_DIR}/inc/external + ${CMAKE_BINARY_DIR} + ${CMAKE_BINARY_DIR}/proto/graphengine_protos + #### yellow zone #### + ${GE_CODE_DIR}/../ace/comop/inc + ${GE_CODE_DIR}/../ace/comop/inc/external + $<$>:${GE_DEPEND_DIR}/inc> + #### blue zone #### + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> +) + +target_link_options(ge_executor_shared PRIVATE + -Wl,-Bsymbolic + -Wl,--exclude-libs,ALL +) + +target_link_libraries(ge_executor_shared PRIVATE + $ + $<$>:$> + $<$>:$> + $<$>:$> + $<$>:$> + $<$>:$> + -Wl,--no-as-needed + ge_common + runtime + slog + graph + register + error_manager + ascend_protobuf + c_sec + -Wl,--as-needed + json + $<$>:-lrt> + -ldl +) + +set_target_properties(ge_executor_shared PROPERTIES + OUTPUT_NAME ge_executor +) + ############ libascendcl.so ############ file(GENERATE OUTPUT ${CMAKE_BINARY_DIR}/dummy.c CONTENT "") #add_library(dummy_obj OBJECT ${CMAKE_BINARY_DIR}/dummy.c) @@ -1081,7 +1225,7 @@ add_custom_command( set(INSTALL_BASE_DIR "") set(INSTALL_LIBRARY_DIR lib) -install(TARGETS ge_runner ge_compiler opensrc_ascendcl OPTIONAL +install(TARGETS ge_runner ge_compiler ge_executor_shared opensrc_ascendcl OPTIONAL LIBRARY DESTINATION ${INSTALL_LIBRARY_DIR} ) diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index 6773787c..763654bd 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -32,7 +32,6 @@ #include "external/ge/ge_api_types.h" #include "graph/build/graph_builder.h" #include "graph/ge_local_context.h" -#include "graph/load/graph_loader.h" #include "graph/manager/graph_manager_utils.h" #include "graph/manager/util/variable_accelerate_ctrl.h" #include "graph/optimize/graph_optimize.h" diff --git 
a/tests/ut/ge/graph/load/model_manager_unittest.cc b/tests/ut/ge/graph/load/model_manager_unittest.cc index a0644510..166ae4af 100644 --- a/tests/ut/ge/graph/load/model_manager_unittest.cc +++ b/tests/ut/ge/graph/load/model_manager_unittest.cc @@ -54,31 +54,13 @@ class UtestModelManagerModelManager : public testing::Test { } void SetUp() {} - void TearDown() {} - void CreateGraph(Graph &graph) { - TensorDesc desc(ge::Shape({1, 3, 224, 224})); - uint32_t size = desc.GetShape().GetShapeSize(); - desc.SetSize(size); - auto data = op::Data("Data").set_attr_index(0); - data.update_input_desc_data(desc); - data.update_output_desc_out(desc); - - auto flatten = op::Flatten("Flatten").set_input_x(data, data.name_out_out()); - - std::vector<Operator> inputs{data}; - std::vector<Operator> outputs{flatten}; - std::vector<Operator> targets{flatten}; - // Graph graph("test_graph"); - graph.SetInputs(inputs).SetOutputs(outputs).SetTargets(targets); - } - void GenUnencryptModelData(ModelData &data) { const int model_len = 10; data.model_len = sizeof(ModelFileHeader) + model_len; data.model_data = new uint8_t[data.model_len]; - memset((uint8_t *)data.model_data + sizeof(ModelFileHeader), 0, model_len); + memset(data.model_data, 0, data.model_len); ModelFileHeader *header = (ModelFileHeader *)data.model_data; header->magic = MODEL_FILE_MAGIC_NUM; @@ -88,19 +70,6 @@ class UtestModelManagerModelManager : public testing::Test { header->is_checksum = ModelCheckType::CHECK; } - void GenEncryptModelData(ModelData &data) { - const int model_len = 10; - data.key = ENC_KEY; - data.model_data = new uint8_t[data.model_len]; - uint8_t data_ori[model_len]; - memset(data_ori, 0, model_len); - ModelFileHeader *header = (ModelFileHeader *)data.model_data; - header->magic = MODEL_FILE_MAGIC_NUM; - header->version = MODEL_VERSION; - header->is_encrypt = ModelEncryptType::ENCRYPTED; - header->length = 10; // encrypt_len; - } - void LoadStandardModelData(ModelData &data) { data.model_len = 512; data.model_data = new uint8_t[data.model_len]; From eb0d262cb64d8acebe453714aa895d7714fb432e Mon Sep 17 00:00:00 2001 From: wuweikang Date: Fri, 9 Jul 2021 17:30:33 +0800 Subject: [PATCH 176/226] update submodule --- metadef | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metadef b/metadef index 84e7ab39..3ace5b6f 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 84e7ab39b0daf7ca2b2f5549e3279647da7875e2 +Subproject commit 3ace5b6f10e0af784a1c3211fd769d6e8860e864 diff --git a/parser b/parser index ffd94df4..db68a1a4 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit ffd94df471f7dd2b1928cc8d27e43e7210aaa7e7 +Subproject commit db68a1a4f1a6ae69dbf9a5f338392d50ea3874e3 From fdab42b28ddb2f2a54b681205c7175dd57850844 Mon Sep 17 00:00:00 2001 From: 李磊 Date: Fri, 9 Jul 2021 14:50:30 +0800 Subject: [PATCH 177/226] fix coverity warnings --- ge/graph/load/model_manager/davinci_model.cc | 32 +++++++++---------- ge/hybrid/executor/worker/execution_engine.cc | 2 +- ge/hybrid/model/hybrid_model_builder.cc | 2 ++ .../node_executor/hccl/hccl_node_executor.cc | 3 +- ge/ir_build/ge_ir_build.cc | 1 + 5 files changed, 22 insertions(+), 18 deletions(-) diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 9d86039a..ddd6c0c4 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -387,8 +387,8 @@ Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weigh Status
DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) { if (is_feature_map_mem_has_inited_) { - REPORT_INNER_ERROR("E19999", "Call InitFeatureMapMem more than once, model_id:%u, check invalid", model_id_); - GELOGE(PARAM_INVALID, "[Check][Param] call InitFeatureMapMem more than once, model_id:%u", model_id_); + REPORT_INNER_ERROR("E19999", "InitFeatureMapMem is called more than once, model_id:%u, check invalid", model_id_); + GELOGE(PARAM_INVALID, "[Check][Param] InitFeatureMapMem is called more than once, model_id:%u", model_id_); return PARAM_INVALID; } is_feature_map_mem_has_inited_ = true; @@ -456,8 +456,7 @@ Status DavinciModel::InitVariableMem() { void DavinciModel::InitRuntimeParams() { int64_t value = 0; - bool ret; - ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_MEMORY_SIZE, value); + bool ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_MEMORY_SIZE, value); runtime_param_.mem_size = ret ? (uint64_t)value : 0; ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_WEIGHT_SIZE, value); runtime_param_.weight_size = ret ? (uint64_t)value : 0; @@ -983,7 +982,7 @@ Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &nod // op_desc Checked by Init: Data, valid. auto op_desc = node->GetOpDesc(); if (node->GetOwnerComputeGraph() != graph) { - GELOGI("Skip subgraph Data node: %s.", op_desc->GetName().c_str()); + GELOGI("Skip Data node: %s in subgraph.", op_desc->GetName().c_str()); return SUCCESS; } @@ -1195,7 +1194,7 @@ Status DavinciModel::InitRealSizeAndShapeInfo(const ComputeGraphPtr &compute_gra GELOGD("No need to get size and shape of netoutput in subgraph."); return SUCCESS; } - GELOGD("Start init real size and shape info of %s.", node->GetName().c_str()); + GELOGD("Start to initialize real size and shape info of %s.", node->GetName().c_str()); GetAllGearsInfo(node); if (is_getnext_sink_dynamic_) { GE_IF_BOOL_EXEC(GetGetDynamicDimsNodeInfo(node) != SUCCESS, @@ -1238,7 +1237,7 @@ void DavinciModel::GetAllGearsInfo(const NodePtr &node) { } if (!gear_info.empty()) { all_gears_info_.emplace_back(gear_info); - GELOGD("Init all gears info from %s, gaer info is %s", node->GetName().c_str(), + GELOGD("Init all gears info from %s, gear info is %s", node->GetName().c_str(), formats::JoinToString(gear_info).c_str()); } } @@ -1318,7 +1317,7 @@ Status DavinciModel::GetGearAndRealOutSizeInfo(const ComputeGraphPtr &graph, con Status DavinciModel::GetRealOutputSizeOfCase(const ComputeGraphPtr &graph, size_t input_index, const NodePtr &case_node) { - GELOGD("Start get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index); + GELOGD("Start to get output size of %s, which is %zu input to netoutput", case_node->GetName().c_str(), input_index); const auto &func_desc = case_node->GetOpDesc(); GE_CHECK_NOTNULL(func_desc); std::map, int64_t> gear_and_real_out_size_info; @@ -2227,10 +2226,10 @@ void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputO dims[i] = shape.GetDim(i); } } else { // FOR FORMAT_NHWC or FORMAT_NCHW - dims[0] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_N : NCHW_DIM_N); // 0: first dim - dims[1] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_C : NCHW_DIM_C); // 1: second dim - dims[2] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_H : NCHW_DIM_H); // 2: third dim - dims[3] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_W : NCHW_DIM_W); // 3: forth dim + dims[0] = shape.GetDim((format == FORMAT_NHWC) ? 
NHWC_DIM_N : NCHW_DIM_N); // 0: first dim + dims[1] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_C : NCHW_DIM_C); // 1: second dim + dims[2] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_H : NCHW_DIM_H); // 2: third dim + dims[3] = shape.GetDim((format == FORMAT_NHWC) ? NHWC_DIM_W : NCHW_DIM_W); // 3: forth dim } output.shape_info.num = dims[0]; // 0: first dim output.shape_info.channel = dims[1]; // 1: second dim @@ -2741,7 +2740,7 @@ Status DavinciModel::ReturnResult(uint32_t data_id, const bool rslt_flg, const b } if (!has_output_node_) { - GELOGW("Output tensor list is empty, model id: %u", model_id_); + GELOGW("The tensor list of output is empty, model id: %u", model_id_); GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), "[Call][OnComputeDone] failed, model_id:%u, data_id:%u.", model_id_, data_id); return INTERNAL_ERROR; @@ -3071,7 +3070,7 @@ Status DavinciModel::CreateKnownZeroCopyMap(const vector &inputs, const GELOGI("output %zu, v addr %p, r addr %p, p addr %p", i, addr_list[i], addr, outputs[i]); } - GELOGI("success, known input data info size: %zu, known output data info size: %zu", + GELOGI("create map for zero copy success, known input data info size: %zu, known output data info size: %zu", known_input_data_info_.size(), known_output_data_info_.size()); return SUCCESS; } @@ -3106,12 +3105,12 @@ Status DavinciModel::UpdateKnownZeroCopyAddr(vector &total_io_addrs, boo total_io_addrs[i] = known_output_data_info_.at(total_io_addrs[i]); } } - GELOGI("success, total io addrs size: %zu", total_io_addrs.size()); + GELOGI("update known zero copy addr success, total io addrs size: %zu", total_io_addrs.size()); return SUCCESS; } Status DavinciModel::UpdateKnownNodeArgs(const vector &inputs, const vector &outputs) { - GELOGI("DavinciModel::UpdateKnownNodeArgs in"); + GELOGI("DavinciModel::UpdateKnownNodeArgs begin"); GE_CHK_STATUS_RET(CreateKnownZeroCopyMap(inputs, outputs), "[Call][CreateKnownZeroCopyMap] failed, model_id:%u.", model_id_); total_io_addrs_.clear(); @@ -3683,6 +3682,7 @@ Status DavinciModel::InitConstant(const OpDescPtr &op_desc) { elem_num = 1; } uint64_t *buff = reinterpret_cast(tensor->MutableData().data()); + GE_CHECK_NOTNULL(buff); if (ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) != SUCCESS) { GELOGE(FAILED, "[Call][CheckInt64Uint32MulOverflow] Shape size:%ld is invalid", elem_num); return FAILED; diff --git a/ge/hybrid/executor/worker/execution_engine.cc b/ge/hybrid/executor/worker/execution_engine.cc index ca864244..4bd02193 100755 --- a/ge/hybrid/executor/worker/execution_engine.cc +++ b/ge/hybrid/executor/worker/execution_engine.cc @@ -428,7 +428,7 @@ Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const continue; } - int64_t expected_size; + int64_t expected_size = 0; (void)TensorUtils::GetSize(*tensor_desc, expected_size); GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size); auto size_diff = expected_size - static_cast(input_tensor->GetSize()); diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index 554ddbbb..8f015420 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -900,6 +900,7 @@ Status HybridModelBuilder::LoadGraph() { GE_CHECK_NOTNULL(node_item); AscendString graph_name; GE_CHK_GRAPH_STATUS_RET(it.second->GetGraph().GetName(graph_name), "Failed to get subgraph name"); + GE_CHECK_NOTNULL(graph_name.GetString()); auto subgraph = 
hybrid_model_.GetRootGraph()->GetSubgraph(graph_name.GetString()); GE_CHECK_NOTNULL(subgraph); GE_CHK_STATUS_RET(IdentifyVariableOutputs(*node_item, subgraph), @@ -967,6 +968,7 @@ Status HybridModelBuilder::HandleDtString(const GeTensor &tensor, void *var_addr auto &mutable_tensor = const_cast(tensor); uint64_t *buff = reinterpret_cast(mutable_tensor.MutableData().data()); + GE_CHECK_NOTNULL(buff); GE_CHK_BOOL_RET_STATUS(ge::CheckInt64Uint32MulOverflow(elem_num, kBytes * kStringHeadElems) == SUCCESS, FAILED, "[Invoke][CheckInt64Uint32MulOverflow] failed because Shape size is invalid."); auto offset = static_cast(elem_num * kBytes * kStringHeadElems); diff --git a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc index 3f887819..31f2c7a1 100644 --- a/ge/hybrid/node_executor/hccl/hccl_node_executor.cc +++ b/ge/hybrid/node_executor/hccl/hccl_node_executor.cc @@ -417,7 +417,7 @@ Status BuildGatherAllToAllParams(TaskContext &context, HcomGatherAllToAllVParams } params.recvtype = iter->second; - int64_t addr_len; + int64_t addr_len = 0; (void) ge::AttrUtils::GetInt(op_desc, "addr_length", addr_len); params.addrLength = static_cast(addr_len); @@ -460,6 +460,7 @@ Status AllToAllNodeTask::ExecuteAsync(TaskContext &context, std::function &inputs, const vector &outputs, Graph &graph) { ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther); + GE_CHECK_NOTNULL(op_type.GetString()); auto op_type_str = std::string(op_type.GetString()); auto op_name = op_type_str + "_" + std::to_string(ge::GetCurrentTimestamp()); auto op_desc = ge::MakeShared(op_name, op_type_str); From f7c45d81140f1bcdceb011219454ce724cbae23e Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Fri, 9 Jul 2021 19:28:05 +0800 Subject: [PATCH 178/226] mmSetCurrentThreadName --- ge/graph/execute/model_executor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index bcbc08e6..2fc7b0af 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -193,7 +193,7 @@ Status ModelExecutor::PushGraph(const RunArgs &args) { void ModelExecutor::RunThread() { ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); - if (prctl(PR_SET_NAME, ("GE_Run")) != 0) { + if (mmSetCurrentThreadName("GE_Run") != EN_OK) { GELOGW("Set thread name failed."); } From 8aa46479fa227ab9e4b4044a35e26bd6edd0d64f Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Fri, 9 Jul 2021 19:44:18 +0800 Subject: [PATCH 179/226] mmSetCurrentThreadName stub --- tests/depends/mmpa/src/mmpa_stub.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/depends/mmpa/src/mmpa_stub.cc b/tests/depends/mmpa/src/mmpa_stub.cc index b0f1fb87..8801aacd 100644 --- a/tests/depends/mmpa/src/mmpa_stub.cc +++ b/tests/depends/mmpa/src/mmpa_stub.cc @@ -381,3 +381,8 @@ INT32 mmGetPid() { return (INT32)getpid(); } + +INT32 mmSetCurrentThreadName(const CHAR *name) +{ + return EN_OK; +} \ No newline at end of file From bd76e4d1dd79789d5d4522e867a82637997c3237 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Sat, 10 Jul 2021 09:21:48 +0800 Subject: [PATCH 180/226] Get thread_pool from stream resource in single_op executor.
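The goal of this patch is to let all dynamic single ops on a stream share one lazily
created ThreadPool held by the StreamResource, instead of each SubgraphExecutor
constructing a private pool of kDefaultThreadNum threads. A minimal, stand-alone
sketch of the borrow-or-own ownership pattern the executor adopts below; SimplePool
and every other name in this sketch are illustrative stand-ins, not the GE API:

#include <iostream>

// Stand-in for ge::ThreadPool; only construction matters for the sketch.
class SimplePool {
 public:
  explicit SimplePool(int threads) : threads_(threads) {}
  int Size() const { return threads_; }
 private:
  int threads_;
};

// Stand-in for SubgraphExecutor: borrows a caller-owned pool when one is
// injected, otherwise lazily creates and owns a private fallback pool.
class Executor {
 public:
  explicit Executor(SimplePool *pool = nullptr) : pool_(pool) {}
  ~Executor() {
    if (own_pool_) {
      delete pool_;  // free only what we created ourselves
    }
  }
  void Init() {
    if (pool_ == nullptr) {
      pool_ = new SimplePool(4);  // mirrors the kDefaultThreadNum fallback
      own_pool_ = true;
    }
  }
  int PoolSize() const { return (pool_ != nullptr) ? pool_->Size() : 0; }
 private:
  SimplePool *pool_;
  bool own_pool_ = false;
};

int main() {
  SimplePool shared(4);         // held by the stream resource, reused per op
  Executor borrowing(&shared);  // single-op path: pool injected at Init time
  borrowing.Init();             // borrows; never deletes the shared pool
  Executor owning;              // no shared pool available
  owning.Init();                // creates and later deletes its own
  std::cout << borrowing.PoolSize() << " " << owning.PoolSize() << std::endl;
  return 0;
}

A raw pointer plus an ownership flag (rather than std::unique_ptr) matches the diff
below, where the pool may be borrowed from the StreamResource and must never be
freed by the executor that merely uses it.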
--- ge/hybrid/executor/hybrid_model_executor.cc | 5 +++-- ge/hybrid/executor/hybrid_model_executor.h | 2 +- ge/hybrid/executor/subgraph_executor.cc | 20 +++++++++++++++---- ge/hybrid/executor/subgraph_executor.h | 6 ++++-- ge/single_op/single_op_model.cc | 4 +++- ge/single_op/stream_resource.cc | 11 ++++++++++ ge/single_op/stream_resource.h | 3 +++ metadef | 2 +- parser | 2 +- .../ge/single_op/stream_resource_unittest.cc | 3 +++ 10 files changed, 46 insertions(+), 12 deletions(-) diff --git a/ge/hybrid/executor/hybrid_model_executor.cc b/ge/hybrid/executor/hybrid_model_executor.cc index 2bb683c7..dd8aace6 100755 --- a/ge/hybrid/executor/hybrid_model_executor.cc +++ b/ge/hybrid/executor/hybrid_model_executor.cc @@ -35,10 +35,11 @@ HybridModelExecutor::HybridModelExecutor(HybridModel *model, uint32_t device_id, HybridModelExecutor::~HybridModelExecutor() { } -Status HybridModelExecutor::Init() { +Status HybridModelExecutor::Init(ThreadPool *thread_pool) { GELOGD("Start to init HybridGraphEngine."); GE_CHK_STATUS_RET_NOLOG(InitExecutionContext()); - root_graph_executor_.reset(new (std::nothrow) SubgraphExecutor(model_->GetRootGraphItem(), &context_)); + root_graph_executor_.reset( + new (std::nothrow) SubgraphExecutor(model_->GetRootGraphItem(), &context_, false, thread_pool)); GE_CHECK_NOTNULL(root_graph_executor_); GELOGD("HybridGraphEngine initialized successfully."); return SUCCESS; diff --git a/ge/hybrid/executor/hybrid_model_executor.h b/ge/hybrid/executor/hybrid_model_executor.h index 102e4f8b..dbec7adf 100644 --- a/ge/hybrid/executor/hybrid_model_executor.h +++ b/ge/hybrid/executor/hybrid_model_executor.h @@ -39,7 +39,7 @@ class HybridModelExecutor { ~HybridModelExecutor(); - Status Init(); + Status Init(ThreadPool *thread_pool = nullptr); const GraphExecutionContext* GetContext() const { return &context_; diff --git a/ge/hybrid/executor/subgraph_executor.cc b/ge/hybrid/executor/subgraph_executor.cc index 33a2846c..7fcdec5d 100644 --- a/ge/hybrid/executor/subgraph_executor.cc +++ b/ge/hybrid/executor/subgraph_executor.cc @@ -28,20 +28,30 @@ constexpr int kDefaultQueueSize = 16; constexpr int kDataInputIndex = 0; } -SubgraphExecutor::SubgraphExecutor(const GraphItem *graph_item, GraphExecutionContext *context, bool force_infer_shape) +SubgraphExecutor::SubgraphExecutor(const GraphItem *graph_item, GraphExecutionContext *context, bool force_infer_shape, + ThreadPool *pre_run_pool) : graph_item_(graph_item), context_(context), force_infer_shape_(force_infer_shape), - pre_run_pool_(kDefaultThreadNum), + pre_run_pool_(pre_run_pool), + own_thread_pool_(false), ready_queue_(kDefaultQueueSize) { } SubgraphExecutor::~SubgraphExecutor() { + if (own_thread_pool_ && pre_run_pool_ != nullptr) { + delete pre_run_pool_; + } GELOGD("[%s] SubgraphExecutor destroyed.", graph_item_->GetName().c_str()); } Status SubgraphExecutor::Init(const std::vector &inputs, const std::vector &input_desc) { + if (pre_run_pool_ == nullptr) { + pre_run_pool_ = new (std::nothrow) ThreadPool(kDefaultThreadNum); + GE_CHECK_NOTNULL(pre_run_pool_); + own_thread_pool_ = true; + } subgraph_context_.reset(new(std::nothrow)SubgraphContext(graph_item_, context_)); GE_CHECK_NOTNULL(subgraph_context_); GE_CHK_STATUS_RET(subgraph_context_->Init(), @@ -254,7 +264,8 @@ Status SubgraphExecutor::PrepareNode(const NodeItem &node_item, int group) { // only do shape inference and compilation for nodes with dynamic shapes. 
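  // Editor's note: the hunk below is why pre_run_pool_ became a pointer; each
  // commit to the pool is now preceded by a null check, while the commit/future
  // pattern itself is unchanged. A minimal illustration of that pattern, with
  // std::async standing in for ThreadPool::commit (this sketch is not the GE
  // API; it needs <future>):
  //
  //   std::future<int> prepared = std::async(std::launch::async, [] {
  //     return 0;  // infer shape, then compile; 0 stands in for SUCCESS
  //   });
  //   int rc = prepared.get();  // a downstream pipeline stage blocks here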
if (node_item.is_dynamic) { - auto prepare_future = pre_run_pool_.commit([this, p_node_state]() -> Status { + GE_CHECK_NOTNULL(pre_run_pool_); + auto prepare_future = pre_run_pool_->commit([this, p_node_state]() -> Status { GetContext().SetSessionId(context_->session_id); GetContext().SetContextId(context_->context_id); GE_CHK_STATUS_RET_NOLOG(InferShape(shape_inference_engine_.get(), *p_node_state)); @@ -349,7 +360,8 @@ Status SubgraphExecutor::NodeScheduled(NodeState *node_state) { node_state->GetNodeItem()->data_send_.size(), node_state->GetNodeItem()->ctrl_send_.size(), node_state->GetSwitchIndex(), node_state->GetMergeIndex()); - auto future = pre_run_pool_.commit([this, node_state]() -> Status { + GE_CHECK_NOTNULL(pre_run_pool_); + auto future = pre_run_pool_->commit([this, node_state]() -> Status { RECORD_CALLBACK_EVENT(context_, node_state->GetName().c_str(), "[NodeScheduled] Start"); std::function callback = [&](const NodeItem *node_item) { const auto &node_name = node_item->node_name; diff --git a/ge/hybrid/executor/subgraph_executor.h b/ge/hybrid/executor/subgraph_executor.h index 76732c37..be11ff59 100644 --- a/ge/hybrid/executor/subgraph_executor.h +++ b/ge/hybrid/executor/subgraph_executor.h @@ -33,7 +33,8 @@ namespace hybrid { // Executor for executing a subgraph class SubgraphExecutor { public: - SubgraphExecutor(const GraphItem *graph_item, GraphExecutionContext *context, bool force_infer_shape = false); + SubgraphExecutor(const GraphItem *graph_item, GraphExecutionContext *context, bool force_infer_shape = false, + ThreadPool *pre_run_pool = nullptr); ~SubgraphExecutor(); Status InitForPartialExecution(const std::vector &inputs, @@ -124,7 +125,8 @@ class SubgraphExecutor { GraphExecutionContext *context_; std::unique_ptr subgraph_context_; bool force_infer_shape_; - ThreadPool pre_run_pool_; + ThreadPool *pre_run_pool_; + bool own_thread_pool_; BlockingQueue ready_queue_; std::unique_ptr shape_inference_engine_; diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc index 426d3233..ca07d2ae 100755 --- a/ge/single_op/single_op_model.cc +++ b/ge/single_op/single_op_model.cc @@ -713,7 +713,9 @@ Status SingleOpModel::BuildDynamicOp(StreamResource &resource, DynamicSingleOp & device_id, resource.GetStream())); GE_CHECK_NOTNULL(single_op.hybrid_model_executor_); - GE_CHK_STATUS_RET(single_op.hybrid_model_executor_->Init(), "[Init][HybridModelExecutor]Failed."); + ThreadPool *thread_pool = nullptr; + GE_CHK_STATUS_RET_NOLOG(resource.GetThreadPool(&thread_pool)); + GE_CHK_STATUS_RET(single_op.hybrid_model_executor_->Init(thread_pool), "[Init][HybridModelExecutor]Failed."); return SUCCESS; } return BuildTaskListForDynamicOp(&resource, single_op); diff --git a/ge/single_op/stream_resource.cc b/ge/single_op/stream_resource.cc index 9fe8f26a..10a8f72b 100755 --- a/ge/single_op/stream_resource.cc +++ b/ge/single_op/stream_resource.cc @@ -25,6 +25,7 @@ namespace ge { namespace { // limit available device mem size 1M const uint32_t kFuzzDeviceBufferSize = 1 * 1024 * 1024; +constexpr int kDefaultThreadNum = 4; } StreamResource::StreamResource(uintptr_t resource_id) : resource_id_(resource_id) { @@ -219,6 +220,16 @@ Status StreamResource::BuildOperator(const ModelData &model_data, SingleOp **sin return SUCCESS; } +Status StreamResource::GetThreadPool(ThreadPool **thread_pool) { + GE_CHECK_NOTNULL(thread_pool); + if (thread_pool_ == nullptr) { + thread_pool_.reset(new (std::nothrow) ThreadPool(kDefaultThreadNum)); + GE_CHECK_NOTNULL(thread_pool_); + } + *thread_pool 
= thread_pool_.get(); + return SUCCESS; +} + const uint8_t *StreamResource::GetMemoryBase() const { if (memory_list_.empty()) { return nullptr; diff --git a/ge/single_op/stream_resource.h b/ge/single_op/stream_resource.h index 8986634b..f1e1bebb 100755 --- a/ge/single_op/stream_resource.h +++ b/ge/single_op/stream_resource.h @@ -54,6 +54,8 @@ class StreamResource { return device_buffer_; } + Status GetThreadPool(ThreadPool **thread_pool); + private: uint8_t *DoMallocMemory(const std::string &purpose, size_t size, @@ -66,6 +68,7 @@ class StreamResource { std::vector weight_list_; std::unordered_map> op_map_; std::unordered_map> dynamic_op_map_; + std::unique_ptr thread_pool_; rtStream_t stream_ = nullptr; std::mutex mu_; std::mutex stream_mu_; diff --git a/metadef b/metadef index 3ace5b6f..84e7ab39 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 3ace5b6f10e0af784a1c3211fd769d6e8860e864 +Subproject commit 84e7ab39b0daf7ca2b2f5549e3279647da7875e2 diff --git a/parser b/parser index db68a1a4..ffd94df4 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit db68a1a4f1a6ae69dbf9a5f338392d50ea3874e3 +Subproject commit ffd94df471f7dd2b1928cc8d27e43e7210aaa7e7 diff --git a/tests/ut/ge/single_op/stream_resource_unittest.cc b/tests/ut/ge/single_op/stream_resource_unittest.cc index e07fc39d..e4ab469e 100644 --- a/tests/ut/ge/single_op/stream_resource_unittest.cc +++ b/tests/ut/ge/single_op/stream_resource_unittest.cc @@ -66,6 +66,9 @@ TEST_F(UtestStreamResource, test_build_op) { res.op_map_[0].reset(single_op); res.dynamic_op_map_[1].reset(dynamic_single_op); + ThreadPool *thread_pool = nullptr; + EXPECT_EQ(res.GetThreadPool(&thread_pool), SUCCESS); + EXPECT_EQ(res.GetOperator(0), nullptr); EXPECT_EQ(res.GetDynamicOperator(1), nullptr); EXPECT_EQ(res.BuildOperator(model_data, &single_op, 0), SUCCESS); From f96e62ad9204bc789de8416bae7b34457511f9ac Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Sat, 10 Jul 2021 09:23:40 +0800 Subject: [PATCH 181/226] Update submodule --- metadef | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metadef b/metadef index 84e7ab39..f9a47a45 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 84e7ab39b0daf7ca2b2f5549e3279647da7875e2 +Subproject commit f9a47a45cdd7e6dc507a15291fcb769f96b859b3 diff --git a/parser b/parser index ffd94df4..b42a99ea 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit ffd94df471f7dd2b1928cc8d27e43e7210aaa7e7 +Subproject commit b42a99ea6e1be75156650675fd0aeabca6cb3de9 From ca3b811ba1142c69ac31865e2d4df779d5dcf576 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Sat, 10 Jul 2021 15:39:27 +0800 Subject: [PATCH 182/226] Fix root node for MergeInputNodes --- ge/hybrid/model/hybrid_model_builder.cc | 5 +- ge/opskernel_manager/ops_kernel_manager.cc | 2 +- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 139 ++++++++++++++------- 3 files changed, 98 insertions(+), 48 deletions(-) diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index f8ec6db1..c722d269 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -588,7 +588,10 @@ Status HybridModelBuilder::MergeInputNodes(ComputeGraph &graph) { for (auto &peer_in_data_anchor : out_data_anchor->GetPeerInDataAnchors()) { auto dst_node = peer_in_data_anchor->GetOwnerNode(); GE_CHECK_NOTNULL(dst_node); - root_nodes.emplace(dst_node); + const auto in_nodes = dst_node->GetInDataNodes(); + if (std::all_of(in_nodes.begin(), in_nodes.end(), [](const 
NodePtr &n) { return n->GetType() == DATA; })) { + root_nodes.emplace(dst_node); + } GE_CHK_STATUS_RET_NOLOG(DoUnlinkDataAnchors(out_data_anchor, peer_in_data_anchor)); GE_CHK_STATUS_RET_NOLOG(DoLinkDataAnchors(src_out_anchor, peer_in_data_anchor)); } diff --git a/ge/opskernel_manager/ops_kernel_manager.cc b/ge/opskernel_manager/ops_kernel_manager.cc index fc7bbdc2..d35ebda5 100644 --- a/ge/opskernel_manager/ops_kernel_manager.cc +++ b/ge/opskernel_manager/ops_kernel_manager.cc @@ -279,7 +279,7 @@ void OpsKernelManager::InitOpsKernelInfo() { if (it.second.empty()) { continue; } - auto comp_func = [this, &instance_ptr](const OpInfo &op_a, const OpInfo &op_b) -> bool { + auto comp_func = [&instance_ptr](const OpInfo &op_a, const OpInfo &op_b) -> bool { const string &a = op_a.engine; const string &b = op_b.engine; // check if a or b is registered diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index 1d1c4fa9..b09211cb 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -43,14 +43,11 @@ #include "graph/testcase/ge_graph/graph_builder_utils.h" #include "single_op/task/build_task_utils.h" #include "graph/op_desc_impl.h" -#undef private -#undef protected using namespace std; -using namespace testing; -using namespace ge; -using namespace hybrid; +namespace ge { +using namespace hybrid; class UtestGeHybrid : public testing::Test { protected: @@ -61,16 +58,30 @@ class UtestGeHybrid : public testing::Test { } }; -static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") { +static ge::OpDescPtr CreateOpDesc(string name = "", string type = "", int in_num = 0, int out_num = 0) { auto op_desc = std::make_shared(name, type); op_desc->SetStreamId(0); - op_desc->SetId(0); + static int32_t index = 0; + op_desc->SetId(index++); + + GeTensorDesc tensor(GeShape(), FORMAT_ND, DT_INT64); + TensorUtils::SetSize(tensor, 64); + vector input_offset; + for (int i = 0; i < in_num; ++i) { + op_desc->AddInputDesc(tensor); + input_offset.emplace_back(index * 64 + i * 64); + } + op_desc->SetInputOffset(input_offset); + + vector output_offset; + for (int i = 0; i < out_num; ++i) { + op_desc->AddOutputDesc(tensor); + output_offset.emplace_back(index * 64 + in_num * 64 + i * 64); + } + op_desc->SetOutputOffset(output_offset); op_desc->SetWorkspace({}); - ; op_desc->SetWorkspaceBytes({}); - op_desc->SetInputOffset({}); - op_desc->SetOutputOffset({}); ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF_AIVEC"); bool support_dynamic = true; @@ -414,49 +425,84 @@ TEST_F(UtestGeHybrid, test_parse_parallel_group) { } TEST_F(UtestGeHybrid, unfold_subgraphs_success) { - ComputeGraphPtr merged_graph = nullptr; + ComputeGraphPtr root_graph = std::make_shared("root_graph"); + auto partitioned_call_op_desc = CreateOpDesc("partitioned_call", PARTITIONEDCALL, 3, 1); + auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc); + partitioned_call_op_desc->AddSubgraphName("f"); + partitioned_call_op_desc->SetSubgraphInstanceName(0, "sub_graph"); ComputeGraphPtr sub_sub_graph1 = std::make_shared("while_cond"); - OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA); - NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc); + { + OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA); + NodePtr sub_sub_graph_while_cond_data_node = 
sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc); + sub_sub_graph1->SetParentGraph(root_graph); + root_graph->AddSubGraph(sub_sub_graph1); + } ComputeGraphPtr sub_sub_graph2 = std::make_shared("while_body"); - /*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT); - NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/ - OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA); - NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc); - sub_sub_graph2->SetGraphUnknownFlag(true); - /*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD); - NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_node); - sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node); - sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/ + { + OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA); + NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc); + sub_sub_graph2->SetGraphUnknownFlag(true); + sub_sub_graph2->SetParentGraph(root_graph); + root_graph->AddSubGraph(sub_sub_graph2); + } + // Will unfold to merged_graph. ComputeGraphPtr sub_graph = std::make_shared("sub_graph"); - OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE); - NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc); - sub_graph->SetGraphUnknownFlag(true); - sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond"); - sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body"); - sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond"); - sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body"); + { + OpDescPtr sub_graph_data1_op_desc = CreateOpDesc("data1", DATA, 1, 1); + OpDescPtr sub_graph_data2_op_desc = CreateOpDesc("data2", DATA, 1, 1); + OpDescPtr sub_graph_data3_op_desc = CreateOpDesc("data3", DATA, 1, 1); + NodePtr sub_graph_data1_node = sub_graph->AddNode(sub_graph_data1_op_desc); + NodePtr sub_graph_data2_node = sub_graph->AddNode(sub_graph_data2_op_desc); + NodePtr sub_graph_data3_node = sub_graph->AddNode(sub_graph_data3_op_desc); + + AttrUtils::SetInt(sub_graph_data1_op_desc, ATTR_NAME_PARENT_NODE_INDEX, 0); + AttrUtils::SetInt(sub_graph_data2_op_desc, ATTR_NAME_PARENT_NODE_INDEX, 1); + AttrUtils::SetInt(sub_graph_data3_op_desc, ATTR_NAME_PARENT_NODE_INDEX, 2); + + OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE, 2, 2); + NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc); + sub_sub_graph1->SetParentNode(sub_graph_while_node); + sub_sub_graph2->SetParentNode(sub_graph_while_node); + sub_graph_while_op_desc->AddSubgraphName("while_cond"); + sub_graph_while_op_desc->SetSubgraphInstanceName(0, "while_cond"); + sub_graph_while_op_desc->AddSubgraphName("while_body"); + sub_graph_while_op_desc->SetSubgraphInstanceName(1, "while_body"); + + OpDescPtr sub_graph_matmul_op_desc = CreateOpDesc("matmul", MATMUL, 2, 1); + NodePtr sub_graph_matmul_node = sub_graph->AddNode(sub_graph_matmul_op_desc); + + OpDescPtr sub_graph_output_op_desc = CreateOpDesc("output", NETOUTPUT, 1, 1); + NodePtr sub_graph_output_node = sub_graph->AddNode(sub_graph_output_op_desc); + + GraphUtils::AddEdge(sub_graph_data1_node->GetOutDataAnchor(0), 
sub_graph_while_node->GetInDataAnchor(0)); + GraphUtils::AddEdge(sub_graph_data2_node->GetOutDataAnchor(0), sub_graph_while_node->GetInDataAnchor(1)); + GraphUtils::AddEdge(sub_graph_data3_node->GetOutDataAnchor(0), sub_graph_matmul_node->GetInDataAnchor(0)); + GraphUtils::AddEdge(sub_graph_while_node->GetOutDataAnchor(0), sub_graph_matmul_node->GetInDataAnchor(1)); + GraphUtils::AddEdge(sub_graph_matmul_node->GetOutDataAnchor(0), sub_graph_output_node->GetInDataAnchor(0)); + + sub_graph->SetGraphUnknownFlag(true); + sub_graph->SetParentNode(partitioned_call_node); + sub_graph->SetParentGraph(root_graph); + root_graph->AddSubGraph(sub_graph); + } - ComputeGraphPtr root_graph = std::make_shared("root_graph"); - auto partitioned_call_op_desc = MakeShared("partitioned_call", PARTITIONEDCALL); - auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc); - partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph"); - partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph"); - - root_graph->AddSubGraph(sub_sub_graph1); - root_graph->AddSubGraph(sub_sub_graph2); - sub_sub_graph1->SetParentGraph(root_graph); - sub_sub_graph2->SetParentGraph(root_graph); - sub_sub_graph1->SetParentNode(sub_graph_while_node); - sub_sub_graph2->SetParentNode(sub_graph_while_node); - - root_graph->AddSubGraph(sub_graph); - sub_graph->SetParentNode(partitioned_call_node); - sub_graph->SetParentGraph(root_graph); + OpDescPtr graph_data1_op_desc = CreateOpDesc("data1", DATA, 1, 1); + OpDescPtr graph_data2_op_desc = CreateOpDesc("data2", DATA, 1, 1); + OpDescPtr graph_data3_op_desc = CreateOpDesc("data3", DATA, 1, 1); + NodePtr graph_data1_node = root_graph->AddNode(graph_data1_op_desc); + NodePtr graph_data2_node = root_graph->AddNode(graph_data2_op_desc); + NodePtr graph_data3_node = root_graph->AddNode(graph_data3_op_desc); + AttrUtils::SetInt(graph_data1_op_desc, ATTR_NAME_INDEX, 0); + AttrUtils::SetInt(graph_data2_op_desc, ATTR_NAME_INDEX, 1); + AttrUtils::SetInt(graph_data3_op_desc, ATTR_NAME_INDEX, 2); + GraphUtils::AddEdge(graph_data1_node->GetOutDataAnchor(0), partitioned_call_node->GetInDataAnchor(0)); + GraphUtils::AddEdge(graph_data2_node->GetOutDataAnchor(0), partitioned_call_node->GetInDataAnchor(1)); + GraphUtils::AddEdge(graph_data3_node->GetOutDataAnchor(0), partitioned_call_node->GetInDataAnchor(2)); + ComputeGraphPtr merged_graph = nullptr; GeRootModelPtr root_model = MakeShared(root_graph); HybridModel hybrid_model(root_model); HybridModelBuilder hybrid_model_builder(hybrid_model); @@ -787,4 +833,5 @@ TEST_F(UtestGeHybrid, TestTaskExecuteAsync) { std::vector> tasks; AiCoreNodeTask node_task(std::move(tasks)); ASSERT_EQ(node_task.ExecuteAsync(task_context, nullptr), SUCCESS); -} \ No newline at end of file +} +} // namespace ge \ No newline at end of file From 69a8d570d0d911ff49d9d39f8e30550e378216d8 Mon Sep 17 00:00:00 2001 From: zhupuxu Date: Fri, 9 Jul 2021 17:49:27 +0800 Subject: [PATCH 183/226] graph id Signed-off-by: zhupuxu --- ge/common/profiling/ge_profiling.cc | 32 +++++ ge/common/profiling/profiling_manager.cc | 51 +++++++- ge/common/profiling/profiling_manager.h | 19 ++- ge/graph/execute/graph_execute.cc | 44 ++++++- ge/graph/execute/graph_execute.h | 5 + ge/graph/load/model_manager/davinci_model.cc | 13 +- ge/graph/load/model_manager/model_manager.cc | 22 +++- ge/graph/manager/graph_manager.cc | 5 + ge/session/inner_session.cc | 4 + inc/framework/common/profiling/ge_profiling.h | 2 + .../graph/execute/graph_execute_unittest.cc | 44 +++++++ 
.../ge/graph/load/model_manager_unittest.cc | 30 ++++- .../ge_profiling_manager_unittest.cc | 113 ++++++++++++++++++ 13 files changed, 370 insertions(+), 14 deletions(-) diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc index fd104e90..1b5e5c84 100644 --- a/ge/common/profiling/ge_profiling.cc +++ b/ge/common/profiling/ge_profiling.cc @@ -20,9 +20,11 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/debug/log.h" #include "graph/load/graph_loader.h" +#include "graph/ge_context.h" #include "init/gelib.h" #include "framework/common/ge_inner_error_codes.h" #include "model/ge_model.h" +#include "framework/omg/omg_inner_types.h" namespace { const uint32_t kDeviceListIndex = 3; @@ -35,6 +37,7 @@ const std::string kProfilingStop = "prof_stop"; const std::string kProfModelSubscribe = "prof_model_subscribe"; const std::string kProfModelUnsubscribe = "prof_model_cancel_subscribe"; const std::string kRtSetDeviceRegName = "profiling"; +const std::string kPofilingModelId = "modelId"; const std::map kProfCommandTypeMap = { {kProfCommandhandleInit, kProfilingInit}, @@ -195,6 +198,31 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le return ge::PARAM_INVALID; } } + auto &profiling_manager = ge::ProfilingManager::Instance(); + auto is_train = domi::GetContext().train_flag; + if (type == kProfCommandhandleModelSubscribe && is_train) { + profiling_manager.SetSubscribeInfo(prof_config_param->profSwitch, prof_config_param->modelId, true); + return ge::SUCCESS; + } + auto is_subscribe = profiling_manager.GetSubscribeInfo().is_subscribe; + if (type == kProfCommandhandleModelUnsubscribe && is_subscribe) { + prof_params.clear(); + prof_params.emplace_back(kPofilingModelId); + uint32_t model_id = 0; + // GraphId is actually stored in prof_config_param + uint32_t graph_id = prof_config_param->modelId; + auto ret = profiling_manager.GetModelIdFromGraph(graph_id, model_id); + if (ret != ge::SUCCESS) { + GELOGE(ret, "graph_id:%u not not found", graph_id); + REPORT_INPUT_ERROR("E10001", std::vector({"value", "parameter", "reason"}), + std::vector({std::to_string(graph_id), + "GraphToModelMap", + "graph_id does not exist!"})); + return ge::FAILED; + } + + prof_params.emplace_back(std::to_string(model_id)); + } ge::GraphLoader graph_loader; ge::Command command; command.cmd_params.clear(); @@ -248,3 +276,7 @@ ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream "tag id must be 0 when first run, must be 1 when second run"})); return ge::FAILED; } + +ge::Status ProfGetDeviceFormGraphId(uint32_t graph_id, uint32_t &device_id) { + return ge::ProfilingManager::Instance().GetDeviceIdFromGraph(graph_id, device_id); +} diff --git a/ge/common/profiling/profiling_manager.cc b/ge/common/profiling/profiling_manager.cc index 7fd63d7e..0464491d 100644 --- a/ge/common/profiling/profiling_manager.cc +++ b/ge/common/profiling/profiling_manager.cc @@ -66,10 +66,13 @@ const std::string kIdx = "idx"; namespace ge { ProfilingManager::ProfilingManager() - : is_load_profiling_(false), is_execute_profiling_(false), is_training_trace_(false), subscribe_count_(0) { - prof_cb_.msprofCtrlCallback = nullptr; - prof_cb_.msprofReporterCallback = nullptr; - index_id_ = UINT64_MAX; + : is_load_profiling_(false), + is_execute_profiling_(false), + is_training_trace_(false), + subscribe_count_(0), + prof_cb_({nullptr, nullptr}), + index_id_(UINT64_MAX), + subscribe_info_({false, 0, 0}) { } ProfilingManager::~ProfilingManager() {} @@ -610,6 
+613,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi // profiling plugin uninit PluginUnInit(); + CleanSubscribeInfo(); + int32_t dev_num = -1; rtError_t rt_ret = rtProfilerStop(PROF_MODEL_LOAD_MASK, dev_num, nullptr); if (rt_ret != RT_ERROR_NONE) { @@ -632,6 +637,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFi } device_id_module_map_.clear(); device_id_.clear(); + device_id_map_.clear(); + model_id_map_.clear(); GELOGI("Prof finalize success."); #endif return SUCCESS; @@ -1057,4 +1064,40 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpP return; } +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::GetDeviceIdFromGraph( + uint32_t graph_id, uint32_t &device_id) { + auto iter = device_id_map_.find(graph_id); + if (iter != device_id_map_.end()) { + device_id = iter->second; + return SUCCESS; + } + REPORT_CALL_ERROR("E19999", "graph_id:%u does not exist!", graph_id); + GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%u does not exist!", graph_id); + return FAILED; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::SetSubscribeInfo( + uint64_t prof_switch, uint32_t model_id, bool is_subscribe) { + subscribe_info_.is_subscribe = is_subscribe; + subscribe_info_.prof_switch = prof_switch; + subscribe_info_.graph_id = model_id; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::CleanSubscribeInfo() { + subscribe_info_.is_subscribe = false; + subscribe_info_.prof_switch = 0; + subscribe_info_.graph_id = 0; +} + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::GetModelIdFromGraph( + uint32_t graph_id, uint32_t &model_id) { + auto iter = model_id_map_.find(graph_id); + if (iter != model_id_map_.end()) { + model_id = iter->second; + return SUCCESS; + } + REPORT_CALL_ERROR("E19999", "graph_id:%u does not exist!", graph_id); + GELOGE(PARAM_INVALID, "[Check][GraphId]graph_id:%u does not exist!", graph_id); + return FAILED; +} } // namespace ge diff --git a/ge/common/profiling/profiling_manager.h b/ge/common/profiling/profiling_manager.h index 25929895..e5137562 100755 --- a/ge/common/profiling/profiling_manager.h +++ b/ge/common/profiling/profiling_manager.h @@ -62,6 +62,12 @@ struct DeviceSubsInfo { uint32_t subscribe_count; }; +struct ProfSubscribeInfo { + bool is_subscribe; + uint64_t prof_switch; + uint32_t graph_id; +}; + struct MsprofCallback { MsprofCtrlCallback msprofCtrlCallback; MsprofReporterCallback msprofReporterCallback; @@ -102,7 +108,15 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { void ReportData(const int32_t &device_id, const std::string &data, const std::string &tag_name); Status ProfileStepInfo(uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, int32_t device_id); void SetStepInfoIndex(uint64_t index_id) { index_id_ = index_id; } - uint64_t GetStepInfoIndex() { return index_id_; } + uint64_t GetStepInfoIndex() const { return index_id_; } + void SetGraphIdToDeviceMap(uint32_t graph_id, uint32_t device_id) { device_id_map_[graph_id] = device_id; } + Status GetDeviceIdFromGraph(uint32_t graph_id, uint32_t &device_id); + void SetSubscribeInfo(uint64_t prof_switch, uint32_t model_id, bool is_subscribe); + const ProfSubscribeInfo &GetSubscribeInfo() const { return subscribe_info_; } + void CleanSubscribeInfo(); + void SetGraphIdToModelMap(uint32_t graph_id, uint32_t model_id) { model_id_map_[graph_id] = model_id; } + Status 
GetModelIdFromGraph(uint32_t graph_id, uint32_t &model_id); + private: Status InitFromOptions(const Options &options, MsprofGeOptions &prof_conf); Status ParseOptions(const std::string &options); @@ -130,6 +144,9 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { std::string bp_point_; uint32_t reporter_max_len_ = 0; uint64_t index_id_; + std::map device_id_map_; // key: graph_id, value: device_id + std::map model_id_map_; // key: graph_id, value: model_id + ProfSubscribeInfo subscribe_info_; }; } // namespace ge #endif // GE_COMMON_PROFILING_PROFILING_MANAGER_H_ diff --git a/ge/graph/execute/graph_execute.cc b/ge/graph/execute/graph_execute.cc index ba35e7c0..03abf91f 100755 --- a/ge/graph/execute/graph_execute.cc +++ b/ge/graph/execute/graph_execute.cc @@ -21,6 +21,7 @@ #include "graph/load/model_manager/model_manager.h" #include "graph/load/model_manager/davinci_model.h" +#include "common/profiling/profiling_manager.h" namespace ge { using Uint32Pair = pair; @@ -365,7 +366,11 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro GELOGE(GE_GRAPH_SYNC_MODEL_FAILED, "[SyncExecute][Model] Error! graph id:%u", graph_id); return GE_GRAPH_SYNC_MODEL_FAILED; } - + ret = ModelSubscribe(graph_id); + if (ret != SUCCESS) { + GELOGE(ret, "[Call][ModelSubscribe] failed, graph_id:%u", graph_id); + return ret; + } return SUCCESS; } @@ -776,4 +781,41 @@ Status GraphExecutor::GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint } return SUCCESS; } + +Status GraphExecutor::GetModelByID(uint32_t model_id, std::shared_ptr &davinci_model) { + auto model_manager = ge::ModelManager::GetInstance(); + GE_CHECK_NOTNULL(model_manager); + davinci_model = model_manager->GetModel(static_cast(model_id)); + if (davinci_model == nullptr) { + REPORT_INNER_ERROR("E19999", "GetModel from model_manager fail, model_id:%u", model_id); + GELOGE(ge::FAILED, "[Get][Model] failed, Model id:%d is invaild or model is not loaded.", model_id); + return ge::FAILED; + } + return ge::SUCCESS; +} + +Status GraphExecutor::ModelSubscribe(uint32_t graph_id) { + auto &profiling_manager = ProfilingManager::Instance(); + const auto &subcribe_info = profiling_manager.GetSubscribeInfo(); + if (subcribe_info.is_subscribe) { + std::shared_ptr davinci_model = nullptr; + uint32_t model_id = 0; + Status ret = profiling_manager.GetModelIdFromGraph(graph_id, model_id); + if (ret != SUCCESS) { + GELOGE(ret, "[Call][GetModelIdFromGraph] failed, graph_id:%u", graph_id); + return ret; + } + ret = GetModelByID(model_id, davinci_model); + if (ret != SUCCESS) { + GELOGE(ret, "[Call][GetModelByID] failed, model_id:%u", model_id); + return ret; + } + ret = profiling_manager.ProfModelSubscribe(subcribe_info.prof_switch, davinci_model.get()); + if (ret != SUCCESS) { + GELOGE(ret, "[Call][ProfModelSubscribe] failed"); + return ret; + } + } + return SUCCESS; +} } // namespace ge diff --git a/ge/graph/execute/graph_execute.h b/ge/graph/execute/graph_execute.h index b6d56dff..56e322f1 100755 --- a/ge/graph/execute/graph_execute.h +++ b/ge/graph/execute/graph_execute.h @@ -38,6 +38,7 @@ #include "graph/model.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" +#include "graph/load/model_manager/davinci_model.h" namespace ge { class GraphExecutor { @@ -148,6 +149,10 @@ class GraphExecutor { static Status SetCallback(uint32_t model_id, const GeRootModelPtr &ge_root_model, const RunAsyncCallback &callback); + Status ModelSubscribe(uint32_t graph_id); + + Status GetModelByID(uint32_t 
model_id, std::shared_ptr &davinci_model); + bool init_flag_; bool train_graph_flag_; diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index ddd6c0c4..1d6f7aff 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -62,6 +62,7 @@ #include "graph/common/omg_util.h" #include "graph/build/memory/block_mem_assigner.h" #include "graph/manager/session_scope_mem_allocator.h" +#include "framework/omg/omg_inner_types.h" // create std::thread, catch exceptions using try/catch #define CREATE_STD_THREAD(thread_id, func, args) \ @@ -763,8 +764,16 @@ void DavinciModel::SaveSpecifyAttrValues(const OpDescPtr &op_desc) { } Status DavinciModel::ReportProfilingData() { - ProfilingManager::Instance().ReportProfilingData(model_id_, GetTaskDescInfo()); - GE_CHK_STATUS(SinkModelProfile(), "[Sink][ModelProfile] failed, model_id:%u.", model_id_); + bool is_train = domi::GetContext().train_flag; + auto model_id = model_id_; + auto &profiling_manager = ProfilingManager::Instance(); + auto graph_id = runtime_param_.graph_id; + if (is_train) { + GELOGD("Replace model_id:%u with graph_id:%u, when training.", model_id, graph_id); + model_id = graph_id; + } + profiling_manager.ReportProfilingData(model_id, GetTaskDescInfo()); + GE_CHK_STATUS(SinkModelProfile(), "[Sink][ModelProfile] failed, model_id:%u.", model_id); return SUCCESS; } diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 8b44daea..f1db6a99 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -368,7 +368,17 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptrGetRuntimeParam().graph_id; + if(subcribe_info.graph_id == graph_id) { + profiling_manager.SetGraphIdToModelMap(graph_id, model_id); + } + else { + GELOGW("graph_id:%u is not in subcribe info.", graph_id); + } + } return ret; } @@ -758,12 +768,15 @@ Status ModelManager::HandleProfModelUnsubscribeCommand(const Command &command) { if (ret != SUCCESS) { return ret; } - - if (ProfilingManager::Instance().ProfModelUnsubscribe(static_cast(davinci_model.get())) != SUCCESS) { + auto &profiling_manager = ProfilingManager::Instance(); + if (profiling_manager.ProfModelUnsubscribe(static_cast(davinci_model.get())) != SUCCESS) { GELOGE(FAILED, "[Handle][ProfModelUnsubscribe] failed."); return FAILED; } - + auto is_subscribe = profiling_manager.GetSubscribeInfo().is_subscribe; + if (is_subscribe) { + profiling_manager.CleanSubscribeInfo(); + } return SUCCESS; } @@ -1826,5 +1839,4 @@ Status ModelManager::CheckAicpuOpList(GeModelPtr ge_model) { "[Call][LaunchKernelCheckAicpuOp] failed."); return SUCCESS; } - } // namespace ge diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index a3605ec2..84ed3ab0 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -109,6 +109,7 @@ #include "register/custom_pass_helper.h" #include "external/graph/types.h" #include "common/util/error_manager/error_manager.h" +#include "common/profiling/profiling_manager.h" namespace { const char *const kSummary = "Summary"; @@ -462,6 +463,9 @@ Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph, const std::map &options, const OmgContext &omg_context) { IncreaseGraphCount(graph_id); + auto device_id = GetContext().DeviceId(); + GELOGD("Device id is %u", device_id); + 
ProfilingManager::Instance().SetGraphIdToDeviceMap(graph_id, device_id); // validation for adding graphs of same graph_id in multi-thread secenario // 1.previous thread owns same graph_id has finished the AddGraph procession if (GetAddGraphCondition(graph_id) == kDoneAdded) { @@ -1715,6 +1719,7 @@ Status GraphManager::ParseTrainGraphFlag(bool &train_flag) { train_flag = true; } } + domi::GetContext().train_flag = train_flag; GELOGI("Is train flag: %d.", train_flag); return SUCCESS; } diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index fcb9d233..1dcc2996 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -35,6 +35,7 @@ #include "graph/utils/tensor_adapter.h" #include "runtime/mem.h" #include "ir_build/option_utils.h" +#include "common/profiling/profiling_manager.h" namespace ge { namespace { @@ -231,6 +232,9 @@ Status InnerSession::GetVariable(const std::string &name, Tensor &val) { Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph) { std::map options; + auto device_id = GetContext().DeviceId(); + GELOGD("Device id is %u", device_id); + ProfilingManager::Instance().SetGraphIdToDeviceMap(graph_id, device_id); return AddGraph(graph_id, graph, options); } diff --git a/inc/framework/common/profiling/ge_profiling.h b/inc/framework/common/profiling/ge_profiling.h index 7a238b2f..c87c082c 100644 --- a/inc/framework/common/profiling/ge_profiling.h +++ b/inc/framework/common/profiling/ge_profiling.h @@ -50,4 +50,6 @@ GE_FUNC_VISIBILITY ge::Status ProfCommandHandle(ProfCommandHandleType type, void /// GE_FUNC_VISIBILITY ge::Status ProfSetStepInfo(uint64_t index_id, uint16_t tag_id, rtStream_t stream); +GE_FUNC_VISIBILITY ge::Status ProfGetDeviceFormGraphId(uint32_t graph_id, uint32_t &device_id); + #endif // INC_FRAMEWORK_COMMON_GE_PROFILING_H_ diff --git a/tests/ut/ge/graph/execute/graph_execute_unittest.cc b/tests/ut/ge/graph/execute/graph_execute_unittest.cc index 6d982454..3e32405b 100644 --- a/tests/ut/ge/graph/execute/graph_execute_unittest.cc +++ b/tests/ut/ge/graph/execute/graph_execute_unittest.cc @@ -17,6 +17,8 @@ #include #include +#include "common/profiling/profiling_manager.h" + #define protected public #define private public #include "graph/execute/graph_execute.h" @@ -125,4 +127,46 @@ TEST_F(UtestGraphExecuteTest, test_set_callback) { auto status = executor.SetCallback(1, ge_root_model, callback); EXPECT_EQ(status, SUCCESS); } + +TEST_F(UtestGraphExecuteTest, test_without_subscribe) { + GraphExecutor executor; + auto ret = executor.ModelSubscribe(1); + EXPECT_EQ(ret, SUCCESS); +} + +TEST_F(UtestGraphExecuteTest, test_with_subscribe_failed1) { + GraphExecutor executor; + uint32_t graph_id = 1; + auto &profiling_manager = ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, 1, true); + auto ret = executor.ModelSubscribe(graph_id); + profiling_manager.CleanSubscribeInfo(); + EXPECT_NE(ret, SUCCESS); +} + +TEST_F(UtestGraphExecuteTest, test_with_subscribe_failed2) { + GraphExecutor executor; + uint32_t graph_id = 1; + uint32_t model_id = 1; + auto &profiling_manager = ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, 1, true); + profiling_manager.SetGraphIdToModelMap(2, model_id); + auto ret = executor.ModelSubscribe(graph_id); + profiling_manager.CleanSubscribeInfo(); + EXPECT_NE(ret, SUCCESS); +} + +TEST_F(UtestGraphExecuteTest, test_with_subscribe_success) { + GraphExecutor executor; + uint32_t graph_id = 1; + uint32_t model_id = 1; + GraphNodePtr graph_node = 
std::make_shared(graph_id); + DavinciModel model(model_id, nullptr); + auto &profiling_manager = ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, 1, true); + profiling_manager.SetGraphIdToModelMap(graph_id, model_id); + auto ret = executor.ModelSubscribe(graph_id); + profiling_manager.CleanSubscribeInfo(); + EXPECT_EQ(ret, SUCCESS); +} } // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/graph/load/model_manager_unittest.cc b/tests/ut/ge/graph/load/model_manager_unittest.cc index 166ae4af..65b70a24 100644 --- a/tests/ut/ge/graph/load/model_manager_unittest.cc +++ b/tests/ut/ge/graph/load/model_manager_unittest.cc @@ -26,6 +26,7 @@ #include "graph/load/graph_loader.h" #include "graph/load/model_manager/davinci_model.h" #include "graph/ops_stub.h" +#include "common/profiling/profiling_manager.h" using namespace std; using namespace testing; @@ -135,7 +136,8 @@ class UtestModelManagerModelManager : public testing::Test { class DModelListener : public ModelListener { public: DModelListener(){}; - uint32_t OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t resultCode) { return 0; } + uint32_t OnComputeDone(uint32_t model_id, uint32_t data_index, + uint32_t resultCode, std::vector &outputs) { return 0; } }; TEST_F(UtestModelManagerModelManager, case_is_need_hybrid_load) { @@ -426,4 +428,30 @@ TEST_F(UtestModelManagerModelManager, test_launch_kernel_cust_aicpu) { EXPECT_EQ(mm.LaunchKernelCustAicpuSo("deleteCustOp"), SUCCESS); EXPECT_TRUE(mm.cust_aicpu_so_.empty()); } + +shared_ptr listerner(new DModelListener()); +TEST_F(UtestModelManagerModelManager, test_load_model_online) { + ModelManager mm; + uint32_t model_id = 1; + ComputeGraphPtr graph = std::make_shared("test"); + GeRootModelPtr ge_root_model = make_shared(graph); + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, model_id, true); + Status ret = mm.LoadModelOnline(model_id, ge_root_model, listerner); + profiling_manager.CleanSubscribeInfo(); +} + +TEST_F(UtestModelManagerModelManager, command_profiling) { + ModelManager manager; + uint32_t model_id = 1; + Command cmd; + auto model = std::make_shared(1, listerner); + model->SetId(model_id); + cmd.cmd_params.push_back("modelId"); + cmd.cmd_params.push_back(to_string(model_id)); + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, model_id, true); + Status ret = manager.HandleProfModelUnsubscribeCommand(cmd); + profiling_manager.CleanSubscribeInfo(); +} } // namespace ge diff --git a/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc b/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc index aae3f535..35879df8 100644 --- a/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc +++ b/tests/ut/ge/profiling/ge_profiling_manager_unittest.cc @@ -21,11 +21,16 @@ #include #include +#include "graph/load/model_manager/davinci_model.h" + #define protected public #define private public #include "common/profiling/profiling_manager.h" #include "graph/ge_local_context.h" #include "inc/framework/common/profiling/ge_profiling.h" +#include "graph/manager/graph_manager.h" +#include "graph/ops_stub.h" +#include "inc/framework/omg/omg_inner_types.h" #undef protected #undef private @@ -43,6 +48,23 @@ int32_t ReporterCallback(uint32_t moduleId, uint32_t type, void *data, uint32_t return -1; } +void CreateGraph(Graph &graph) { + TensorDesc desc(ge::Shape({1, 3, 224, 224})); + uint32_t size = desc.GetShape().GetShapeSize(); + desc.SetSize(size); + auto data 
= op::Data("Data").set_attr_index(0); + data.update_input_desc_data(desc); + data.update_output_desc_out(desc); + + auto flatten = op::Flatten("Flatten").set_input_x(data, data.name_out_out()); + + std::vector inputs{data}; + std::vector outputs{flatten}; + std::vector targets{flatten}; + // Graph graph("test_graph"); + graph.SetInputs(inputs).SetOutputs(outputs).SetTargets(targets); +} + TEST_F(UtestGeProfilinganager, init_success) { setenv("PROFILING_MODE", "true", true); Options options; @@ -133,3 +155,94 @@ TEST_F(UtestGeProfilinganager, set_step_info_failed) { Status ret = ProfSetStepInfo(index_id, 1, stream); EXPECT_EQ(ret, ge::FAILED); } + +TEST_F(UtestGeProfilinganager, get_device_from_graph) { + GraphId graph_id = 1; + uint32_t device_id = 0; + GraphManager graph_manager; + GraphNodePtr graph_node = MakeShared(graph_id); + graph_manager.AddGraphNode(graph_id, graph_node); + graph_manager.SetAddGraphCondition(graph_id, 2); + Graph graph("test_graph"); + CreateGraph(graph); + std::map options; + OmgContext context; + Status ret = graph_manager.AddGraph(graph_id, graph, options, context); + EXPECT_EQ(ret, ge::SUCCESS); + ret = ProfGetDeviceFormGraphId(graph_id, device_id); + EXPECT_EQ(ret, ge::SUCCESS); +} + +TEST_F(UtestGeProfilinganager, handle_subscribe_info) { + ProfCommandHandleType prof_type = kProfCommandhandleModelSubscribe; + ProfCommandHandleData prof_data; + prof_data.profSwitch = 0; + prof_data.modelId = 1; + domi::GetContext().train_flag = true; + auto prof_ptr = std::make_shared(prof_data); + Status ret = ProfCommandHandle(prof_type, static_cast(prof_ptr.get()), sizeof(prof_data)); + EXPECT_EQ(ret, ge::SUCCESS); +} + +TEST_F(UtestGeProfilinganager, handle_unsubscribe_info) { + ProfCommandHandleType prof_type = kProfCommandhandleModelUnsubscribe; + ProfCommandHandleData prof_data; + prof_data.profSwitch = 0; + prof_data.modelId = 1; + domi::GetContext().train_flag = true; + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, 1, true); + auto prof_ptr = std::make_shared(prof_data); + Status ret = ProfCommandHandle(prof_type, static_cast(prof_ptr.get()), sizeof(prof_data)); + profiling_manager.CleanSubscribeInfo(); +} + +TEST_F(UtestGeProfilinganager, set_subscribe_info) { + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetSubscribeInfo(0, 1, true); + const auto &subInfo = profiling_manager.GetSubscribeInfo(); + EXPECT_EQ(subInfo.prof_switch, 0); + EXPECT_EQ(subInfo.graph_id, 1); + EXPECT_EQ(subInfo.is_subscribe, true); +} + +TEST_F(UtestGeProfilinganager, clean_subscribe_info) { + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.CleanSubscribeInfo(); + const auto &subInfo = profiling_manager.GetSubscribeInfo(); + EXPECT_EQ(subInfo.prof_switch, 0); + EXPECT_EQ(subInfo.graph_id, 0); + EXPECT_EQ(subInfo.is_subscribe, false); +} + +TEST_F(UtestGeProfilinganager, get_model_id_success) { + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetGraphIdToModelMap(0, 1); + uint32_t model_id = 0; + Status ret = profiling_manager.GetModelIdFromGraph(0, model_id); + EXPECT_EQ(ret, ge::SUCCESS); +} + +TEST_F(UtestGeProfilinganager, get_model_id_failed) { + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetGraphIdToModelMap(0, 1); + uint32_t model_id = 0; + Status ret = profiling_manager.GetModelIdFromGraph(10, model_id); + EXPECT_EQ(ret, ge::FAILED); +} + +TEST_F(UtestGeProfilinganager, get_device_id_success) { + 
auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetGraphIdToDeviceMap(0, 1); + uint32_t device_id = 0; + Status ret = profiling_manager.GetDeviceIdFromGraph(0, device_id); + EXPECT_EQ(ret, ge::SUCCESS); +} + +TEST_F(UtestGeProfilinganager, get_device_id_failed) { + auto &profiling_manager = ge::ProfilingManager::Instance(); + profiling_manager.SetGraphIdToDeviceMap(0, 1); + uint32_t device_id = 0; + Status ret = profiling_manager.GetDeviceIdFromGraph(10, device_id); + EXPECT_EQ(ret, ge::FAILED); +} From ecd48b072de0337cdd023397f0b34f4e181b43eb Mon Sep 17 00:00:00 2001 From: y00500818 Date: Wed, 7 Jul 2021 15:27:27 +0800 Subject: [PATCH 184/226] bugfix for RecoverTransRoadForVar --- ge/graph/preprocess/graph_preprocess.cc | 20 ++++++++-------- .../preprocess/graph_preprocess_unittest.cc | 23 +++++++++++++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index d7f33b4b..8d59d9f9 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -415,16 +415,16 @@ Status UpdateVarFormats(const NodePtr &var, const GeTensorDesc &tensor_desc) { Status RecoverTransRoadForVar(const NodePtr &var, const VarTransRoad &road) { GE_CHECK_NOTNULL(var); - int index = 0; + static std::atomic_int index(0); NodePtr last_node = var; for (auto iter = road.rbegin(); iter != road.rend(); ++iter) { auto trans_name = var->GetName() + "_trans_" + std::to_string(index++); auto ret = RecoverOneTransNodeForVar(trans_name, *iter, last_node, last_node); if (ret != SUCCESS) { - REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %d, type %s", - var->GetName().c_str(), index, iter->node_type.c_str()); - GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s, index %d, type %s", var->GetName().c_str(), - index, iter->node_type.c_str()); + REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %s, type %s", + var->GetName().c_str(), std::to_string(index).c_str(), iter->node_type.c_str()); + GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s, index %s, type %s", var->GetName().c_str(), + std::to_string(index).c_str(), iter->node_type.c_str()); return INTERNAL_ERROR; } // set stream_label @@ -460,17 +460,17 @@ Status RecoverTransRoadForVar(const NodePtr &var, const VarTransRoad &road) { Status RecoverTransRoadForVarRef(const std::set &nodes, const VarTransRoad &road) { for (auto &var : nodes) { GE_CHECK_NOTNULL(var); - int index = 0; + static std::atomic_int index(0); NodePtr last_node = var; GELOGI("Recover trans nodes for variable ref %s", var->GetName().c_str()); for (auto iter = road.rbegin(); iter != road.rend(); ++iter) { auto trans_name = var->GetName() + "_trans_" + std::to_string(index++); auto ret = RecoverOneTransNodeForVarRef(trans_name, *iter, last_node, last_node); if (ret != SUCCESS) { - REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %d, type %s", - var->GetName().c_str(), index, iter->node_type.c_str()); - GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s failed, index %d, type %s", - var->GetName().c_str(), index, iter->node_type.c_str()); + REPORT_CALL_ERROR("E19999", "Failed to recover trans node for variable %s, index %s, type %s", + var->GetName().c_str(), std::to_string(index).c_str(), iter->node_type.c_str()); + GELOGE(INTERNAL_ERROR, "[Recover][TransNode] for variable %s failed, index %s, type %s", + 
var->GetName().c_str(), std::to_string(index).c_str(), iter->node_type.c_str()); return INTERNAL_ERROR; } // set stream_label diff --git a/tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc b/tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc index e53a9f96..b1c07d81 100644 --- a/tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc +++ b/tests/ut/ge/graph/preprocess/graph_preprocess_unittest.cc @@ -23,6 +23,7 @@ #include "graph/passes/graph_builder_utils.h" #include "graph/utils/attr_utils.h" #include "graph/debug/ge_attr_define.h" +#include "graph/manager/graph_var_manager.h" #define private public #define protected public @@ -285,4 +286,26 @@ TEST_F(UtestGraphPreproces, test_prepare_dyn_shape) { GraphPrepare graph_prepare; EXPECT_EQ(graph_prepare.PrepareDynShape(graph_node, user_input, compute_graph, 0), SUCCESS); } + +TEST_F(UtestGraphPreproces, test_updar_variable_formats) { + auto builder = ut::GraphBuilder("g1"); + auto var = builder.AddNode("var", VARIABLE, 1, 1); + auto g1 = builder.GetGraph(); + g1->SetSessionID(0); + TransNodeInfo trans_node_info; + VarTransRoad fusion_road; + fusion_road.emplace_back(trans_node_info); + VarManager::Instance(g1->GetSessionID())->SetTransRoad(var->GetName(), fusion_road); + GraphPrepare graph_prepare; + EXPECT_EQ(graph_prepare.UpdateVariableFormats(g1), INTERNAL_ERROR); + + auto builder1 = ut::GraphBuilder("g2"); + auto var1 = builder1.AddNode("var1", VARIABLE, 1, 1); + auto g2 = builder1.GetGraph(); + g2->SetSessionID(0); + VarTransRoad fusion_road1; + VarManager::Instance(g2->GetSessionID())->SetTransRoad(var1->GetName(), fusion_road1); + AttrUtils::SetStr(var1->GetOpDesc(), REF_VAR_SRC_VAR_NAME, "var1"); + EXPECT_EQ(graph_prepare.UpdateVariableFormats(g2), SUCCESS); +} } \ No newline at end of file From 4e5b81838d7429f0dd545722d85d54a5c3989579 Mon Sep 17 00:00:00 2001 From: wqtshg Date: Mon, 12 Jul 2021 10:27:07 +0800 Subject: [PATCH 185/226] all-in-one---runtime unitize --- ge/CMakeLists.txt | 27 +++++++++++++------ ge/offline/CMakeLists.txt | 3 ++- .../ops_kernel_builder_manager.cc | 10 +++++++ metadef | 2 +- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index eec992c8..c22f88d8 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -786,12 +786,12 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../abl/licctrl ${GE_CODE_DIR}/../ace/comop/inc ${GE_CODE_DIR}/../ace/comop/inc/external - #### blue zone + #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include - ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain - ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info> ) target_link_options(ge_runner PRIVATE @@ -800,6 +800,11 @@ target_link_options(ge_runner PRIVATE target_link_libraries(ge_runner PRIVATE $ + $<$>:$> + $<$>:$> + $<$>:$> + $<$>:$> + $<$>:$> adump_server static_mmpa ge_proto_common @@ -869,9 +874,9 @@ target_include_directories(ge_compiler SYSTEM PRIVATE #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include - ${GE_CODE_DIR}/third_party/fwkacllib/inc - ${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain - ${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc> + $<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/toolchain> + 
$<$:${GE_CODE_DIR}/third_party/fwkacllib/inc/opt_info>
 )

 target_link_options(ge_compiler PRIVATE
@@ -880,6 +885,11 @@ target_link_options(ge_compiler PRIVATE

 target_link_libraries(ge_compiler PRIVATE
     $
+    $<$>:$>
+    $<$>:$>
+    $<$>:$>
+    $<$>:$>
+    $<$>:$>
     static_mmpa
     ge_proto_common
     -Wl,--no-as-needed
@@ -890,7 +900,8 @@ target_link_libraries(ge_compiler PRIVATE
     c_sec
     error_manager
     slog
-    runtime_compile
+    $<$>:$>
+    $<$:$>
    opt_feature
     -Wl,--as-needed
     json
diff --git a/ge/offline/CMakeLists.txt b/ge/offline/CMakeLists.txt
index 3a320226..e11e4a03 100644
--- a/ge/offline/CMakeLists.txt
+++ b/ge/offline/CMakeLists.txt
@@ -62,7 +62,8 @@ target_link_libraries(atc_atc.bin PRIVATE
     parser_common
     gflags
     json
-    runtime_compile
+    $<$>:$>
+    $<$:$>
     slog
     static_mmpa
     -lrt
diff --git a/ge/opskernel_manager/ops_kernel_builder_manager.cc b/ge/opskernel_manager/ops_kernel_builder_manager.cc
index 33ffddf5..9f981302 100644
--- a/ge/opskernel_manager/ops_kernel_builder_manager.cc
+++ b/ge/opskernel_manager/ops_kernel_builder_manager.cc
@@ -20,6 +20,7 @@
 namespace ge {
 namespace {
+#ifdef ONLY_COMPILE_OPEN_SRC
 const std::vector<std::string> kBasicBuilderLibs = {
     "libge_local_opskernel_builder.so",
     "libhost_cpu_opskernel_builder.so",
@@ -27,6 +28,15 @@ const std::vector<std::string> kBasicBuilderLibs = {
     "libaicpu_ascend_builder.so",
     "libaicpu_tf_builder.so"
 };
+#else
+const std::vector<std::string> kBasicBuilderLibs = {
+    "libge_local_opskernel_builder.so",
+    "libhost_cpu_opskernel_builder.so",
+    "librts_engine.so",
+    "libaicpu_ascend_engine.so",
+    "libaicpu_tf_engine.so"
+};
+#endif

 const std::vector<std::string> kHcclBuilderLibs = {
     "libhcom_opskernel_builder.so",
diff --git a/metadef b/metadef
index f9a47a45..d5101eed 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit f9a47a45cdd7e6dc507a15291fcb769f96b859b3
+Subproject commit d5101eed670e0ecf8391db616c12582ed577adab

From 729aa9d0573f0834366168890b84245994defeb1 Mon Sep 17 00:00:00 2001
From: wq160
Date: Wed, 7 Jul 2021 14:35:05 +0800
Subject: [PATCH 186/226] deal with unknown value range

---
 ge/graph/passes/infer_value_range_pass.cc     | 24 +++++++++++----
 ge/graph/passes/infer_value_range_pass.h      |  2 +-
 .../passes/infer_value_range_pass_unittest.cc | 29 +++++++++++++++++++
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/ge/graph/passes/infer_value_range_pass.cc b/ge/graph/passes/infer_value_range_pass.cc
index 03a18fdb..c183a599 100644
--- a/ge/graph/passes/infer_value_range_pass.cc
+++ b/ge/graph/passes/infer_value_range_pass.cc
@@ -85,8 +85,16 @@ graphStatus InferValueRangePass::Infer(NodePtr &node) {
     return GRAPH_SUCCESS;
   }

-  // if input value range has -1, cpu kernel cannot calculate correctly, so set {1:-1}
-  if (InputHasUnknownValueRange(node)) {
+  // Deal with scenes with unknown value range
+  bool has_unknown_value_range = false;
+  bool has_zero_in_value_range = false;
+  CheckInputValueRange(node, has_unknown_value_range, has_zero_in_value_range);
+  if (has_unknown_value_range) {
+    if (has_zero_in_value_range) {
+      // When there is zero in input value range, it is unreasonable to always set output value range {1:-1}.
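+      // For example, input ranges {1:-1} and {0:240} admit a real value of 0, which the fallback
+      // range {1:-1} cannot represent, so leave the output value range unset for this node.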
+ GELOGW("Node %s has -1 and 0 in value range, skip setting value range.", node->GetName().c_str()); + return GRAPH_NOT_CHANGED; + } GELOGI("Node %s has unknown value range in input tensors, set value range {1:-1}, and skip cpu kernel.", node->GetName().c_str()); return GenerateWorstValueRange(node); @@ -188,14 +196,21 @@ bool InferValueRangePass::InputIsConstOrHasValueRange(const NodePtr &node) const return input_is_const_or_has_value_range; } -bool InferValueRangePass::InputHasUnknownValueRange(const NodePtr &node) const { - bool has_unknown_value_range = false; +void InferValueRangePass::CheckInputValueRange(const NodePtr &node, bool &has_unknown_value_range, + bool &has_zero_in_value_range) const { + has_unknown_value_range = false; + has_zero_in_value_range = false; auto cur_op_desc = node->GetOpDesc(); for (const auto &input_desc : cur_op_desc->GetAllInputsDescPtr()) { std::vector> input_desc_value_range; input_desc->GetValueRange(input_desc_value_range); if (!input_desc_value_range.empty()) { for (const auto &range : input_desc_value_range) { + if (range.first == 0 || range.second == 0) { + GELOGD("Node %s input tensors have zero in value range %s.", node->GetName().c_str(), + formats::RangeToString(input_desc_value_range).c_str()); + has_zero_in_value_range = true; + } if (range.first == -1 || range.second == -1) { GELOGD("Node %s input tensors have unknown value range, value range is %s.", node->GetName().c_str(), formats::RangeToString(input_desc_value_range).c_str()); @@ -204,7 +219,6 @@ bool InferValueRangePass::InputHasUnknownValueRange(const NodePtr &node) const { } } } - return has_unknown_value_range; } graphStatus InferValueRangePass::UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) { diff --git a/ge/graph/passes/infer_value_range_pass.h b/ge/graph/passes/infer_value_range_pass.h index eb485c87..503b5a9f 100644 --- a/ge/graph/passes/infer_value_range_pass.h +++ b/ge/graph/passes/infer_value_range_pass.h @@ -34,7 +34,7 @@ class InferValueRangePass : public InferBasePass { bool InputIsDynamic(const NodePtr &node) const; bool InputIsConstOrHasValueRange(const NodePtr &node) const; - bool InputHasUnknownValueRange(const NodePtr &node) const; + void CheckInputValueRange(const NodePtr &node, bool &has_unknown_value_range, bool &has_zero_in_value_range) const; graphStatus GenerateWorstValueRange(NodePtr &node); template graphStatus ConstructData(const GeTensorDesc &tensor_desc, bool use_floor_value, GeTensorPtr &output_ptr); diff --git a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc index c39755b3..014d87dc 100644 --- a/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/infer_value_range_pass_unittest.cc @@ -365,6 +365,35 @@ TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHave EXPECT_EQ(unknown_target_value_range, output_value_range); } +TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHaveZeroInValueRange) { + // shape --- add --- sqrt + auto graph = std::make_shared("test_graph"); + GeTensorDesc shape_tensor_desc(GeShape({2}), ge::FORMAT_NCHW, ge::DT_INT64); + std::vector> unknown_value_range = {make_pair(1, -1), make_pair(0, 240)}; + shape_tensor_desc.SetValueRange(unknown_value_range); + auto shape_op_desc = std::make_shared("Shape", "Shape"); + shape_op_desc->AddOutputDesc(shape_tensor_desc); + auto shape_node = graph->AddNode(shape_op_desc); + + GeTensorDesc 
+  auto add_op_desc = std::make_shared<OpDesc>("Add", "Add");
+  add_op_desc->AddInputDesc(shape_tensor_desc);
+  add_op_desc->AddInputDesc(shape_tensor_desc);
+  add_op_desc->AddOutputDesc(add_tensor_desc);
+  auto add_node = graph->AddNode(add_op_desc);
+
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(0));
+  ge::GraphUtils::AddEdge(shape_node->GetOutDataAnchor(0), add_node->GetInDataAnchor(1));
+
+  // test unknown value range
+  InferValueRangePass infer_pass;
+  EXPECT_EQ(infer_pass.Run(add_node), SUCCESS);
+  auto output_0_desc = add_node->GetOpDesc()->GetOutputDesc(0);
+  std::vector<std::pair<int64_t, int64_t>> out_value_range;
+  output_0_desc.GetValueRange(out_value_range);
+  EXPECT_EQ(out_value_range.size(), 0);
+}
+
 TEST_F(UtestGraphInferValueRangePass, CallRun_NoSubgraph_UseCpuKernel_InputsHaveUnKnownValueRange_ScalarOutput) {
   // shape --- add --- sqrt
   //  constant /

From 238c7dfe1e3ccf4c0d62d21b48edc05d0bd90257 Mon Sep 17 00:00:00 2001
From: zhupuxu
Date: Mon, 12 Jul 2021 14:22:01 +0800
Subject: [PATCH 187/226] reduce cm

Signed-off-by: zhupuxu

---
 ge/common/profiling/ge_profiling.cc          | 45 ++++++++++++--------
 ge/graph/load/model_manager/model_manager.cc |  2 +-
 2 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc
index 1b5e5c84..fcd01a12 100644
--- a/ge/common/profiling/ge_profiling.cc
+++ b/ge/common/profiling/ge_profiling.cc
@@ -50,6 +50,22 @@ const std::map<ProfCommandHandleType, std::string> kProfCommandTypeMap = {
 const uint64_t kModelId = ge::INVALID_MODEL_ID;
 const uint16_t kStepStart = 0;
 const uint16_t kStepEnd = 1;
+
+ge::Status NeedUnsubscribe(ProfCommandHandleType type, bool is_subscribe,
+                           uint32_t graph_id, vector<string> &prof_params) {
+  if (type == kProfCommandhandleModelUnsubscribe && is_subscribe) {
+    prof_params.clear();
+    prof_params.emplace_back(kPofilingModelId);
+    uint32_t model_id = 0;
+    auto ret = ge::ProfilingManager::Instance().GetModelIdFromGraph(graph_id, model_id);
+    if (ret != ge::SUCCESS) {
+      GELOGE(ret, "graph_id:%u not found", graph_id);
+      return ret;
+    }
+    prof_params.emplace_back(std::to_string(model_id));
+  }
+  return ge::SUCCESS;
+}
 }  // namespace

 bool TransProfConfigToParam(const ProfCommandHandleData &profCommand, vector<string> &prof_config_params) {
@@ -205,23 +221,16 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
     return ge::SUCCESS;
   }
   auto is_subscribe = profiling_manager.GetSubscribeInfo().is_subscribe;
-  if (type == kProfCommandhandleModelUnsubscribe && is_subscribe) {
-    prof_params.clear();
-    prof_params.emplace_back(kPofilingModelId);
-    uint32_t model_id = 0;
-    // GraphId is actually stored in prof_config_param
-    uint32_t graph_id = prof_config_param->modelId;
-    auto ret = profiling_manager.GetModelIdFromGraph(graph_id, model_id);
-    if (ret != ge::SUCCESS) {
-      GELOGE(ret, "graph_id:%u not not found", graph_id);
-      REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"value", "parameter", "reason"}),
-                         std::vector<std::string>({std::to_string(graph_id),
-                         "GraphToModelMap",
-                         "graph_id does not exist!"}));
-      return ge::FAILED;
-    }
-
-    prof_params.emplace_back(std::to_string(model_id));
+  // GraphId is actually stored in prof_config_param
+  auto graph_id = prof_config_param->modelId;
+  ge::Status ret = NeedUnsubscribe(type, is_subscribe, graph_id, prof_params);
+  if (ret != ge::SUCCESS) {
+    GELOGE(ret, "graph_id:%u not found", graph_id);
+    REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"value", "parameter", "reason"}),
+                       std::vector<std::string>({std::to_string(graph_id),
+                       "GraphToModelMap",
+                       "graph_id does not exist!"}));
+    return ge::FAILED;
   }
   ge::GraphLoader graph_loader;
   ge::Command command;
@@ -236,7 +245,7 @@ ge::Status ProfCommandHandle(ProfCommandHandleType type, void *data, uint32_t le
   if (type == kProfCommandhandleStart || type == kProfCommandhandleStop) {
     GELOGI("Profiling device nums:%s , deviceID:[%s]", prof_params[0].c_str(), prof_params[kDeviceListIndex].c_str());
   }
-  ge::Status ret = graph_loader.CommandHandle(command);
+  ret = graph_loader.CommandHandle(command);
   if (ret != ge::SUCCESS) {
     GELOGE(ret, "[Handle][Command]Handle profiling command failed, command type %s, error_code %u",
            iter->second.c_str(), ret);
diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc
index f1db6a99..5af503b2 100755
--- a/ge/graph/load/model_manager/model_manager.cc
+++ b/ge/graph/load/model_manager/model_manager.cc
@@ -372,7 +372,7 @@ Status ModelManager::LoadModelOnline(uint32_t &model_id, const shared_ptr<ge::GeRootModel>
     auto graph_id = davinci_model->GetRuntimeParam().graph_id;
-    if(subcribe_info.graph_id == graph_id) {
+    if (subcribe_info.graph_id == graph_id) {
       profiling_manager.SetGraphIdToModelMap(graph_id, model_id);
     } else {

From e23e98bfa63acc4042b7cd6303a1904435db213c Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Mon, 12 Jul 2021 19:06:22 +0800
Subject: [PATCH 188/226] Add aicore_task_compiler.cc to executor

---
 ge/CMakeLists.txt                          | 15 +++++++++------
 ge/engine_manager/dnnengine_manager.cc     |  1 -
 ge/init/gelib.cc                           |  1 -
 ge/opskernel_manager/ops_kernel_manager.cc | 10 ----------
 4 files changed, 9 insertions(+), 18 deletions(-)

diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt
index c22f88d8..5470542f 100755
--- a/ge/CMakeLists.txt
+++ b/ge/CMakeLists.txt
@@ -109,7 +109,7 @@ endif ()
 ##################################################################
 set(EXECUTOR_SRC_LIST
-    #"analyzer/analyzer.cc"
+    "analyzer/analyzer.cc"
     #"client/ge_api.cc"
     "common/dump/dump_manager.cc"
     "common/dump/dump_op.cc"
@@ -141,7 +141,7 @@ set(EXECUTOR_SRC_LIST
     "common/profiling/ge_profiling.cc"
     #"common/profiling/ge_runner_profiling.cc"
     "common/profiling/profiling_manager.cc"
-    #"engine_manager/dnnengine_manager.cc"
+    "engine_manager/dnnengine_manager.cc"
     "executor/ge_executor.cc"
     "ge_local_engine/engine/host_cpu_engine.cc"
     #"ge_opt_info/ge_opt_info.cc"
@@ -401,7 +401,7 @@ set(EXECUTOR_SRC_LIST
     "hybrid/node_executor/aicore/aicore_node_executor.cc"
     "hybrid/node_executor/aicore/aicore_op_task.cc"
     "hybrid/node_executor/aicore/aicore_task_builder.cc"
-    #"hybrid/node_executor/aicore/aicore_task_compiler.cc"
+    "hybrid/node_executor/aicore/aicore_task_compiler.cc"
    "hybrid/node_executor/aicpu/aicpu_ext_info.cc"
     "hybrid/node_executor/aicpu/aicpu_node_executor.cc"
     "hybrid/node_executor/compiledsubgraph/known_node_executor.cc"
@@ -415,7 +415,7 @@ set(EXECUTOR_SRC_LIST
     "hybrid/node_executor/rts/rts_node_task.cc"
     "hybrid/node_executor/rts/rts_task_factory.cc"
     "hybrid/node_executor/task_context.cc"
-    #"init/gelib.cc"
+    "init/gelib.cc"
     #"ir_build/attr_options/keep_dtype_option.cc"
     #"ir_build/attr_options/utils.cc"
     #"ir_build/attr_options/weight_compress_option.cc"
@@ -424,7 +424,7 @@ set(EXECUTOR_SRC_LIST
     "model/ge_model.cc"
     "model/ge_root_model.cc"
     "opskernel_manager/ops_kernel_builder_manager.cc"
-    #"opskernel_manager/ops_kernel_manager.cc"
+    "opskernel_manager/ops_kernel_manager.cc"
     #"session/inner_session.cc"
     #"session/session_manager.cc"
     "single_op/single_op.cc"
@@ -445,7 +445,7 @@ set(COMPILER_SRC_LIST
     "common/dump/dump_manager.cc"
"common/dump/dump_op.cc" "common/dump/dump_properties.cc" - "common/dump/dump_server.cc" + #"common/dump/dump_server.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" "common/formats/format_transfers/datatype_transfer.cc" @@ -828,6 +828,7 @@ target_link_libraries(ge_runner PRIVATE ############ libge_compiler.so ############ add_library(ge_compiler SHARED + "common/dump/dump_server.cc" "hybrid/hybrid_davinci_model_stub.cc" ${COMPILER_SRC_LIST} ) @@ -962,6 +963,7 @@ target_link_libraries(ge_executor PRIVATE $<$>:$> $<$>:$> json + ge_proto_client ascend_protobuf_static c_sec $<$>:-lrt> @@ -1024,6 +1026,7 @@ target_link_libraries(ge_executor_shared PRIVATE $<$>:$> -Wl,--no-as-needed ge_common + ge_proto_client runtime slog graph diff --git a/ge/engine_manager/dnnengine_manager.cc b/ge/engine_manager/dnnengine_manager.cc index 9e338295..0fadd993 100644 --- a/ge/engine_manager/dnnengine_manager.cc +++ b/ge/engine_manager/dnnengine_manager.cc @@ -16,7 +16,6 @@ #include "engine_manager/dnnengine_manager.h" -#include #include #include #include diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index 132d4680..0350328d 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -16,7 +16,6 @@ #include "init/gelib.h" -#include #include #include #include diff --git a/ge/opskernel_manager/ops_kernel_manager.cc b/ge/opskernel_manager/ops_kernel_manager.cc index d35ebda5..60958872 100644 --- a/ge/opskernel_manager/ops_kernel_manager.cc +++ b/ge/opskernel_manager/ops_kernel_manager.cc @@ -16,17 +16,7 @@ #include "opskernel_manager/ops_kernel_manager.h" -#include -#include -#include -#include - -#include -#include -#include #include "init/gelib.h" -#include "framework/common/debug/ge_log.h" -#include "external/ge/ge_api.h" #include "proto/optimizer_priority.pb.h" namespace { From 87fd8b270b2a280680e0663bed504965fd6756a2 Mon Sep 17 00:00:00 2001 From: wangzhengjun Date: Sat, 10 Jul 2021 17:23:31 +0800 Subject: [PATCH 189/226] single op support null tensor --- ge/graph/manager/util/hcom_util.cc | 3 +- ge/single_op/task/op_task.cc | 16 +++++-- .../ut/ge/graph/manager/hcom_util_unittest.cc | 11 +++++ .../ge/single_op/single_op_task_unittest.cc | 48 +++++++++++++++++++ 4 files changed, 72 insertions(+), 6 deletions(-) diff --git a/ge/graph/manager/util/hcom_util.cc b/ge/graph/manager/util/hcom_util.cc index 8e12ff27..021a458e 100644 --- a/ge/graph/manager/util/hcom_util.cc +++ b/ge/graph/manager/util/hcom_util.cc @@ -109,8 +109,7 @@ Status HcomOmeUtil::GetHcomCount(const ge::ConstOpDescPtr &op_desc, HcclDataType GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(*op_desc->GetInputDescPtr(i), input_size), "[Get][Size] from TensorDesc failed, op:%s, input index:%zu", op_desc->GetName().c_str(), i); // dynamic shape hccl op get size from output tensor desc - if (op_desc->HasAttr(ATTR_NAME_IS_UNKNOWN_SHAPE)) { - GE_CHECK_NOTNULL(op_desc->GetOutputDescPtr(i)); + if (op_desc->HasAttr(ATTR_NAME_IS_UNKNOWN_SHAPE) && (op_desc->GetOutputDescPtr(i) != nullptr)) { GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(*op_desc->GetOutputDescPtr(i), input_size), "[Get][Size] from TensorDesc failed, op:%s, input index:%zu", op_desc->GetName().c_str(), i); } diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index c6c99ab0..dbc90ac5 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -746,16 +746,24 @@ Status AiCpuBaseTask::UpdateIoAddr(const vector &inputs, const vecto GE_CHK_BOOL_RET_STATUS(non_const_index < inputs.size(), ACL_ERROR_GE_PARAM_INVALID, "[Check][Size] Input size is 
%zu, but get non_const_index is %zu", inputs.size(), non_const_index); auto addr = inputs[non_const_index].data; - GE_CHECK_NOTNULL(addr); - GELOGD("AICpuTask input[%zu] addr = %p", input_index, addr); + uint64_t length = inputs[non_const_index].length; + if (length != 0 && addr == nullptr) { + GELOGE(PARAM_INVALID, "[Check][Addr]AiCpuTask input[%zu] addr is nullptr, length = %lu", input_index, length); + return PARAM_INVALID; + } + GELOGD("AICpuTask input[%zu] addr = %p, length = %lu.", input_index, addr, length); *arg_base++ = reinterpret_cast(addr); non_const_index++; } for (size_t i = 0; i < outputs.size(); ++i) { auto addr = outputs[i].data; - GE_CHECK_NOTNULL(addr); - GELOGD("AICpuTask output[%zu] addr = %p", i, addr); + uint64_t length = outputs[i].length; + if (length != 0 && addr == nullptr) { + GELOGE(PARAM_INVALID, "[Check][Addr]AiCpuTask output[%zu] addr is nullptr, length = %lu", i, length); + return PARAM_INVALID; + } + GELOGD("AICpuTask output[%zu] addr = %p, length = %lu.", i, addr, length); *arg_base++ = reinterpret_cast(addr); } diff --git a/tests/ut/ge/graph/manager/hcom_util_unittest.cc b/tests/ut/ge/graph/manager/hcom_util_unittest.cc index 9f104f5f..4aeeddb9 100644 --- a/tests/ut/ge/graph/manager/hcom_util_unittest.cc +++ b/tests/ut/ge/graph/manager/hcom_util_unittest.cc @@ -94,4 +94,15 @@ TEST_F(UtestHcomUtil, test_GetHcomCount_succ) { auto ret = hcom_ome_util.GetHcomCount(op_desc, HCCL_DATA_TYPE_FP32, true, count); EXPECT_EQ(ret, 0); } + +TEST_F(UtestHcomUtil, test_GetHcomCount_succ_2) { + ComputeGraphPtr graph = std::make_shared("test"); + NodePtr node = NodeBuilder("node", HCOMSEND).AddInputDesc({1, 1, 224, 224}).Build(graph); + auto op_desc = node->GetOpDesc(); + HcomOmeUtil hcom_util; + int count = 0; + auto ret = hcom_util.GetHcomCount(op_desc, HCCL_DATA_TYPE_FP32, true, count); + EXPECT_EQ(ret, SUCCESS); + EXPECT_EQ(count, 224 * 224); +} } // namespace ge \ No newline at end of file diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 3e3160c2..8964df74 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -189,3 +189,51 @@ TEST_F(UtestSingleOpTask, test_atomic_exec) { optiling::utils::OpRunInfo run_info(0, true, 0); task.CalcTilingInfo(run_info); } + +TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) { + AiCpuCCTask task; + task.num_inputs_ = 2; + task.num_outputs_ = 1; + task.input_is_const_ = {true, false}; + int total_addr = 3; + uint32_t* addrs[total_addr] = {nullptr, nullptr, nullptr}; + task.io_addr_ = reinterpret_cast(addrs); + task.io_addr_num_ = total_addr; + + { + vector inputs(1, DataBuffer()); + vector outputs(1, DataBuffer()); + auto ret = task.UpdateIoAddr(inputs, outputs); + ASSERT_EQ(ret, SUCCESS); + ASSERT_EQ(addrs[0], nullptr); + ASSERT_EQ(addrs[1], nullptr); + ASSERT_EQ(addrs[2], nullptr); + } + + { + uint32_t data_buf[2]; + vector inputs{DataBuffer(&data_buf[0], 4, false)}; + vector outputs{DataBuffer(&data_buf[1], 4, false)}; + auto ret = task.UpdateIoAddr(inputs, outputs); + ASSERT_EQ(ret, SUCCESS); + ASSERT_EQ(addrs[0], nullptr); + ASSERT_EQ(addrs[1], &data_buf[0]); + ASSERT_EQ(addrs[2], &data_buf[1]); + } + + { + uint32_t data_buf[2]; + vector inputs{DataBuffer(nullptr, 4, false)}; + vector outputs{DataBuffer(&data_buf[1], 4, false)}; + auto ret = task.UpdateIoAddr(inputs, outputs); + ASSERT_EQ(ret, PARAM_INVALID); + } + + { + uint32_t data_buf[2]; + vector inputs{DataBuffer(&data_buf[0], 4, 
false)}; + vector outputs{DataBuffer(nullptr, 4, false)}; + auto ret = task.UpdateIoAddr(inputs, outputs); + ASSERT_EQ(ret, PARAM_INVALID); + } +} From 56337a19936667867eb5fdafd4f5a4fe6fbdf0ac Mon Sep 17 00:00:00 2001 From: wqtshg Date: Mon, 12 Jul 2021 20:57:02 +0800 Subject: [PATCH 190/226] all in one--runtime --- ge/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 5470542f..aca8145b 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -786,6 +786,9 @@ target_include_directories(ge_runner SYSTEM PRIVATE ${GE_CODE_DIR}/../abl/licctrl ${GE_CODE_DIR}/../ace/comop/inc ${GE_CODE_DIR}/../ace/comop/inc/external + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:$> + $<$>:$> #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include @@ -872,6 +875,9 @@ target_include_directories(ge_compiler SYSTEM PRIVATE ${GE_CODE_DIR}/../abl/licctrl ${GE_CODE_DIR}/../ace/comop/inc ${GE_CODE_DIR}/../ace/comop/inc/external + $<$>:${GE_DEPEND_DIR}/inc> + $<$>:$> + $<$>:$> #### blue zone #### ${ASCEND_DIR}/driver/include ${ASCEND_DIR}/fwkacllib/include From 8c6a15b480da71f2af1bb768a117e169de5f1ba7 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 13 Jul 2021 10:34:05 +0800 Subject: [PATCH 191/226] Clean MakeLists --- ge/CMakeLists.txt | 206 ----------------------------- ge/graph/execute/model_executor.cc | 7 + ge/graph/label/while_label_maker.h | 104 ++++++++------- ge/init/gelib.cc | 7 - 4 files changed, 60 insertions(+), 264 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 5470542f..2fa96735 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -110,70 +110,27 @@ endif () ################################################################## set(EXECUTOR_SRC_LIST "analyzer/analyzer.cc" - #"client/ge_api.cc" "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/dump_properties.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" - #"common/formats/format_transfers/datatype_transfer.cc" - #"common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" - #"common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" - #"common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" - #"common/formats/format_transfers/format_transfer_fractal_nz.cc" - #"common/formats/format_transfers/format_transfer_fractal_z.cc" - #"common/formats/format_transfers/format_transfer_fractal_zz.cc" - #"common/formats/format_transfers/format_transfer_fracz_hwcn.cc" - #"common/formats/format_transfers/format_transfer_fracz_nchw.cc" - #"common/formats/format_transfers/format_transfer_fracz_nhwc.cc" - #"common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" - #"common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" - #"common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" - #"common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" - #"common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" "common/formats/format_transfers/format_transfer_transpose.cc" - #"common/formats/formats.cc" "common/formats/utils/formats_trans_utils.cc" "common/fp16_t.cc" "common/ge/op_tiling_manager.cc" "common/ge/plugin_manager.cc" - #"common/helper/model_cache_helper.cc" "common/profiling/ge_profiling.cc" - #"common/profiling/ge_runner_profiling.cc" "common/profiling/profiling_manager.cc" "engine_manager/dnnengine_manager.cc" "executor/ge_executor.cc" "ge_local_engine/engine/host_cpu_engine.cc" - #"ge_opt_info/ge_opt_info.cc" - #"generator/ge_generator.cc" 
- #"generator/generator_api.cc" - #"graph/build/graph_builder.cc" - #"graph/build/label_allocator.cc" - #"graph/build/logical_stream_allocator.cc" - #"graph/build/memory/binary_block_mem_assigner.cc" - #"graph/build/memory/block_mem_assigner.cc" - #"graph/build/memory/buffer_pool_mem_assigner.cc" - #"graph/build/memory/graph_mem_assigner.cc" - #"graph/build/memory/hybrid_mem_assigner.cc" - #"graph/build/memory/max_block_mem_assigner.cc" - #"graph/build/memory/memory_assigner.cc" "graph/build/memory/var_mem_assign_util.cc" - #"graph/build/model_builder.cc" - #"graph/build/run_context.cc" - #"graph/build/stream_allocator.cc" - #"graph/build/stream_graph_optimizer.cc" - #"graph/build/task_generator.cc" "graph/common/bcast.cc" "graph/common/local_context.cc" "graph/common/omg_util.cc" - #"graph/common/transop_util.cc" "graph/execute/graph_execute.cc" "graph/execute/model_executor.cc" - #"graph/label/case_label_maker.cc" - #"graph/label/if_label_maker.cc" - #"graph/label/label_maker.cc" - #"graph/label/partitioned_call_label_maker.cc" - #"graph/label/while_label_maker.cc" "graph/load/graph_loader.cc" "graph/load/model_manager/aipp_utils.cc" "graph/load/model_manager/cpu_queue_schedule.cc" @@ -208,8 +165,6 @@ set(EXECUTOR_SRC_LIST "graph/load/model_manager/zero_copy_offset.cc" "graph/load/model_manager/zero_copy_task.cc" "graph/manager/graph_caching_allocator.cc" - #"graph/manager/graph_context.cc" - #"graph/manager/graph_manager.cc" "graph/manager/graph_manager_utils.cc" "graph/manager/graph_mem_allocator.cc" "graph/manager/graph_mem_manager.cc" @@ -217,128 +172,12 @@ set(EXECUTOR_SRC_LIST "graph/manager/host_mem_allocator.cc" "graph/manager/host_mem_manager.cc" #"graph/manager/memory_api.cc" # Just for runner. - #"graph/manager/model_manager/event_manager.cc" "graph/manager/rdma_pool_allocator.cc" "graph/manager/session_scope_mem_allocator.cc" "graph/manager/trans_var_data_utils.cc" "graph/manager/util/debug.cc" #"graph/manager/util/hcom_util.cc" # Just for runner. 
- #"graph/manager/util/rt_context_util.cc" - #"graph/manager/util/variable_accelerate_ctrl.cc" - #"graph/optimize/graph_optimize.cc" - #"graph/optimize/mem_rw_conflict_optimize.cc" - #"graph/optimize/summary_optimize.cc" - #"graph/partition/dynamic_shape_partition.cc" - #"graph/partition/engine_place.cc" - #"graph/partition/graph_partition.cc" - #"graph/partition/stage_partition.cc" - #"graph/passes/addn_pass.cc" - #"graph/passes/aicpu_constant_folding_pass.cc" - #"graph/passes/assert_pass.cc" - #"graph/passes/assign_remove_pass.cc" - #"graph/passes/atomic_addr_clean_pass.cc" - #"graph/passes/attach_stream_label_pass.cc" - #"graph/passes/base_pass.cc" - #"graph/passes/bitcast_pass.cc" - #"graph/passes/buffer_pool_memory_pass.cc" - #"graph/passes/cast_remove_pass.cc" - #"graph/passes/cast_translate_pass.cc" - #"graph/passes/common_subexpression_elimination_pass.cc" - #"graph/passes/compile_nodes_pass.cc" - #"graph/passes/cond_pass.cc" - #"graph/passes/cond_remove_pass.cc" - #"graph/passes/constant_folding_pass.cc" - #"graph/passes/constant_fuse_same_pass.cc" - #"graph/passes/control_trigger_pass.cc" - #"graph/passes/ctrl_edge_transfer_pass.cc" - #"graph/passes/data_pass.cc" - #"graph/passes/dimension_adjust_pass.cc" - #"graph/passes/dimension_compute_pass.cc" - #"graph/passes/dropout_pass.cc" - #"graph/passes/end_of_sequence_add_control_pass.cc" - #"graph/passes/enter_pass.cc" - #"graph/passes/flow_ctrl_pass.cc" - #"graph/passes/folding_pass.cc" - #"graph/passes/for_pass.cc" - #"graph/passes/fuse_data_nodes_with_common_input_pass.cc" - #"graph/passes/get_original_format_pass.cc" - #"graph/passes/global_step_insert_pass.cc" - #"graph/passes/guarantee_const_pass.cc" - #"graph/passes/hccl_continuous_memcpy_pass.cc" - #"graph/passes/hccl_group_pass.cc" - #"graph/passes/hccl_memcpy_pass.cc" - #"graph/passes/hccl_tailing_optimization_pass.cc" - #"graph/passes/identity_pass.cc" - #"graph/passes/infer_base_pass.cc" - #"graph/passes/infer_value_range_pass.cc" - #"graph/passes/infershape_pass.cc" - #"graph/passes/inplace_support_check_pass.cc" - #"graph/passes/input_output_connection_identify_pass.cc" - #"graph/passes/iterator_op_pass.cc" - #"graph/passes/link_gen_mask_nodes_pass.cc" - #"graph/passes/mark_agnostic_pass.cc" - #"graph/passes/mark_force_unknown_for_cond_pass.cc" - #"graph/passes/mark_graph_unknown_status_pass.cc" - #"graph/passes/mark_node_unknown_shape_pass.cc" - #"graph/passes/mark_same_addr_pass.cc" - #"graph/passes/memcpy_addr_async_pass.cc" - #"graph/passes/merge_input_memcpy_pass.cc" - #"graph/passes/merge_pass.cc" - #"graph/passes/merge_to_stream_merge_pass.cc" - #"graph/passes/multi_batch_clone_pass.cc" - #"graph/passes/multi_batch_pass.cc" - #"graph/passes/net_output_pass.cc" - #"graph/passes/next_iteration_pass.cc" - #"graph/passes/no_use_reshape_remove_pass.cc" - #"graph/passes/parallel_concat_start_op_pass.cc" - #"graph/passes/parallel_group_pass.cc" - #"graph/passes/pass_manager.cc" "graph/passes/pass_utils.cc" - #"graph/passes/permute_pass.cc" - #"graph/passes/placeholder_with_default_pass.cc" - #"graph/passes/prevent_gradient_pass.cc" - #"graph/passes/print_op_pass.cc" - #"graph/passes/prune_pass.cc" - #"graph/passes/ref_identity_delete_op_pass.cc" - #"graph/passes/remove_same_const_pass.cc" - #"graph/passes/replace_transshape_pass.cc" - #"graph/passes/replace_with_empty_const_pass.cc" - #"graph/passes/reshape_recovery_pass.cc" - #"graph/passes/reshape_remove_pass.cc" - #"graph/passes/resource_pair_add_control_pass.cc" - 
#"graph/passes/resource_pair_remove_control_pass.cc" - #"graph/passes/same_transdata_breadth_fusion_pass.cc" - #"graph/passes/save_pass.cc" - #"graph/passes/set_input_output_offset_pass.cc" - #"graph/passes/shape_operate_op_remove_pass.cc" - #"graph/passes/snapshot_pass.cc" - #"graph/passes/stop_gradient_pass.cc" - #"graph/passes/subexpression_migration_pass.cc" - #"graph/passes/subgraph_const_migration_pass.cc" - #"graph/passes/subgraph_pass.cc" - #"graph/passes/switch_data_edges_bypass.cc" - #"graph/passes/switch_dead_branch_elimination.cc" - #"graph/passes/switch_logic_remove_pass.cc" - #"graph/passes/switch_to_stream_switch_pass.cc" - #"graph/passes/transop_breadth_fusion_pass.cc" - #"graph/passes/transop_depth_fusion_pass.cc" - #"graph/passes/transop_nearby_allreduce_fusion_pass.cc" - #"graph/passes/transop_symmetry_elimination_pass.cc" - #"graph/passes/transop_without_reshape_fusion_pass.cc" - #"graph/passes/transpose_transdata_pass.cc" - #"graph/passes/unused_args_clean_pass.cc" - #"graph/passes/unused_const_pass.cc" - #"graph/passes/useless_control_out_remove_pass.cc" - #"graph/passes/var_is_initialized_op_pass.cc" - #"graph/passes/variable_op_pass.cc" - #"graph/passes/variable_prepare_op_pass.cc" - #"graph/passes/variable_ref_delete_op_pass.cc" - #"graph/passes/variable_ref_useless_control_out_delete_pass.cc" - #"graph/preprocess/graph_preprocess.cc" - #"graph/preprocess/insert_op/ge_aipp_op.cc" - #"graph/preprocess/insert_op/util_insert_aipp_op.cc" - #"graph/preprocess/multi_batch_copy_graph.cc" - #"graph/preprocess/multi_batch_options.cc" "host_kernels/add_kernel.cc" "host_kernels/broadcast_args_kernel.cc" "host_kernels/broadcast_gradient_args_kernel.cc" @@ -416,17 +255,10 @@ set(EXECUTOR_SRC_LIST "hybrid/node_executor/rts/rts_task_factory.cc" "hybrid/node_executor/task_context.cc" "init/gelib.cc" - #"ir_build/attr_options/keep_dtype_option.cc" - #"ir_build/attr_options/utils.cc" - #"ir_build/attr_options/weight_compress_option.cc" - #"ir_build/ge_ir_build.cc" - #"ir_build/option_utils.cc" "model/ge_model.cc" "model/ge_root_model.cc" "opskernel_manager/ops_kernel_builder_manager.cc" "opskernel_manager/ops_kernel_manager.cc" - #"session/inner_session.cc" - #"session/session_manager.cc" "single_op/single_op.cc" "single_op/single_op_manager.cc" "single_op/single_op_model.cc" @@ -445,9 +277,6 @@ set(COMPILER_SRC_LIST "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/dump_properties.cc" - #"common/dump/dump_server.cc" - "common/dump/exception_dumper.cc" - "common/dump/opdebug_register.cc" "common/formats/format_transfers/datatype_transfer.cc" "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" @@ -497,44 +326,12 @@ set(COMPILER_SRC_LIST "graph/common/local_context.cc" "graph/common/omg_util.cc" "graph/common/transop_util.cc" - #"graph/execute/graph_execute.cc" "graph/label/case_label_maker.cc" "graph/label/if_label_maker.cc" "graph/label/label_maker.cc" "graph/label/partitioned_call_label_maker.cc" "graph/label/while_label_maker.cc" - "graph/load/graph_loader.cc" - "graph/load/model_manager/aipp_utils.cc" - "graph/load/model_manager/cpu_queue_schedule.cc" - "graph/load/model_manager/data_dumper.cc" - "graph/load/model_manager/data_inputer.cc" - "graph/load/model_manager/davinci_model.cc" - "graph/load/model_manager/model_manager.cc" "graph/load/model_manager/model_utils.cc" - "graph/load/model_manager/task_info/end_graph_task_info.cc" - 
"graph/load/model_manager/task_info/event_record_task_info.cc" - "graph/load/model_manager/task_info/event_wait_task_info.cc" - "graph/load/model_manager/task_info/ffts_task_info.cc" - "graph/load/model_manager/task_info/fusion_start_task_info.cc" - "graph/load/model_manager/task_info/fusion_stop_task_info.cc" - "graph/load/model_manager/task_info/kernel_ex_task_info.cc" - "graph/load/model_manager/task_info/kernel_task_info.cc" - "graph/load/model_manager/task_info/label_goto_ex_task_info.cc" - "graph/load/model_manager/task_info/label_set_task_info.cc" - "graph/load/model_manager/task_info/label_switch_by_index_task_info.cc" - "graph/load/model_manager/task_info/memcpy_addr_async_task_info.cc" - "graph/load/model_manager/task_info/memcpy_async_task_info.cc" - "graph/load/model_manager/task_info/model_exit_task_info.cc" - "graph/load/model_manager/task_info/profiler_trace_task_info.cc" - "graph/load/model_manager/task_info/stream_active_task_info.cc" - "graph/load/model_manager/task_info/stream_switch_task_info.cc" - "graph/load/model_manager/task_info/stream_switchn_task_info.cc" - "graph/load/model_manager/task_info/super_kernel/super_kernel.cc" - "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" - "graph/load/model_manager/task_info/task_info.cc" - "graph/load/model_manager/tbe_handle_store.cc" - "graph/load/model_manager/zero_copy_offset.cc" - "graph/load/model_manager/zero_copy_task.cc" "graph/manager/graph_caching_allocator.cc" "graph/manager/graph_context.cc" "graph/manager/graph_manager.cc" @@ -704,7 +501,6 @@ set(COMPILER_SRC_LIST "host_kernels/transpose_kernel.cc" "host_kernels/unpack_kernel.cc" "host_kernels/unsqueeze_kernel.cc" - #"hybrid/hybrid_davinci_model_stub.cc" "hybrid/node_executor/aicpu/aicpu_ext_info.cc" "init/gelib.cc" "ir_build/attr_options/keep_dtype_option.cc" @@ -828,8 +624,6 @@ target_link_libraries(ge_runner PRIVATE ############ libge_compiler.so ############ add_library(ge_compiler SHARED - "common/dump/dump_server.cc" - "hybrid/hybrid_davinci_model_stub.cc" ${COMPILER_SRC_LIST} ) diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index 2fc7b0af..d1683f1d 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -47,6 +47,13 @@ Status ModelExecutor::Initialize(const map &options, uint64_t se return MEMALLOC_FAILED; } + const auto model_manager = ModelManager::GetInstance(); + GE_CHECK_NOTNULL(model_manager); + Status status = model_manager->EnableExceptionDump(options); + if (status != SUCCESS) { + return status; + } + session_id_ = session_id; train_graph_flag_ = ParseTrainGraphFlag(); thread_run_flag_.store(true); diff --git a/ge/graph/label/while_label_maker.h b/ge/graph/label/while_label_maker.h index 6c30475b..1561b860 100644 --- a/ge/graph/label/while_label_maker.h +++ b/ge/graph/label/while_label_maker.h @@ -19,57 +19,59 @@ #include "graph/node.h" #include "graph/label/label_maker.h" -/******************************************************************************* - +------------+ - | Node | - +------------+ - | Node | - +------------+ - | While | - +------------+ - +-----------+ - | Node | +------------+ - +-----------+ | LabelSet |\ - | Node | +------------+ \ - +-----------+ |StreamActive| \ - | Node | +------------+ A - +-----------+ | c | | - | While | +------------+ | - +-----------+ | o | | - | Node | +------------+ | - +-----------+ | n | | - | Node | +------------+ | - +-----------+ | d | | - | Node | +------------+ | - +-----------+ /|SwitchByIdx | | - 
/ +------------+ | - ====> / | - | \ +------------+ | - | \|LabelSet(1) | | - | +------------+ | - | |StreamActive| | - | +------------+ | - +-----------+ +-----------+ | | b | | - | c | | b | | +------------+ | - +-----------+ +-----------+ | | o | | - | o | | o | | +------------+ | - +-----------+ +-----------+ | | d | | - | n | | d | | +------------+ | - +-----------+ +-----------+ | | y | / - | d | | y | V +------------+ / - +-----------+ +-----------+ \ | LabelGoto |/ - \ +------------+ - \|LabelSet(0) | - +------------+ - - +------------+ - | Node | - +------------+ - | Node | - +------------+ - | Node | - +------------+ -*******************************************************************************/ +/*********************************************************************************************************************** + +------------+ Step0: DavinciModel::InitNodes + | Node | + +------------+ rtLabelCreateExV2 + | Node | + +------------+ + | Node | + +------------+ + | While | + +------------+ + +-----------+ Step1: TaskInfo::Init + | Node | +------------+ + +-----------+ | LabelSet(0)|\ LabelSetTaskInfo --> id=0 + | Node | +------------+ \ + +-----------+ |StreamActive| \ If active_stream_list empty, not task. + | Node | +------------+ A + +-----------+ | c | | + | While | +------------+ | + +-----------+ | o | | + | Node | +------------+ | + +-----------+ | n | | + | Node | +------------+ | + +-----------+ | d | | + | Node | +------------+ | + +-----------+ /|SwitchByIdx | | LabelSwitchByIndexTaskInfo --> rtLabelListCpy({1,2}) + / +------------+ | + ====> / | + | \ +------------+ | + | \| LabelSet(1)| | LabelSetTaskInfo --> id=1 + | +------------+ | + | |StreamActive| | If active_stream_list empty, not task. + | +------------+ | + +-----------+ +-----------+ | | b | | + | c | | b | | +------------+ | + +-----------+ +-----------+ | | o | | + | o | | o | | +------------+ | + +-----------+ +-----------+ | | d | | + | n | | d | | +------------+ | + +-----------+ +-----------+ | | y | / + | d | | y | V +------------+ / + +-----------+ +-----------+ \ | LabelGoto |/ LabelGotoExTaskInfo --> GetLabelGotoAddr(id=0) + \ +------------+ + \| LabelSet(2)| LabelSetTaskInfo --> id=2 + +------------+ + Step2: TaskInfo::Distribute + +------------+ + | Node | LabelSetTaskInfo --> rtLabelSet + +------------+ LabelSwitchByIndexTaskInfo --> rtLabelSwitchByIndex + | Node | LabelSetTaskInfo --> rtLabelSet + +------------+ LabelGotoExTaskInfo --> rtLabelSwitchByIndex + | Node | LabelSetTaskInfo --> rtLabelSet + +------------+ +***********************************************************************************************************************/ namespace ge { class WhileOpLabelMaker : public LabelMaker { public: diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index 0350328d..1a2f0d5b 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -37,7 +37,6 @@ #include "graph/common/ge_call_wrapper.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" -#include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_mem_manager.h" #include "graph/manager/host_mem_manager.h" #include "graph/manager/graph_var_manager.h" @@ -196,12 +195,6 @@ Status GELib::SystemInitialize(const map &options) { // In train and infer, profiling is always needed. 
InitProfiling(this->options_); - auto model_manager = ModelManager::GetInstance(); - GE_CHECK_NOTNULL(model_manager); - GE_IF_BOOL_EXEC(model_manager->EnableExceptionDump(options) != SUCCESS, - REPORT_CALL_ERROR("E19999", "ModelManager EnableExceptionDump failed."); - GELOGE(FAILED, "[Enable][ExceptionDump] failed."); - return FAILED); // 1.`is_train_mode_` means case: train // 2.`(!is_train_mode_) && (options_.device_id != kDefaultDeviceIdForInfer)` means case: online infer // these two case with logical device id From c99de0ec98175f2601d163511bca6e4b5e31e9e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Tue, 13 Jul 2021 15:14:02 +0800 Subject: [PATCH 192/226] update submodule --- metadef | 2 +- parser | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metadef b/metadef index d5101eed..5a9605f6 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit d5101eed670e0ecf8391db616c12582ed577adab +Subproject commit 5a9605f6cb1204a729a51fe36bc614cf1d94a496 diff --git a/parser b/parser index b42a99ea..7a2daaa2 160000 --- a/parser +++ b/parser @@ -1 +1 @@ -Subproject commit b42a99ea6e1be75156650675fd0aeabca6cb3de9 +Subproject commit 7a2daaa2625505e1a15e1faa46c90df1a23dd6fa From 82f767585cff6a9678447512948b456903ba921f Mon Sep 17 00:00:00 2001 From: wangzhengjun Date: Tue, 13 Jul 2021 15:32:29 +0800 Subject: [PATCH 193/226] fix static check warning --- ge/graph/common/omg_util.h | 9 ++++----- tests/ut/ge/graph/optimize/graph_optimize_unittest.cc | 11 ++++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ge/graph/common/omg_util.h b/ge/graph/common/omg_util.h index d55cc7c8..83057dfb 100644 --- a/ge/graph/common/omg_util.h +++ b/ge/graph/common/omg_util.h @@ -27,11 +27,10 @@ #include "graph/node.h" namespace ge { -namespace { -const int64_t kBufferPoolMemAlignSize = 512; -const uint32_t kBufferPoolNodeOutIndex = 0; -const uint32_t kEventReuseThreshold = 65500; -} // namespace +static constexpr int64_t kBufferPoolMemAlignSize = 512; +static constexpr uint32_t kBufferPoolNodeOutIndex = 0; +static constexpr uint32_t kEventReuseThreshold = 65500; + /// /// @brief get the Original Type of FrameworkOp /// @param [in] node diff --git a/tests/ut/ge/graph/optimize/graph_optimize_unittest.cc b/tests/ut/ge/graph/optimize/graph_optimize_unittest.cc index 5468ec97..7f26aa8c 100644 --- a/tests/ut/ge/graph/optimize/graph_optimize_unittest.cc +++ b/tests/ut/ge/graph/optimize/graph_optimize_unittest.cc @@ -32,14 +32,14 @@ using namespace ge; namespace { const char *const kVectorCore = "VectorCore"; const char *const kAicoreEngine = "AIcoreEngine"; -string CreateEngineConfigJson() { +void CreateEngineConfigJson(string &dir_path, string &file_path) { GELOGI("Begin to create engine config json file."); string base_path = PluginManager::GetPath(); GELOGI("Base path is %s.", base_path.c_str()); - string dir_path = base_path.substr(0, base_path.rfind('/') + 1) + "plugin/nnengine/ge_config"; + dir_path = base_path.substr(0, base_path.rfind('/') + 1) + "plugin/nnengine/ge_config"; string cmd = "mkdir -p " + dir_path; system(cmd.c_str()); - string file_path = dir_path + "/engine_conf.json"; + file_path = dir_path + "/engine_conf.json"; GELOGI("Begin to write into the config file: %s.", file_path.c_str()); ofstream ofs(file_path, ios::out); EXPECT_EQ(!ofs, false); @@ -56,7 +56,6 @@ string CreateEngineConfigJson() { "}"; ofs.close(); GELOGI("Json config file %s has been written.", file_path.c_str()); - return file_path; } void DeleteFile(const string 
&file_name) { @@ -69,14 +68,16 @@ void DeleteFile(const string &file_name) { class UtestGraphOptimizeTest : public testing::Test { protected: void SetUp() { - config_file_ = CreateEngineConfigJson(); + CreateEngineConfigJson(config_dir_, config_file_); } void TearDown() { DeleteFile(config_file_); + DeleteFile(config_dir_); } private: + string config_dir_; string config_file_; }; From 4f049ae644495582cb5707cb16c572dc9dc74f5a Mon Sep 17 00:00:00 2001 From: zhou_chao1993 Date: Tue, 13 Jul 2021 17:13:38 +0800 Subject: [PATCH 194/226] modify ge common so --- .clang-format | 2 +- ge/CMakeLists.txt | 13 -- ge/client/ge_api.cc | 2 +- ge/common/CMakeLists.txt | 15 +- ge/common/auth/file_saver.cc | 22 ++- ge/{graph => }/common/bcast.cc | 4 +- ge/{graph => }/common/bcast.h | 6 +- ge/common/context/ctx.cc | 2 +- ge/common/cust_aicpu_kernel_store.h | 2 +- ge/common/debug/memory_dumper.cc | 11 +- ge/common/dump/dump_manager.cc | 19 ++- ge/common/dump/dump_properties.cc | 74 +++++---- ge/common/fmk_error_codes.cc | 7 +- .../format_transfer_transpose.h | 1 - ge/common/formats/formats.cc | 16 +- .../formats/utils/formats_trans_utils.cc | 5 +- ge/common/fp16_t.cc | 74 ++++++--- ge/common/ge/datatype_util.h | 2 +- ge/common/ge/tbe_plugin_manager.cc | 6 +- ge/{graph => }/common/ge_call_wrapper.h | 0 ge/common/ge_format_util.cc | 4 +- ge/common/helper/model_cache_helper.h | 2 +- ge/common/helper/model_helper.cc | 77 ++++------ ge/common/helper/om_file_helper.cc | 40 ++--- ge/common/kernel_store.h | 2 +- ge/{graph => }/common/local_context.cc | 2 +- ge/{graph => }/common/local_context.h | 0 ge/common/math/fp16_math.cc | 28 ++-- ge/{ => common}/model/ge_model.cc | 2 +- ge/{ => common}/model/ge_model.h | 12 +- ge/{ => common}/model/ge_root_model.cc | 3 +- ge/{ => common}/model/ge_root_model.h | 2 +- ge/common/model_parser/model_parser.cc | 12 +- ge/common/model_saver.cc | 3 +- ge/{graph => }/common/omg_util.cc | 22 +-- ge/{graph => }/common/omg_util.h | 0 ge/common/op/attr_value_util.cc | 141 ++++++++---------- ge/common/op/ge_op_utils.cc | 41 ++--- ge/common/profiling/ge_profiling.cc | 2 +- ge/common/profiling/profiling_manager.cc | 65 ++++---- ge/common/profiling/profiling_manager.h | 2 +- ge/common/properties_manager.cc | 15 +- ge/common/tbe_kernel_store.h | 2 +- ge/common/thread_pool.cc | 4 +- ge/common/thread_pool.h | 2 +- ge/{graph => }/common/transop_util.cc | 4 +- ge/{graph => }/common/transop_util.h | 2 +- ge/common/util.cc | 34 ++--- ge/executor/CMakeLists.txt | 14 +- ge/executor/module.mk | 6 +- ge/ge_inference.mk | 8 +- ge/ge_runner.mk | 8 +- ge/generator/ge_generator.cc | 2 +- ge/graph/build/graph_builder.cc | 4 +- ge/graph/build/graph_builder.h | 2 +- ge/graph/build/logical_stream_allocator.cc | 2 +- ge/graph/build/memory/block_mem_assigner.cc | 2 +- .../build/memory/buffer_pool_mem_assigner.cc | 2 +- ge/graph/build/memory/graph_mem_assigner.cc | 2 +- ge/graph/build/memory/var_mem_assign_util.cc | 2 +- ge/graph/build/model_builder.cc | 6 +- ge/graph/build/model_builder.h | 2 +- ge/graph/build/run_context.cc | 2 +- ge/graph/build/stream_allocator.cc | 2 +- ge/graph/build/task_generator.cc | 2 +- ge/graph/execute/model_executor.cc | 4 +- ge/graph/load/model_manager/davinci_model.cc | 6 +- ge/graph/load/model_manager/davinci_model.h | 2 +- ge/graph/load/model_manager/model_manager.cc | 4 +- ge/graph/load/model_manager/model_manager.h | 2 +- ge/graph/manager/graph_manager.cc | 10 +- ge/graph/manager/graph_manager.h | 2 +- ge/graph/manager/graph_manager_utils.h | 6 +- ge/graph/optimize/graph_optimize.cc 
| 2 +- ge/graph/optimize/mem_rw_conflict_optimize.cc | 2 +- ge/graph/partition/dynamic_shape_partition.cc | 2 +- ge/graph/partition/graph_partition.cc | 2 +- ge/graph/partition/graph_partition.h | 2 + ge/graph/passes/atomic_addr_clean_pass.cc | 2 +- ge/graph/passes/attach_stream_label_pass.cc | 2 +- ge/graph/passes/buffer_pool_memory_pass.cc | 2 +- ge/graph/passes/cast_remove_pass.cc | 2 +- ge/graph/passes/cast_translate_pass.cc | 2 +- ge/graph/passes/compile_nodes_pass.cc | 2 +- ge/graph/passes/control_trigger_pass.cc | 2 +- ge/graph/passes/dimension_adjust_pass.h | 2 +- ge/graph/passes/flow_ctrl_pass.cc | 2 +- ge/graph/passes/get_original_format_pass.cc | 2 +- ge/graph/passes/guarantee_const_pass.cc | 2 +- .../passes/hccl_tailing_optimization_pass.cc | 2 +- ge/graph/passes/identity_pass.cc | 2 +- ge/graph/passes/infershape_pass.cc | 2 +- ge/graph/passes/iterator_op_pass.cc | 2 +- .../mark_force_unknown_for_cond_pass.cc | 2 +- .../passes/mark_node_unknown_shape_pass.cc | 2 +- ge/graph/passes/merge_input_memcpy_pass.cc | 2 +- ge/graph/passes/merge_pass.cc | 2 +- ge/graph/passes/merge_to_stream_merge_pass.cc | 2 +- ge/graph/passes/multi_batch_clone_pass.cc | 4 +- ge/graph/passes/multi_batch_pass.cc | 2 +- ge/graph/passes/net_output_pass.cc | 2 +- ge/graph/passes/next_iteration_pass.cc | 2 +- ge/graph/passes/pass_manager.cc | 2 +- ge/graph/passes/pass_utils.cc | 2 +- ge/graph/passes/permute_pass.cc | 2 +- .../passes/placeholder_with_default_pass.cc | 2 +- ge/graph/passes/prevent_gradient_pass.cc | 2 +- ge/graph/passes/print_op_pass.h | 2 +- .../passes/ref_identity_delete_op_pass.cc | 2 +- ge/graph/passes/replace_transshape_pass.cc | 2 +- ge/graph/passes/snapshot_pass.cc | 2 +- ge/graph/passes/stop_gradient_pass.h | 2 +- .../passes/switch_dead_branch_elimination.cc | 2 +- .../passes/switch_to_stream_switch_pass.cc | 2 +- .../passes/transop_breadth_fusion_pass.cc | 2 +- ge/graph/passes/transop_depth_fusion_pass.cc | 2 +- .../transop_nearby_allreduce_fusion_pass.cc | 2 +- .../transop_symmetry_elimination_pass.cc | 2 +- .../transop_without_reshape_fusion_pass.cc | 2 +- ge/graph/passes/variable_op_pass.h | 2 +- ge/graph/passes/variable_prepare_op_pass.cc | 2 +- ge/graph/preprocess/graph_preprocess.cc | 6 +- ge/graph/preprocess/insert_op/ge_aipp_op.cc | 2 +- ge/graph/preprocess/multi_batch_copy_graph.cc | 4 +- ge/graph/preprocess/multi_batch_options.cc | 4 +- ge/host_kernels/add_kernel.cc | 2 +- ge/host_kernels/broadcast_args_kernel.cc | 2 +- .../broadcast_gradient_args_kernel.cc | 2 +- ge/host_kernels/cast_kernel.cc | 2 +- ge/host_kernels/floormod_kernel.cc | 2 +- ge/host_kernels/greater_kernel.cc | 2 +- ge/host_kernels/maximum_kernel.cc | 2 +- ge/host_kernels/mul_kernel.cc | 2 +- ge/host_kernels/permute_kernel.cc | 2 +- ge/host_kernels/sub_kernel.cc | 2 +- ge/host_kernels/transdata_kernel.cc | 2 +- ge/hybrid/hybrid_davinci_model.h | 2 +- ge/hybrid/model/hybrid_model.h | 2 +- ge/hybrid/model/hybrid_model_builder.cc | 2 +- ge/hybrid/model/hybrid_model_builder.h | 2 +- ge/init/gelib.cc | 2 +- ge/ir_build/attr_options/utils.cc | 2 +- ge/ir_build/ge_ir_build.cc | 2 +- ge/session/inner_session.cc | 2 +- inc/framework/common/helper/model_helper.h | 20 ++- tests/ut/ge/CMakeLists.txt | 13 +- tests/ut/ge/common/fp16_unittest.cc | 56 +++++++ .../ge/graph/build/model_builder_unittest.cc | 2 +- tests/ut/ge/graph/graph_load_unittest.cc | 2 +- .../ut/ge/graph/load/model_helper_unittest.cc | 2 +- .../graph/manager/graph_manager_unittest.cc | 10 +- .../dynamic_shape_partition_unittest.cc | 2 +- 
.../mark_node_unknown_shape_pass_unittest.cc | 2 +- .../passes/multi_batch_clone_pass_unittest.cc | 2 +- .../subgraph_const_migration_pass_unittest.cc | 2 +- tests/ut/ge/graph/transop_util_unittest.cc | 2 +- tests/ut/ge/hybrid/ge_hybrid_unittest.cc | 4 +- .../model/hybrid_model_builder_unittest.cc | 2 +- .../ge_local_node_executor_unittest.cc | 2 +- .../host_cpu/host_cpu_node_task_unittest.cc | 2 +- .../rts/rts_node_task_unittest.cc | 2 +- 161 files changed, 592 insertions(+), 599 deletions(-) rename ge/{graph => }/common/bcast.cc (98%) rename ge/{graph => }/common/bcast.h (98%) rename ge/{graph => }/common/ge_call_wrapper.h (100%) rename ge/{graph => }/common/local_context.cc (97%) rename ge/{graph => }/common/local_context.h (100%) rename ge/{ => common}/model/ge_model.cc (99%) rename ge/{ => common}/model/ge_model.h (90%) rename ge/{ => common}/model/ge_root_model.cc (95%) rename ge/{ => common}/model/ge_root_model.h (98%) rename ge/{graph => }/common/omg_util.cc (95%) rename ge/{graph => }/common/omg_util.h (100%) rename ge/{graph => }/common/transop_util.cc (97%) rename ge/{graph => }/common/transop_util.h (95%) create mode 100644 tests/ut/ge/common/fp16_unittest.cc diff --git a/.clang-format b/.clang-format index 6faea40d..dd8abe32 100644 --- a/.clang-format +++ b/.clang-format @@ -11,7 +11,7 @@ AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: All +AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: true AllowShortLoopsOnASingleLine: true AlwaysBreakAfterDefinitionReturnType: None diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index dead6aa5..cb4c84b1 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -112,7 +112,6 @@ set(EXECUTOR_SRC_LIST "analyzer/analyzer.cc" "common/dump/dump_manager.cc" "common/dump/dump_op.cc" - "common/dump/dump_properties.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" "common/formats/format_transfers/format_transfer_transpose.cc" @@ -126,9 +125,6 @@ set(EXECUTOR_SRC_LIST "executor/ge_executor.cc" "ge_local_engine/engine/host_cpu_engine.cc" "graph/build/memory/var_mem_assign_util.cc" - "graph/common/bcast.cc" - "graph/common/local_context.cc" - "graph/common/omg_util.cc" "graph/execute/graph_execute.cc" "graph/execute/model_executor.cc" "graph/load/graph_loader.cc" @@ -255,8 +251,6 @@ set(EXECUTOR_SRC_LIST "hybrid/node_executor/rts/rts_task_factory.cc" "hybrid/node_executor/task_context.cc" "init/gelib.cc" - "model/ge_model.cc" - "model/ge_root_model.cc" "opskernel_manager/ops_kernel_builder_manager.cc" "opskernel_manager/ops_kernel_manager.cc" "single_op/single_op.cc" @@ -274,7 +268,6 @@ set(EXECUTOR_SRC_LIST ################################################################## set(COMPILER_SRC_LIST "analyzer/analyzer.cc" - "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/dump_properties.cc" "common/formats/format_transfers/datatype_transfer.cc" @@ -322,10 +315,6 @@ set(COMPILER_SRC_LIST "graph/build/stream_allocator.cc" "graph/build/stream_graph_optimizer.cc" "graph/build/task_generator.cc" - "graph/common/bcast.cc" - "graph/common/local_context.cc" - "graph/common/omg_util.cc" - "graph/common/transop_util.cc" "graph/label/case_label_maker.cc" "graph/label/if_label_maker.cc" "graph/label/label_maker.cc" @@ -508,8 +497,6 @@ set(COMPILER_SRC_LIST "ir_build/attr_options/weight_compress_option.cc" "ir_build/ge_ir_build.cc" "ir_build/option_utils.cc" 
- "model/ge_model.cc" - "model/ge_root_model.cc" "opskernel_manager/ops_kernel_builder_manager.cc" "opskernel_manager/ops_kernel_manager.cc" ) diff --git a/ge/client/ge_api.cc b/ge/client/ge_api.cc index 3cf7c3c4..e4a016b3 100644 --- a/ge/client/ge_api.cc +++ b/ge/client/ge_api.cc @@ -29,7 +29,7 @@ #include "graph/opsproto_manager.h" #include "graph/utils/type_utils.h" #include "graph/manager/util/rt_context_util.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "register/op_registry.h" #include "common/ge/tbe_plugin_manager.h" #include "common/util/error_manager/error_manager.h" diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index 1872b4c2..0d41b86f 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -2,16 +2,23 @@ set(SRC_LIST "context/ctx.cc" "model_saver.cc" "ge/datatype_util.cc" + "ge/plugin_manager.cc" + "ge/op_tiling_manager.cc" "helper/om_file_helper.cc" "helper/model_helper.cc" - "../model/ge_model.cc" - "../model/ge_root_model.cc" + "model/ge_model.cc" + "model/ge_root_model.cc" + "bcast.cc" + "local_context.cc" + "omg_util.cc" + "transop_util.cc" "auth/file_saver.cc" "fp16_t.cc" "math/fp16_math.cc" "debug/memory_dumper.cc" "formats/utils/formats_trans_utils.cc" "dump/dump_properties.cc" + "dump/dump_manager.cc" "formats/format_transfers/datatype_transfer.cc" "formats/format_transfers/format_transfer_transpose.cc" "formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" @@ -63,7 +70,7 @@ target_compile_definitions(ge_common PRIVATE ) target_compile_options(ge_common PRIVATE - -fvisibility=hidden + -fvisibility=default -O2 -Werror -Wno-deprecated-declarations @@ -183,7 +190,7 @@ target_compile_definitions(ge_common PRIVATE ) target_compile_options(ge_common PRIVATE - -fvisibility=hidden + -fvisibility=default -O2 -Werror -Wno-deprecated-declarations diff --git a/ge/common/auth/file_saver.cc b/ge/common/auth/file_saver.cc index 57ab901b..d6f24497 100755 --- a/ge/common/auth/file_saver.cc +++ b/ge/common/auth/file_saver.cc @@ -238,7 +238,7 @@ Status FileSaver::SaveToBuffWithFileHeader(const ModelFileHeader &file_header, return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::CheckPath(const std::string &file_path) { +Status FileSaver::CheckPath(const std::string &file_path) { // Determine file path length if (file_path.size() >= MMPA_MAX_PATH) { GELOGE(FAILED, "[Check][FilePath]Failed, file path's length:%zu > mmpa_max_path:%d", @@ -271,8 +271,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::CheckPath(con return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status -FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const ModelFileHeader *model_file_header) { +Status FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, + const ModelFileHeader *model_file_header) { if (file_path.empty() || model.model_data == nullptr || model.model_len == 0) { GELOGE(FAILED, "[Save][File]Incorrect input param, " "file_path is empty or model_data is nullptr or model_len is 0"); @@ -301,19 +301,18 @@ FileSaver::SaveToFile(const string &file_path, const ge::ModelData &model, const return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status -FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, ModelPartitionTable &model_partition_table, - const std::vector &partition_datas) { +Status FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, + 
ModelPartitionTable &model_partition_table, + const std::vector &partition_datas) { const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_table, partition_datas); GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.", file_path.c_str(), file_header.length); return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status -FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, - vector &model_partition_tables, - const vector> &all_partition_datas) { +Status FileSaver::SaveToFile(const string &file_path, ModelFileHeader &file_header, + vector &model_partition_tables, + const vector> &all_partition_datas) { const Status ret = SaveWithFileHeader(file_path, file_header, model_partition_tables, all_partition_datas); GE_CHK_BOOL_RET_STATUS(ret == SUCCESS, FAILED, "save file failed, file_path:%s, file header len:%u.", file_path.c_str(), file_header.length); @@ -372,8 +371,7 @@ Status FileSaver::SaveWithFileHeader(const std::string &file_path, const ModelFi return ret; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status FileSaver::SaveToFile(const string &file_path, const void *data, - int len) { +Status FileSaver::SaveToFile(const string &file_path, const void *data, int len) { if (data == nullptr || len <= 0) { GELOGE(FAILED, "[Check][Param]Failed, model_data is null or the " "length[%d] is less than 1.", len); diff --git a/ge/graph/common/bcast.cc b/ge/common/bcast.cc similarity index 98% rename from ge/graph/common/bcast.cc rename to ge/common/bcast.cc index fcc8f9a1..a4e8d1a1 100644 --- a/ge/graph/common/bcast.cc +++ b/ge/common/bcast.cc @@ -14,12 +14,12 @@ * limitations under the License. */ -#include "graph/common/bcast.h" +#include "common/bcast.h" #include #include "common/math_util.h" -#include "framework/common/util.h" +#include "common/util.h" using domi::Status; diff --git a/ge/graph/common/bcast.h b/ge/common/bcast.h similarity index 98% rename from ge/graph/common/bcast.h rename to ge/common/bcast.h index 184751fe..a8399896 100644 --- a/ge/graph/common/bcast.h +++ b/ge/common/bcast.h @@ -21,11 +21,11 @@ #include #include -#include "framework/common/debug/log.h" -#include "framework/common/types.h" +#include "common/debug/log.h" +#include "common/types.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "external/graph/attr_value.h" +#include "graph/attr_value.h" #include "graph/ge_tensor.h" #include "graph/utils/tensor_adapter.h" diff --git a/ge/common/context/ctx.cc b/ge/common/context/ctx.cc index 9fe2f8c7..8e138ade 100755 --- a/ge/common/context/ctx.cc +++ b/ge/common/context/ctx.cc @@ -18,7 +18,7 @@ using ge::OmgContext; namespace domi { -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OmgContext &GetContext() { +OmgContext &GetContext() { static OmgContext context; return context; } diff --git a/ge/common/cust_aicpu_kernel_store.h b/ge/common/cust_aicpu_kernel_store.h index 033a636b..38124587 100755 --- a/ge/common/cust_aicpu_kernel_store.h +++ b/ge/common/cust_aicpu_kernel_store.h @@ -21,7 +21,7 @@ namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY CustAICPUKernelStore : public KernelStore { +class CustAICPUKernelStore : public KernelStore { public: CustAICPUKernelStore(); ~CustAICPUKernelStore() {} diff --git a/ge/common/debug/memory_dumper.cc b/ge/common/debug/memory_dumper.cc index 78ef2daa..f4a49440 100644 --- a/ge/common/debug/memory_dumper.cc +++ b/ge/common/debug/memory_dumper.cc @@ 
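The FMK_FUNC_HOST_VISIBILITY / FMK_FUNC_DEV_VISIBILITY deletions above (and throughout the rest of this patch) only keep the exported symbols reachable because ge_common now builds with -fvisibility=default instead of -fvisibility=hidden. A sketch of how such export macros are conventionally defined on GCC/Clang; this is an assumption for illustration, not the literal definition in this repository:

// Conventional per-symbol export macro on GCC/Clang (sketch).
#ifdef __GNUC__
#define FMK_FUNC_HOST_VISIBILITY __attribute__((visibility("default")))
#else
#define FMK_FUNC_HOST_VISIBILITY
#endif

// Annotated: exported even when the TU is compiled with -fvisibility=hidden.
FMK_FUNC_HOST_VISIBILITY int ExportedHelper();

// Unannotated: hidden under -fvisibility=hidden, but exported under
// -fvisibility=default -- which is why the macros can be dropped only
// together with the compile-option flip in ge/common/CMakeLists.txt above.
int PlainHelper();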
diff --git a/ge/common/debug/memory_dumper.cc b/ge/common/debug/memory_dumper.cc
index 78ef2daa..f4a49440 100644
--- a/ge/common/debug/memory_dumper.cc
+++ b/ge/common/debug/memory_dumper.cc
@@ -30,13 +30,12 @@ const int kInvalidFd = (-1);
 }  // namespace

 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY MemoryDumper::MemoryDumper() : fd_(kInvalidFd) {}
+MemoryDumper::MemoryDumper() : fd_(kInvalidFd) {}

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY MemoryDumper::~MemoryDumper() { Close(); }
+MemoryDumper::~MemoryDumper() { Close(); }

 // Dump the data to the file
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::DumpToFile(const char *filename, void *data,
-                                                                                 int64_t len) {
+Status MemoryDumper::DumpToFile(const char *filename, void *data, int64_t len) {
 #ifdef FMK_SUPPORT_DUMP
   GE_CHECK_NOTNULL(filename);
   GE_CHECK_NOTNULL(data);
@@ -81,7 +80,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::DumpToFile
 }

 // Open file
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Open(const char *filename) {
+Status MemoryDumper::Open(const char *filename) {
   GE_CHK_BOOL_RET_STATUS(filename != nullptr, FAILED, "Incorrect parameter. filename is nullptr");

   // Try to remove file first for reduce the close time by overwriting way
@@ -104,7 +103,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Open(const
 }

 // Dump the data to file
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status MemoryDumper::Dump(void *data, uint32_t len) const {
+Status MemoryDumper::Dump(void *data, uint32_t len) const {
   GE_CHK_BOOL_RET_STATUS(data != nullptr, FAILED, "Incorrect parameter. data is nullptr");

 #ifdef FMK_SUPPORT_DUMP

diff --git a/ge/common/dump/dump_manager.cc b/ge/common/dump/dump_manager.cc
index ebe16fed..da8160ff 100644
--- a/ge/common/dump/dump_manager.cc
+++ b/ge/common/dump/dump_manager.cc
@@ -15,6 +15,7 @@
  */

 #include "common/dump/dump_manager.h"
+
 #include "framework/common/debug/ge_log.h"
 #include "framework/common/debug/log.h"

@@ -26,7 +27,7 @@ const uint64_t kInferSessionId = 0;
 const uint32_t kAllOverflow = 3;
 }  // namespace
 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpManager &DumpManager::GetInstance() {
+DumpManager &DumpManager::GetInstance() {
   static DumpManager instance;
   return instance;
 }
@@ -74,7 +75,7 @@ void DumpManager::SetDumpList(const DumpConfig &dump_config, DumpProperties &dum

 Status DumpManager::SetNormalDumpConf(const DumpConfig &dump_config, DumpProperties &dump_properties) {
   if (dump_config.dump_status == kDumpOn) {
-    GELOGI("Only do normal dump process, dump status is %s.", dump_config.dump_status.c_str());
+    GELOGI("Only do normal dump process, dump status is %s", dump_config.dump_status.c_str());
     dump_properties.SetDumpStatus(dump_config.dump_status);
     std::string dump_op_switch = dump_config.dump_op_switch;
     dump_properties.SetDumpOpSwitch(dump_op_switch);
@@ -104,8 +105,8 @@ Status DumpManager::SetNormalDumpConf(const DumpConfig &dump_config, DumpPropert
 Status DumpManager::SetDumpPath(const DumpConfig &dump_config, DumpProperties &dump_properties) {
   std::string dump_path = dump_config.dump_path;
   if (dump_path.empty()) {
-    GELOGE(PARAM_INVALID, "[Check][DumpPath]It is empty");
-    REPORT_INNER_ERROR("E19999", "Dump path check is empty");
+    GELOGE(PARAM_INVALID, "[Check][DumpPath]It is empty.");
+    REPORT_INNER_ERROR("E19999", "Dump path check is empty.");
     return PARAM_INVALID;
   }
   if (dump_path[dump_path.size() - 1] != '/') {
@@ -117,7 +118,7 @@ Status DumpManager::SetDumpPath(const DumpConfig &dump_config, DumpProperties &d
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf(const DumpConfig &dump_config) {
+Status DumpManager::SetDumpConf(const DumpConfig &dump_config) {
   DumpProperties dump_properties;
   if (!NeedDoDump(dump_config, dump_properties)) {
     GELOGD("No need do dump process.");
@@ -131,8 +132,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpManager::SetDumpConf
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const DumpProperties &DumpManager::GetDumpProperties(
-    uint64_t session_id) {
+const DumpProperties &DumpManager::GetDumpProperties(uint64_t session_id) {
   std::lock_guard<std::mutex> lock(mutex_);
   auto iter = dump_properties_map_.find(session_id);
   if (iter != dump_properties_map_.end()) {
@@ -142,13 +142,12 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const DumpProperties &DumpManag
   return default_properties;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpManager::AddDumpProperties(
-    uint64_t session_id, const DumpProperties &dump_properties) {
+void DumpManager::AddDumpProperties(uint64_t session_id, const DumpProperties &dump_properties) {
   std::lock_guard<std::mutex> lock(mutex_);
   dump_properties_map_.emplace(session_id, dump_properties);
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpManager::RemoveDumpProperties(uint64_t session_id) {
+void DumpManager::RemoveDumpProperties(uint64_t session_id) {
   std::lock_guard<std::mutex> lock(mutex_);
   auto iter = dump_properties_map_.find(session_id);
   if (iter != dump_properties_map_.end()) {
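DumpManager::GetInstance above is a Meyers singleton, and the per-session map it guards follows a plain lock-then-lookup shape. A condensed, self-contained sketch of that pattern (simplified value type; the real GetDumpProperties returns a reference to a static default instead of a copy):

#include <cstdint>
#include <map>
#include <mutex>
#include <string>

class DumpRegistry {
 public:
  // Function-local static: constructed on first use, thread-safe since C++11.
  static DumpRegistry &GetInstance() {
    static DumpRegistry instance;
    return instance;
  }

  void Add(uint64_t session_id, const std::string &properties) {
    std::lock_guard<std::mutex> lock(mutex_);
    properties_map_.emplace(session_id, properties);
  }

  // Falls back to an empty value for unknown sessions, mirroring the
  // default_properties fallback in DumpManager::GetDumpProperties above.
  std::string Get(uint64_t session_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto iter = properties_map_.find(session_id);
    return iter != properties_map_.end() ? iter->second : std::string();
  }

 private:
  std::mutex mutex_;
  std::map<uint64_t, std::string> properties_map_;
};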
diff --git a/ge/common/dump/dump_properties.cc b/ge/common/dump/dump_properties.cc
index 099920e7..3bed76d9 100644
--- a/ge/common/dump/dump_properties.cc
+++ b/ge/common/dump/dump_properties.cc
@@ -38,9 +38,7 @@ const uint32_t kAtomicOverflow = (0x1 << 1);
 const uint32_t kAllOverflow = (kAicoreOverflow | kAtomicOverflow);
 }  // namespace
 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::Split(const std::string &s,
-                                                                            std::vector<std::string> &result,
-                                                                            const char *delchar) {
+void DumpProperties::Split(const std::string &s, std::vector<std::string> &result, const char *delchar) {
   if (s.empty()) {
     return;
   }
@@ -68,7 +66,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::Split(cons
   delete[] buffer;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpStep(const std::string &dump_step) {
+Status DumpProperties::CheckDumpStep(const std::string &dump_step) {
   std::string modified_dum_step = dump_step + "|";
   std::smatch result;
   std::vector<std::string> match_vecs;
@@ -126,7 +124,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDum
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpMode(const std::string &dump_mode) {
+Status DumpProperties::CheckDumpMode(const std::string &dump_mode) {
   const std::set<std::string> dump_mode_list = {"input", "output", "all"};
   std::set<std::string>::iterator iter;

@@ -143,7 +141,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDum
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDumpPath(const std::string &input) {
+Status DumpProperties::CheckDumpPath(const std::string &input) {
   if (mmIsDir(input.c_str()) != EN_OK) {
     REPORT_INPUT_ERROR("E10001", std::vector<std::string>({"parameter", "value", "reason"}),
                        std::vector<std::string>({
@@ -175,7 +173,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckDum
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckEnableDump(const std::string &input) {
+Status DumpProperties::CheckEnableDump(const std::string &input) {
   std::set<std::string> enable_dump_option_list = {"1", "0"};
   auto it = enable_dump_option_list.find(input);
   if (it == enable_dump_option_list.end()) {
@@ -191,17 +189,16 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::CheckEna
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties::DumpProperties(const DumpProperties &other) {
+DumpProperties::DumpProperties(const DumpProperties &other) {
   CopyFrom(other);
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY DumpProperties &DumpProperties::operator=(
-    const DumpProperties &other) {
+DumpProperties &DumpProperties::operator=(const DumpProperties &other) {
   CopyFrom(other);
   return *this;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpOptions() {
+Status DumpProperties::SetDumpOptions() {
   if (enable_dump_ == kEnableFlag) {
     std::string dump_step;
     if (GetContext().GetOption(OPTION_EXEC_DUMP_STEP, dump_step) == GRAPH_SUCCESS && !dump_step.empty()) {
@@ -220,7 +217,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::SetDumpO
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOptions() {
+Status DumpProperties::InitByOptions() {
   enable_dump_.clear();
   enable_dump_debug_.clear();
   dump_path_.clear();
@@ -281,8 +278,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status DumpProperties::InitByOp
 }

 // The following is the new dump scenario of the fusion operator
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::AddPropertyValue(
-    const std::string &model, const std::set<std::string> &layers) {
+void DumpProperties::AddPropertyValue(const std::string &model, const std::set<std::string> &layers) {
   for (const std::string &layer : layers) {
     GELOGI("This model %s config to dump layer %s", model.c_str(), layer.c_str());
   }
@@ -290,18 +286,18 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::AddPropert
   model_dump_properties_map_[model] = layers;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::DeletePropertyValue(const std::string &model) {
+void DumpProperties::DeletePropertyValue(const std::string &model) {
   auto iter = model_dump_properties_map_.find(model);
   if (iter != model_dump_properties_map_.end()) {
     model_dump_properties_map_.erase(iter);
   }
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpPropertyValue() {
+void DumpProperties::ClearDumpPropertyValue() {
   model_dump_properties_map_.clear();
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpInfo() {
+void DumpProperties::ClearDumpInfo() {
   enable_dump_.clear();
   enable_dump_debug_.clear();
   dump_path_.clear();
@@ -314,7 +310,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::ClearDumpI
   op_debug_mode_ = 0;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpProperties::GetAllDumpModel() const {
+std::set<std::string> DumpProperties::GetAllDumpModel() const {
   std::set<std::string> model_list;
   for (auto &iter : model_dump_properties_map_) {
     model_list.insert(iter.first);
@@ -323,8 +319,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpPrope
   return model_list;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpProperties::GetPropertyValue(
-    const std::string &model) const {
+std::set<std::string> DumpProperties::GetPropertyValue(const std::string &model) const {
   auto iter = model_dump_properties_map_.find(model);
   if (iter != model_dump_properties_map_.end()) {
     return iter->second;
@@ -332,8 +327,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::set<std::string> DumpPrope
   return {};
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsLayerNeedDump(
-    const std::string &model, const std::string &om_name, const std::string &op_name) const {
+bool DumpProperties::IsLayerNeedDump(const std::string &model, const std::string &om_name,
+                                     const std::string &op_name) const {
   // if dump all
   GELOGD("model name is %s om name is %s op is %s in layer need dump", model.c_str(), om_name.c_str(),
          op_name.c_str());
   if (model_dump_properties_map_.find(DUMP_ALL_MODEL) != model_dump_properties_map_.end()) {
@@ -353,67 +348,66 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsLayerNee
     return model_iter->second.find(op_name) != model_iter->second.end();
   }

-  GELOGD("Model %s is not seated to be dump.", model.c_str());
+  GELOGD("Model %s is not seated to be dump", model.c_str());
   return false;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpPath(const std::string &path) {
+void DumpProperties::SetDumpPath(const std::string &path) {
   dump_path_ = path;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpPath() const {
+const std::string &DumpProperties::GetDumpPath() const {
   return dump_path_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpStep(const std::string &step) {
+void DumpProperties::SetDumpStep(const std::string &step) {
   dump_step_ = step;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpStep() const {
+const std::string &DumpProperties::GetDumpStep() const {
   return dump_step_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpMode(const std::string &mode) {
+void DumpProperties::SetDumpMode(const std::string &mode) {
   dump_mode_ = mode;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpMode() const {
+const std::string &DumpProperties::GetDumpMode() const {
   return dump_mode_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpStatus(const std::string &status) {
+void DumpProperties::SetDumpStatus(const std::string &status) {
   dump_status_ = status;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpStatus() const {
+const std::string &DumpProperties::GetDumpStatus() const {
   return dump_status_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::InitInferOpDebug() {
+void DumpProperties::InitInferOpDebug() {
   is_infer_op_debug_ = true;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetOpDebugMode(const uint32_t &op_debug_mode) {
+void DumpProperties::SetOpDebugMode(const uint32_t &op_debug_mode) {
   op_debug_mode_ = op_debug_mode;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void DumpProperties::SetDumpOpSwitch(
-    const std::string &dump_op_switch) {
+void DumpProperties::SetDumpOpSwitch(const std::string &dump_op_switch) {
   dump_op_switch_ = dump_op_switch;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::string &DumpProperties::GetDumpOpSwitch() const {
+const std::string &DumpProperties::GetDumpOpSwitch() const {
   return dump_op_switch_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsSingleOpNeedDump() const {
+bool DumpProperties::IsSingleOpNeedDump() const {
   if (dump_op_switch_ == kDumpStatusOpen) {
     return true;
   }
   return false;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool DumpProperties::IsDumpOpen() const {
+bool DumpProperties::IsDumpOpen() const {
   if (enable_dump_ == kEnableFlag || dump_status_ == kDumpStatusOpen) {
     return true;
   }
@@ -441,7 +435,7 @@ Status DumpProperties::SetDumpDebugOptions() {
   if (enable_dump_debug_ == kEnableFlag) {
     std::string dump_debug_mode;
     if (GetContext().GetOption(OPTION_EXEC_DUMP_DEBUG_MODE, dump_debug_mode) == GRAPH_SUCCESS) {
-      GELOGD("Get ge.exec.dumpDebugMode %s successfully", dump_debug_mode.c_str());
+      GELOGD("Get ge.exec.dumpDebugMode %s successfully.", dump_debug_mode.c_str());
     } else {
       GELOGW("ge.exec.dumpDebugMode is not set.");
       return SUCCESS;
@@ -469,7 +463,7 @@ Status DumpProperties::SetDumpDebugOptions() {
       return PARAM_INVALID;
     }
   } else {
-    GELOGI("ge.exec.enableDumpDebug is false or is not set.");
+    GELOGI("ge.exec.enableDumpDebug is false or is not set");
   }
   return SUCCESS;
 }

diff --git a/ge/common/fmk_error_codes.cc b/ge/common/fmk_error_codes.cc
index ddb8089d..180af0e2 100755
--- a/ge/common/fmk_error_codes.cc
+++ b/ge/common/fmk_error_codes.cc
@@ -17,19 +17,18 @@
 #include "framework/common/fmk_error_codes.h"

 namespace domi {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY StatusFactory *StatusFactory::Instance() {
+StatusFactory *StatusFactory::Instance() {
   static StatusFactory instance;
   return &instance;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void StatusFactory::RegisterErrorNo(uint32_t err,
-                                                                                     const std::string &desc) {
+void StatusFactory::RegisterErrorNo(uint32_t err, const std::string &desc) {
   if (err_desc_.find(err) != err_desc_.end()) {
     return;
   }
   err_desc_[err] = desc;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string StatusFactory::GetErrDesc(uint32_t err) {
+std::string StatusFactory::GetErrDesc(uint32_t err) {
   auto iter_find = err_desc_.find(err);
   if (iter_find == err_desc_.end()) {
     return "";

diff --git a/ge/common/formats/format_transfers/format_transfer_transpose.h b/ge/common/formats/format_transfers/format_transfer_transpose.h
index 7fa19ff0..b608777c 100755
--- a/ge/common/formats/format_transfers/format_transfer_transpose.h
+++ b/ge/common/formats/format_transfers/format_transfer_transpose.h
@@ -33,7 +33,6 @@ Status TransposeWithShapeCheck(const uint8_t *src, const std::vector<int64_t> &s

 Status GetPermByForamt(Format src_format, Format dst_format, std::vector<int64_t> &perm);

-
 class FormatTransferTranspose : public FormatTransfer {
  public:
   Status TransFormat(const TransArgs &args, TransResult &result) override;
diff --git a/ge/common/formats/formats.cc b/ge/common/formats/formats.cc
index 9e97a4d2..5a454d60 100755
--- a/ge/common/formats/formats.cc
+++ b/ge/common/formats/formats.cc
@@ -17,6 +17,7 @@
 #include "common/formats/formats.h"

 #include
+
 #include
 #include
 #include
@@ -32,7 +33,7 @@

 namespace ge {
 namespace formats {
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransFormat(const TransArgs &args, TransResult &result) {
+Status TransFormat(const TransArgs &args, TransResult &result) {
   auto transfer = BuildFormatTransfer(args);
   if (transfer == nullptr) {
     std::string error = "Failed to trans data from format " +
@@ -56,11 +57,8 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransFormat(const TransArg
   return transfer->TransFormat(args, result);
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransShape(Format src_format,
-                                                                 const std::vector<int64_t> &src_shape,
-                                                                 DataType data_type,
-                                                                 Format dst_format,
-                                                                 std::vector<int64_t> &dst_shape) {
+Status TransShape(Format src_format, const std::vector<int64_t> &src_shape, DataType data_type, Format dst_format,
+                  std::vector<int64_t> &dst_shape) {
   formats::TransArgs args;
   args.src_format = src_format;
   args.dst_format = dst_format;
@@ -76,7 +74,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransShape(Format src_form
   return transfer->TransShape(src_format, src_shape, data_type, dst_format, dst_shape);
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransDataType(const CastArgs &args, TransResult &result) {
+Status TransDataType(const CastArgs &args, TransResult &result) {
   auto transfer = BuildDataTypeTransfer(args);
   if (transfer == nullptr) {
     std::string error = "Failed to trans data from datatype " +
@@ -95,11 +93,11 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status TransDataType(const CastAr
   return transfer->TransDataType(args, result);
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY bool IsTransFormatSupport(const TransArgs &args) {
+bool IsTransFormatSupport(const TransArgs &args) {
   return FormatTransferExists(args);
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY bool IsTransDataTypeSupport(const CastArgs &args) {
+bool IsTransDataTypeSupport(const CastArgs &args) {
   return DataTypeTransferExists(args);
 }
 }  // namespace formats

diff --git a/ge/common/formats/utils/formats_trans_utils.cc b/ge/common/formats/utils/formats_trans_utils.cc
index db1812d0..63ad424f 100755
--- a/ge/common/formats/utils/formats_trans_utils.cc
+++ b/ge/common/formats/utils/formats_trans_utils.cc
@@ -41,15 +41,14 @@ int64_t GetCubeSizeByDataType(DataType data_type) {
   }
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string ShapeToString(const GeShape &shape) {
+std::string ShapeToString(const GeShape &shape) {
   return ShapeToString(shape.GetDims());
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string ShapeToString(const std::vector<int64_t> &shape) {
+std::string ShapeToString(const std::vector<int64_t> &shape) {
   return JoinToString(shape);
 }

-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY std::string RangeToString(
-    const std::vector<std::pair<int64_t, int64_t>> &ranges) {
+std::string RangeToString(const std::vector<std::pair<int64_t, int64_t>> &ranges) {
   bool first = true;
   std::stringstream ss;

diff --git a/ge/common/fp16_t.cc b/ge/common/fp16_t.cc
index 2f94323d..adb55dfb 100755
--- a/ge/common/fp16_t.cc
+++ b/ge/common/fp16_t.cc
@@ -1180,20 +1180,40 @@ fp16_t &fp16_t::operator=(const double &d_val) {
 }

 // convert
-fp16_t::operator float() const { return Fp16ToFloat(val); }
-fp16_t::operator double() const { return Fp16ToDouble(val); }
-fp16_t::operator int8_t() const { return Fp16ToInt8(val); }
-fp16_t::operator uint8_t() const { return Fp16ToUInt8(val); }
-fp16_t::operator int16_t() const { return Fp16ToInt16(val); }
-fp16_t::operator uint16_t() const { return Fp16ToUInt16(val); }
-fp16_t::operator int32_t() const { return Fp16ToInt32(val); }
-fp16_t::operator uint32_t() const { return Fp16ToUInt32(val); }
+fp16_t::operator float() const {
+  return Fp16ToFloat(val);
+}
+fp16_t::operator double() const {
+  return Fp16ToDouble(val);
+}
+fp16_t::operator int8_t() const {
+  return Fp16ToInt8(val);
+}
+fp16_t::operator uint8_t() const {
+  return Fp16ToUInt8(val);
+}
+fp16_t::operator int16_t() const {
+  return Fp16ToInt16(val);
+}
+fp16_t::operator uint16_t() const {
+  return Fp16ToUInt16(val);
+}
+fp16_t::operator int32_t() const {
+  return Fp16ToInt32(val);
+}
+fp16_t::operator uint32_t() const {
+  return Fp16ToUInt32(val);
+}
 // Cannot be used, just in order to solve the compile error
-fp16_t::operator int64_t() const { return 0; }
+fp16_t::operator int64_t() const {
+  return 0;
+}
 // Cannot be used, just in order to solve the compile error
-fp16_t::operator uint64_t() const { return 0; }
+fp16_t::operator uint64_t() const {
+  return 0;
+}

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int fp16_t::IsInf() {
+int fp16_t::IsInf() {
   if ((val & kFp16AbsMax) == kFp16ExpMask) {
     if (val & kFp16SignMask) {
       return -1;
@@ -1205,12 +1225,28 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int fp16_t::IsInf() {
   }
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY float fp16_t::ToFloat() const { return Fp16ToFloat(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY double fp16_t::ToDouble() const { return Fp16ToDouble(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int8_t fp16_t::ToInt8() const { return Fp16ToInt8(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint8_t fp16_t::ToUInt8() const { return Fp16ToUInt8(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int16_t fp16_t::ToInt16() const { return Fp16ToInt16(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint16_t fp16_t::ToUInt16() const { return Fp16ToUInt16(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int32_t fp16_t::ToInt32() const { return Fp16ToInt32(val); }
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint32_t fp16_t::ToUInt32() const { return Fp16ToUInt32(val); }
+float fp16_t::ToFloat() const {
+  return Fp16ToFloat(val);
+}
+double fp16_t::ToDouble() const {
+  return Fp16ToDouble(val);
+}
+int8_t fp16_t::ToInt8() const {
+  return Fp16ToInt8(val);
+}
+uint8_t fp16_t::ToUInt8() const {
+  return Fp16ToUInt8(val);
+}
+int16_t fp16_t::ToInt16() const {
+  return Fp16ToInt16(val);
+}
+uint16_t fp16_t::ToUInt16() const {
+  return Fp16ToUInt16(val);
+}
+int32_t fp16_t::ToInt32() const {
+  return Fp16ToInt32(val);
+}
+uint32_t fp16_t::ToUInt32() const {
+  return Fp16ToUInt32(val);
+}
 }  // namespace ge

diff --git a/ge/common/ge/datatype_util.h b/ge/common/ge/datatype_util.h
index c3b41b81..82c8d259 100644
--- a/ge/common/ge/datatype_util.h
+++ b/ge/common/ge/datatype_util.h
@@ -42,7 +42,7 @@ static std::map<ge::DataType, uint32_t> CONST_OPDATA_TYPE_SIZE_MAP = {
     {ge::DT_UINT8, kGeSizeUint8},   {ge::DT_UINT16, kGeSizeUint16}, {ge::DT_UINT32, kGeSizeUint32},
     {ge::DT_UINT64, kGeSizeUint64}, {ge::DT_DOUBLE, kGeSizeDouble}, {ge::DT_BOOL, kGeSizeBool}};

-class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY DataTypeUtil {
+class DataTypeUtil {
  public:
   static bool DataTypeTranslatable(const ge::DataType &src_out_data_type, const ge::DataType &dst_in_data_type);
   static const std::vector<ge::DataType> &GetTranslatableDataTypesBySrc(const ge::DataType &src_out_data_type);
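The formats.cc hunks above keep the builder-then-delegate shape of TransFormat and TransDataType: build a transfer object for the argument pair, fail loudly if none exists, otherwise delegate. A stripped-down, self-contained sketch of that dispatch idiom; the types and registry below are hypothetical stand-ins, not the repo's real FormatTransfer machinery:

#include <map>
#include <memory>
#include <utility>

// Stand-in for the real transfer interface.
struct Transfer {
  virtual ~Transfer() = default;
  virtual int Apply() = 0;  // stands in for TransFormat(args, result)
};

struct NoopTransfer : Transfer {
  int Apply() override { return 0; }
};

// Factory keyed by (src_format, dst_format), mirroring BuildFormatTransfer.
std::unique_ptr<Transfer> BuildTransfer(int src_format, int dst_format) {
  static const std::map<std::pair<int, int>, int> supported = {{{0, 1}, 0}};
  if (supported.find({src_format, dst_format}) == supported.end()) {
    return nullptr;  // unsupported pair
  }
  return std::unique_ptr<Transfer>(new NoopTransfer());
}

int TransFormat(int src_format, int dst_format) {
  auto transfer = BuildTransfer(src_format, dst_format);
  if (transfer == nullptr) {
    return -1;  // formats.cc reports the unsupported pair here and fails
  }
  return transfer->Apply();  // otherwise delegate to the transfer object
}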
diff --git a/ge/common/ge/tbe_plugin_manager.cc b/ge/common/ge/tbe_plugin_manager.cc
index 70c1ab94..3680a8bb 100755
--- a/ge/common/ge/tbe_plugin_manager.cc
+++ b/ge/common/ge/tbe_plugin_manager.cc
@@ -42,7 +42,7 @@ const int kBaseInt = 10;
 std::map<string, string> TBEPluginManager::options_ = {};

 // Get Singleton Instance
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY TBEPluginManager &TBEPluginManager::Instance() {
+TBEPluginManager &TBEPluginManager::Instance() {
   static TBEPluginManager instance_ptr_;
   return instance_ptr_;
 }
@@ -61,7 +61,7 @@ Status TBEPluginManager::ClearHandles_() {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status TBEPluginManager::Finalize() {
+Status TBEPluginManager::Finalize() {
   Status ret = ClearHandles_();
   return ret;
 }
@@ -207,7 +206,6 @@ void TBEPluginManager::LoadCustomOpLib() {
   }
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY
 void TBEPluginManager::LoadPluginSo(const std::map<string, string> &options) {
   vector<string> file_list;
   string caffe_parser_path;
@@ -246,7 +245,6 @@ void TBEPluginManager::LoadPluginSo(const std::map<string, string> &options) {
   }
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY
 void TBEPluginManager::InitPreparation(const std::map<string, string> &options) {
   options_.insert(options.begin(), options.end());
   // Load TBE plugin

diff --git a/ge/graph/common/ge_call_wrapper.h b/ge/common/ge_call_wrapper.h
similarity index 100%
rename from ge/graph/common/ge_call_wrapper.h
rename to ge/common/ge_call_wrapper.h

diff --git a/ge/common/ge_format_util.cc b/ge/common/ge_format_util.cc
index f3dee571..0ffa686f 100755
--- a/ge/common/ge_format_util.cc
+++ b/ge/common/ge_format_util.cc
@@ -18,9 +18,7 @@
 #include "common/formats/formats.h"

 namespace ge {
-GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Status GeFormatUtil::TransShape(const TensorDesc &src_desc,
-                                                                               Format dst_format,
-                                                                               std::vector<int64_t> &dst_shape) {
+Status GeFormatUtil::TransShape(const TensorDesc &src_desc, Format dst_format, std::vector<int64_t> &dst_shape) {
   return formats::TransShape(src_desc.GetFormat(), src_desc.GetShape().GetDims(), src_desc.GetDataType(), dst_format,
                              dst_shape);
 }

diff --git a/ge/common/helper/model_cache_helper.h b/ge/common/helper/model_cache_helper.h
index 13253cbe..f0831075 100755
--- a/ge/common/helper/model_cache_helper.h
+++ b/ge/common/helper/model_cache_helper.h
@@ -24,7 +24,7 @@
 #include "external/ge/ge_api_error_codes.h"
 #include "graph/compute_graph.h"
 #include "graph/manager/graph_var_manager.h"
-#include "model/ge_model.h"
+#include "common/model/ge_model.h"

 namespace ge {
 using Json = nlohmann::json;

diff --git a/ge/common/helper/model_helper.cc b/ge/common/helper/model_helper.cc
index 4e760a4a..2608b1e1 100644
--- a/ge/common/helper/model_helper.cc
+++ b/ge/common/helper/model_helper.cc
@@ -33,7 +33,7 @@
 const uint32_t kStatiOmFileModelNum = 1;

 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelHelper::~ModelHelper() { (void)ReleaseLocalModelData(); }
+ModelHelper::~ModelHelper() { (void)ReleaseLocalModelData(); }

 Status ModelHelper::SaveModelPartition(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, ModelPartitionType type,
                                        const uint8_t *data, size_t size, size_t model_index) {
@@ -108,8 +108,8 @@ Status ModelHelper::SaveSizeToModelDef(const GeModelPtr &ge_model) {
   return SUCCESS;
 }

-Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper,
-                                 const GeModelPtr &ge_model, ge::Buffer &model_buffer, size_t model_index) {
+Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model,
+                                 ge::Buffer &model_buffer, size_t model_index) {
   ModelPtr model_tmp = ge::MakeShared<ge::Model>(ge_model->GetName(), ge_model->GetPlatformVersion());
   if (model_tmp == nullptr) {
     GELOGE(FAILED, "[Creat][Model]Failed, Model %s Ptr", ge_model->GetName().c_str());
@@ -143,8 +143,8 @@ Status ModelHelper::SaveModelDef(std::shared_ptr<OmFileSaveHelper> &om_file_save
   return SUCCESS;
 }

-Status ModelHelper::SaveModelWeights(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper,
-                                     const GeModelPtr &ge_model, size_t model_index) {
+Status ModelHelper::SaveModelWeights(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model,
+                                     size_t model_index) {
   auto ge_model_weight = ge_model->GetWeight();
   GELOGD("WEIGHTS_DATA size is %zu, %p", ge_model_weight.GetSize(), ge_model_weight.GetData());
   // weight is not necessary
@@ -187,8 +187,8 @@ Status ModelHelper::SaveModelCustAICPU(std::shared_ptr<OmFileSaveHelper> &om_fil
   return SUCCESS;
 }

-Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper,
-                                     const GeModelPtr &ge_model, ge::Buffer &task_buffer, size_t model_index) {
+Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model,
+                                     ge::Buffer &task_buffer, size_t model_index) {
   std::shared_ptr<domi::ModelTaskDef> model_task_def = ge_model->GetModelTaskDefPtr();
   if (model_task_def == nullptr) {
     GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[Creat][ModelTaskDef]Failed, it is nullptr, "
@@ -231,8 +231,8 @@ Status ModelHelper::SaveModelTaskDef(std::shared_ptr<OmFileSaveHelper> &om_file_
   return SUCCESS;
 }

-Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper,
-                                    const GeModelPtr &ge_model, size_t model_num) {
+Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper, const GeModelPtr &ge_model,
+                                    size_t model_num) {
   // Save target/version to model_header
   ModelFileHeader &model_header = om_file_save_helper->GetModelFileHeader();
   model_header.platform_type = ge_model->GetPlatformType();
@@ -246,8 +246,10 @@ Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_s
   if (err != EOK) {
     GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION,
            "[Save][Model]Failed while allocating memory for platform_version %s, model %s, "
-           "errno %d", platform_version.c_str(), ge_model->GetName().c_str(), err);
-    REPORT_CALL_ERROR("E19999", "ModelHelper save model %s failed while "
+           "errno %d",
+           platform_version.c_str(), ge_model->GetName().c_str(), err);
+    REPORT_CALL_ERROR("E19999",
+                      "ModelHelper save model %s failed while "
                       "allocating memory for platform_version %s, errno %d",
                       ge_model->GetName().c_str(), platform_version.c_str(), err);
     return ACL_ERROR_GE_MEMORY_ALLOCATION;
@@ -271,9 +273,9 @@ Status ModelHelper::SaveModelHeader(std::shared_ptr<OmFileSaveHelper> &om_file_s
   return SUCCESS;
 }

-Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper>& om_file_save_helper,
-                                         const GeModelPtr &ge_model, ge::Buffer &model_buffer,
-                                         ge::Buffer &task_buffer, size_t model_index) {
+Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper> &om_file_save_helper,
+                                         const GeModelPtr &ge_model, ge::Buffer &model_buffer, ge::Buffer &task_buffer,
+                                         size_t model_index) {
   if (SaveModelDef(om_file_save_helper, ge_model, model_buffer, model_index) != SUCCESS) {
     GELOGE(FAILED, "[Save][ModelDef]Failed, model %s, model index %zu",
            ge_model->GetName().c_str(), model_index);
@@ -316,10 +318,8 @@ Status ModelHelper::SaveAllModelPartiton(std::shared_ptr<OmFileSaveHelper>& om_f
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmModel(const GeModelPtr &ge_model,
-                                                                                   const SaveParam &save_param,
-                                                                                   const std::string &output_file,
-                                                                                   ModelBufferData& model) {
+Status ModelHelper::SaveToOmModel(const GeModelPtr &ge_model, const SaveParam &save_param,
+                                  const std::string &output_file, ModelBufferData &model) {
   if (output_file.empty()) {
     GELOGE(FAILED, "[Save][Model]GraphBuilder SaveModel received invalid file name prefix, "
            "model %s", ge_model->GetName().c_str());
@@ -367,13 +367,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmMod
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRootModel(
-    const GeRootModelPtr &ge_root_model,
-    const SaveParam &save_param,
-    const std::string &output_file,
-    ModelBufferData& model,
-    bool is_unknown_shape) {
-
+Status ModelHelper::SaveToOmRootModel(const GeRootModelPtr &ge_root_model, const SaveParam &save_param,
+                                      const std::string &output_file, ModelBufferData &model, bool is_unknown_shape) {
   GE_CHECK_NOTNULL(ge_root_model);
   GE_IF_BOOL_EXEC(ge_root_model == nullptr,
                   GELOGE(FAILED, "[Check][GERootModel]Ge_root_model is nullptr");
@@ -466,8 +461,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::SaveToOmRoo
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status
-ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::string &output_file) {
+Status ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::string &output_file) {
   if (output_file.empty()) {
     GELOGE(FAILED, "[Save][Model]Received invalid file name prefix, output_file %s", output_file.c_str());
     REPORT_INNER_ERROR("E19999", "Save model received invalid file name prefix, output_file %s", output_file.c_str());
@@ -545,7 +539,7 @@ ModelHelper::SaveOriginalGraphToOmModel(const ge::Graph &graph, const std::strin
   return (ret == SUCCESS ? SUCCESS : FAILED);
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(const ge::ModelData &model_data) {
+Status ModelHelper::LoadModel(const ge::ModelData &model_data) {
   if (model_data.model_data == nullptr || model_data.model_len == 0) {
     GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID,
            "[Load][Model]Model_data is nullptr or model_data_size is 0");
@@ -597,7 +591,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadModel(c
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadRootModel(const ge::ModelData &model_data) {
+Status ModelHelper::LoadRootModel(const ge::ModelData &model_data) {
   if (model_data.model_data == nullptr || model_data.model_len == 0) {
     GELOGE(ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID, "[Load][RootModel] "
            "Model_data is nullptr or model data is empty.");
@@ -783,7 +777,6 @@ Status ModelHelper::LoadModelData(OmFileLoadHelper &om_load_helper, GeModelPtr &
   return SUCCESS;
 }

-
 Status ModelHelper::LoadWeights(OmFileLoadHelper &om_load_helper) {
   ModelPartition partition;
   if (om_load_helper.GetModelPartition(ModelPartitionType::WEIGHTS_DATA, partition) != SUCCESS) {
@@ -814,7 +807,7 @@ Status ModelHelper::LoadWeights(OmFileLoadHelper &om_load_helper, GeModelPtr &cu
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper) {
+Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper) {
   ModelPartition task_partition;
   if (om_load_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition) != SUCCESS) {
     GELOGE(FAILED, "[Get][ModelTaskPartition]Failed, task_partition size:%u", task_partition.size);
@@ -838,9 +831,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(Om
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper,
-                                                                              GeModelPtr &cur_model,
-                                                                              size_t mode_index) {
+Status ModelHelper::LoadTask(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model, size_t mode_index) {
   ModelPartition task_partition;
   if (om_load_helper.GetModelPartition(ModelPartitionType::TASK_INFO, task_partition, mode_index) != SUCCESS) {
     GELOGE(FAILED, "Get task model partition failed.");
@@ -915,8 +906,8 @@ Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper) {
   return SUCCESS;
 }

-Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper,
-                                             GeModelPtr &cur_model, size_t mode_index) {
+Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper, GeModelPtr &cur_model,
+                                             size_t mode_index) {
   // Load cust aicpu kernels
   ModelPartition partition_kernel_def;
   CustAICPUKernelStore kernel_store;
@@ -933,7 +924,7 @@ Status ModelHelper::LoadCustAICPUKernelStore(OmFileLoadHelper &om_load_helper,
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeModelPtr ModelHelper::GetGeModel() {
+GeModelPtr ModelHelper::GetGeModel() {
   if (model_ != nullptr) {
     return model_;
   }
@@ -946,7 +937,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeModelPtr ModelHelper::GetGeMo
   return out_model;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeRootModelPtr ModelHelper::GetGeRootModel() {
+GeRootModelPtr ModelHelper::GetGeRootModel() {
   if (root_model_ != nullptr) {
     return root_model_;
   }
@@ -959,7 +950,6 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY GeRootModelPtr ModelHelper::Get
   return out_model;
 }

-
 Status ModelHelper::ReleaseLocalModelData() noexcept {
   Status result = SUCCESS;
   if (model_addr_tmp_ != nullptr) {
@@ -976,8 +966,7 @@ Status ModelHelper::ReleaseLocalModelData() noexcept {
   return result;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetBaseNameFromFileName(
-    const string &file_name, string &base_name) {
+Status ModelHelper::GetBaseNameFromFileName(const string &file_name, string &base_name) {
   GELOGD("Get base_name from file, file_name:%s", file_name.c_str());
   GE_CHK_BOOL_EXEC_WARN(!file_name.empty(), return FAILED, "File path may not valid, check params --output");
   size_t start_position = 0;
@@ -992,8 +981,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetBaseName
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelHelper::GetModelNameFromMergedGraphName(
-    const string &graph_name, string &model_name) {
+Status ModelHelper::GetModelNameFromMergedGraphName(const string &graph_name, string &model_name) {
   GELOGD("Get model_name from graph_name, graph_name:%s", graph_name.c_str());
   // this can only be used after merged graph(graph name will be append with "_x", x is index);
   GE_CHK_BOOL_EXEC_WARN(!graph_name.empty(), return FAILED, "File path may not valid, check params --output");
@@ -1035,8 +1023,7 @@ Status ModelTool::GetModelInfoFromOm(const char *model_file, ge::proto::ModelDef
     ErrorManager::GetInstance().ATCReportErrMessage("E10003", {"parameter", "value", "reason"},
                                                     {"om", model_file, "invalid om file, can't be parsed"});
     GELOGE(ACL_ERROR_GE_PARAM_INVALID,
-           "[Parse][ModelContent]Failed because of invalid om file %s, please check om param",
-           model_file);
+           "[Parse][ModelContent]Failed because of invalid om file %s, please check om param", model_file);
     return ret;
   }

diff --git a/ge/common/helper/om_file_helper.cc b/ge/common/helper/om_file_helper.cc
index cd13c5d8..a42316ff 100644
--- a/ge/common/helper/om_file_helper.cc
+++ b/ge/common/helper/om_file_helper.cc
@@ -18,10 +18,11 @@
 #include
 #include

-#include "common/math/math_util.h"
+
 #include "common/auth/file_saver.h"
-#include "framework/common/debug/log.h"
+#include "common/math/math_util.h"
 #include "framework/common/debug/ge_log.h"
+#include "framework/common/debug/log.h"
 #include "framework/common/ge_inner_error_codes.h"
 #include "framework/common/util.h"

@@ -32,7 +33,7 @@ const int32_t kOptionalNum = 2;
 }
 namespace ge {
 // For Load
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(const ge::ModelData &model) {
+Status OmFileLoadHelper::Init(const ge::ModelData &model) {
   if (CheckModelValid(model) != SUCCESS) {
     return FAILED;
   }
@@ -42,8 +43,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(c
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(uint8_t *model_data,
-                                                                               const uint32_t model_data_size) {
+Status OmFileLoadHelper::Init(uint8_t *model_data, const uint32_t model_data_size) {
   Status status = LoadModelPartitionTable(model_data, model_data_size);
   if (status != SUCCESS) {
     return status;
@@ -52,9 +52,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(u
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(uint8_t *model_data,
-                                                                               uint32_t model_data_size,
-                                                                               uint32_t model_num) {
+Status OmFileLoadHelper::Init(uint8_t *model_data, uint32_t model_data_size, uint32_t model_num) {
   Status status = LoadModelPartitionTable(model_data, model_data_size, model_num);
   if (status != SUCCESS) {
     return status;
@@ -64,8 +62,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::Init(u
 }

 // Use both
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type,
-                                                                                            ModelPartition &partition) {
+Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, ModelPartition &partition) {
   if (!is_inited_) {
     GELOGE(PARAM_INVALID, "OmFileLoadHelper has not been initialized!");
     return PARAM_INVALID;
@@ -90,9 +87,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetMod
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type,
-                                                                                            ModelPartition &partition,
-                                                                                            size_t model_index) {
+Status OmFileLoadHelper::GetModelPartition(ModelPartitionType type, ModelPartition &partition, size_t model_index) {
   if (!is_inited_) {
     GELOGE(PARAM_INVALID, "OmFileLoadHelper has not been initialized!");
     return PARAM_INVALID;
@@ -248,12 +243,11 @@ Status OmFileLoadHelper::LoadModelPartitionTable(uint8_t *model_data, uint32_t m
   return SUCCESS;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY const std::vector<ModelPartition>
-    &OmFileSaveHelper::GetModelPartitions() const {
+const std::vector<ModelPartition> &OmFileSaveHelper::GetModelPartitions() const {
   return context_.partition_datas_;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSaveHelper::GetPartitionTable() {
+ModelPartitionTable *OmFileSaveHelper::GetPartitionTable() {
   auto partition_size = static_cast<uint32_t>(context_.partition_datas_.size());
   // Build ModelPartitionTable, flex array
   context_.partition_table_.clear();
@@ -272,8 +266,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSave
   return partition_table;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSaveHelper::GetPartitionTable(
-    size_t cur_ctx_index) {
+ModelPartitionTable *OmFileSaveHelper::GetPartitionTable(size_t cur_ctx_index) {
   auto &cur_ctx = model_contexts_[cur_ctx_index];
   auto partition_size = static_cast<uint32_t>(cur_ctx.partition_datas_.size());
   // Build ModelPartitionTable, flex array
@@ -293,8 +286,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelPartitionTable *OmFileSave
   return partition_table;
 }

-
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OmFileSaveHelper::AddPartition(ModelPartition &partition) {
+Status OmFileSaveHelper::AddPartition(ModelPartition &partition) {
   if (ge::CheckUint32AddOverflow(context_.model_data_len_, partition.size) != SUCCESS) {
     GELOGE(FAILED, "UINT32 %u and %u addition can result in overflow!", context_.model_data_len_, partition.size);
     return FAILED;
@@ -379,8 +371,8 @@ Status OmFileSaveHelper::SaveModelToFile(const char *output_file, ModelBufferDat
 #endif
 }

-Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char *output_file,
-                                       ModelBufferData &model, bool is_offline) {
+Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char *output_file, ModelBufferData &model,
+                                       bool is_offline) {
   (void)save_param.cert_file;
   (void)save_param.ek_file;
   (void)save_param.encode_mode;
@@ -409,8 +401,8 @@ Status OmFileSaveHelper::SaveRootModel(const SaveParam &save_param, const char *
     model_header_.length += size_of_table + cur_model_data_len;
     model_partition_tabels.push_back(tmp_table);
     all_model_partitions.push_back(cur_ctx.partition_datas_);
-    GELOGD("sizeof(ModelPartitionTable):%u, cur_model_data_len:%u, cur_context_index:%zu",
-           size_of_table, cur_model_data_len, ctx_index);
+    GELOGD("sizeof(ModelPartitionTable):%u, cur_model_data_len:%u, cur_context_index:%zu", size_of_table,
+           cur_model_data_len, ctx_index);
   }
   Status ret;
   if (is_offline) {

diff --git a/ge/common/kernel_store.h b/ge/common/kernel_store.h
index b3f4a62e..e7b867a3 100755
--- a/ge/common/kernel_store.h
+++ b/ge/common/kernel_store.h
@@ -48,7 +48,7 @@ struct KernelStoreItemHead {
   uint32_t bin_len;
 };

-class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY KernelStore {
+class KernelStore {
  public:
   KernelStore() = default;
   virtual ~KernelStore() = default;

diff --git a/ge/graph/common/local_context.cc b/ge/common/local_context.cc
similarity index 97%
rename from ge/graph/common/local_context.cc
rename to ge/common/local_context.cc
index bd747021..e31f2342 100644
--- a/ge/graph/common/local_context.cc
+++ b/ge/common/local_context.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-#include "graph/common/local_context.h"
+#include "common/local_context.h"

 #include "framework/common/debug/ge_log.h"

diff --git a/ge/graph/common/local_context.h b/ge/common/local_context.h
similarity index 100%
rename from ge/graph/common/local_context.h
rename to ge/common/local_context.h

diff --git a/ge/common/math/fp16_math.cc b/ge/common/math/fp16_math.cc
index 6a9c2fb3..c2dfeb61 100755
--- a/ge/common/math/fp16_math.cc
+++ b/ge/common/math/fp16_math.cc
@@ -18,7 +18,7 @@
 #include "external/register/register_types.h"

 namespace ge {
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sqrt(fp16_t fp) {
+fp16_t sqrt(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -29,7 +29,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sqrt(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rsqrt(fp16_t fp) {
+fp16_t rsqrt(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -40,7 +40,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rsqrt(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rcp(fp16_t fp) {
+fp16_t rcp(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -51,7 +51,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t rcp(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t exp(fp16_t fp) {
+fp16_t exp(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -63,7 +63,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t exp(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow2(fp16_t fp) {
+fp16_t pow2(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -75,7 +75,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow2(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow10(fp16_t fp) {
+fp16_t pow10(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -87,7 +87,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t pow10(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t ln(fp16_t fp) {
+fp16_t ln(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -99,7 +99,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t ln(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log2(fp16_t fp) {
+fp16_t log2(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -111,7 +111,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log2(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log10(fp16_t fp) {
+fp16_t log10(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -123,7 +123,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t log10(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t cos(fp16_t fp) {
+fp16_t cos(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -135,7 +135,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t cos(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sin(fp16_t fp) {
+fp16_t sin(fp16_t fp) {
   fp16_t ret;
   // Convert half precision float number to double
   double dVal = fp;
@@ -147,13 +147,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t sin(fp16_t fp) {
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t abs(fp16_t fp) {
+fp16_t abs(fp16_t fp) {
   fp16_t ret;
   ret.val = (fp.val & kFp16AbsMax);
   return ret;
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t max(fp16_t fp1, fp16_t fp2) {
+fp16_t max(fp16_t fp1, fp16_t fp2) {
   if (fp1 >= fp2) {
     return fp1;
   } else {
@@ -161,7 +161,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t max(fp16_t fp1, fp16_t f
   }
 }

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY fp16_t min(fp16_t fp1, fp16_t fp2) {
+fp16_t min(fp16_t fp1, fp16_t fp2) {
   if (fp1 <= fp2) {
     return fp1;
   } else {
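Every wrapper in the fp16_math.cc hunks above follows the same three-step shape: widen the half to double, apply the standard math routine, narrow back. A generic sketch of that pattern, templated over the half type so it stands alone; in the patch, Half is the fp16_t class whose double conversions appear earlier in fp16_t.cc:

#include <cmath>

// One template instead of a dozen hand-written wrappers (sketch only;
// assumes Half converts to and from double, as fp16_t does above).
template <typename Half, typename Fn>
Half ApplyViaDouble(Half fp, Fn fn) {
  double d_val = static_cast<double>(fp);  // widen: half -> double
  double d_ret = fn(d_val);                // compute at double precision
  return static_cast<Half>(d_ret);         // narrow: double -> half
}

// e.g. sqrt(fp) above is equivalent to
//   ApplyViaDouble(fp, [](double v) { return std::sqrt(v); });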
*/ -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include #include "framework/common/debug/log.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/model/ge_model.h b/ge/common/model/ge_model.h similarity index 90% rename from ge/model/ge_model.h rename to ge/common/model/ge_model.h index 6356c621..0e791746 100755 --- a/ge/model/ge_model.h +++ b/ge/common/model/ge_model.h @@ -31,7 +31,7 @@ namespace ge { const uint32_t INVALID_MODEL_ID = 0xFFFFFFFFUL; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder { +class GeModel : public AttrHolder { public: GeModel(); ~GeModel() = default; @@ -82,13 +82,13 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeModel : public AttrHolder private: void Init(); - ProtoAttrMapHelper attrs_; + ProtoAttrMapHelper attrs_; /*lint !e148*/ Graph graph_; - std::shared_ptr task_; - TBEKernelStore tbe_kernal_store_; - CustAICPUKernelStore cust_aicpu_kernal_store_; - Buffer weights_buffer_; + std::shared_ptr task_; /*lint !e148*/ + TBEKernelStore tbe_kernal_store_; /*lint !e148*/ + CustAICPUKernelStore cust_aicpu_kernal_store_; /*lint !e148*/ + Buffer weights_buffer_; /*lint !e148*/ std::string name_; uint32_t version_ = {0}; diff --git a/ge/model/ge_root_model.cc b/ge/common/model/ge_root_model.cc similarity index 95% rename from ge/model/ge_root_model.cc rename to ge/common/model/ge_root_model.cc index b6a1e175..3fe10991 100644 --- a/ge/model/ge_root_model.cc +++ b/ge/common/model/ge_root_model.cc @@ -14,8 +14,9 @@ * limitations under the License. */ -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" #include "graph/debug/ge_attr_define.h" + namespace ge { void GeRootModel::SetSubgraphInstanceNameToModel(string instance_name, GeModelPtr ge_model) { subgraph_instance_name_to_model_.insert(std::pair(instance_name, ge_model)); diff --git a/ge/model/ge_root_model.h b/ge/common/model/ge_root_model.h similarity index 98% rename from ge/model/ge_root_model.h rename to ge/common/model/ge_root_model.h index 9e8e116e..e9ba3da6 100755 --- a/ge/model/ge_root_model.h +++ b/ge/common/model/ge_root_model.h @@ -15,7 +15,7 @@ */ #include #include "graph/compute_graph.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #ifndef GE_MODEL_GE_ROOT_MODEL_H_ #define GE_MODEL_GE_ROOT_MODEL_H_ diff --git a/ge/common/model_parser/model_parser.cc b/ge/common/model_parser/model_parser.cc index 7447cdf8..5d1869be 100644 --- a/ge/common/model_parser/model_parser.cc +++ b/ge/common/model_parser/model_parser.cc @@ -23,12 +23,10 @@ #include "framework/common/helper/model_helper.h" namespace ge { -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::ModelParserBase() {} -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelParserBase::~ModelParserBase() {} +ModelParserBase::ModelParserBase() {} +ModelParserBase::~ModelParserBase() {} -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFromFile(const char *model_path, - int32_t priority, - ge::ModelData &model_data) { +Status ModelParserBase::LoadFromFile(const char *model_path, int32_t priority, ge::ModelData &model_data) { std::string real_path = RealPath(model_path); if (real_path.empty()) { GELOGE(ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID, "[Check][Param]Model file path %s is invalid", @@ -81,9 +79,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::LoadFro return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseModelContent(const ge::ModelData 
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelParserBase::ParseModelContent(const ge::ModelData &model,
-                                                                                           uint8_t *&model_data,
-                                                                                           uint32_t &model_len) {
+Status ModelParserBase::ParseModelContent(const ge::ModelData &model, uint8_t *&model_data, uint32_t &model_len) {
   // Parameter validity check
   GE_CHECK_NOTNULL(model.model_data);

diff --git a/ge/common/model_saver.cc b/ge/common/model_saver.cc
index 24e837f7..56045030 100755
--- a/ge/common/model_saver.cc
+++ b/ge/common/model_saver.cc
@@ -29,8 +29,7 @@
 namespace ge {
 const uint32_t kInteval = 2;

-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFile(const char *file_path,
-                                                                                   const Json &model) {
+Status ModelSaver::SaveJsonToFile(const char *file_path, const Json &model) {
   Status ret = SUCCESS;
   if (file_path == nullptr || SUCCESS != CheckPath(file_path)) {
     GELOGE(FAILED, "[Check][OutputFile]Failed, file %s", file_path);

diff --git a/ge/graph/common/omg_util.cc b/ge/common/omg_util.cc
similarity index 95%
rename from ge/graph/common/omg_util.cc
rename to ge/common/omg_util.cc
index b2017e4d..31e4270a 100644
--- a/ge/graph/common/omg_util.cc
+++ b/ge/common/omg_util.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-#include "graph/common/omg_util.h"
+#include "common/omg_util.h"

 #include "graph/debug/ge_attr_define.h"
 #include "graph/utils/graph_utils.h"
@@ -59,8 +59,8 @@ Status SetStreamLabel(const ge::NodePtr &node, const std::string &label) {
   if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_STREAM_LABEL, label)) {
     REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(),
                        node->GetName().c_str(), node->GetType().c_str());
-    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(),
-           node->GetName().c_str(), node->GetType().c_str());
+    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_STREAM_LABEL.c_str(), node->GetName().c_str(),
+           node->GetType().c_str());
     return FAILED;
   }

@@ -100,8 +100,8 @@ Status SetActiveLabelList(const ge::NodePtr &node, const std::vector<std::string
     REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
                        node->GetName().c_str(), node->GetType().c_str());
-    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(),
-           node->GetName().c_str(), node->GetType().c_str());
+    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ACTIVE_LABEL_LIST.c_str(), node->GetName().c_str(),
+           node->GetType().c_str());
     return FAILED;
   }

@@ -163,8 +163,8 @@ Status SetOriginalNodeName(const ge::NodePtr &node, const std::string &orig_name
   if (!AttrUtils::SetStr(tmp_desc, ge::ATTR_NAME_ORIG_NODE_NAME, orig_name)) {
     REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(),
                        node->GetName().c_str(), node->GetType().c_str());
-    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(),
-           node->GetName().c_str(), node->GetType().c_str());
+    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_ORIG_NODE_NAME.c_str(), node->GetName().c_str(),
+           node->GetType().c_str());
     return FAILED;
   }

@@ -207,8 +207,8 @@ Status SetNextIteration(const NodePtr &node, const NodePtr &next) {
   if (!AttrUtils::SetStr(op_desc, ATTR_NAME_NEXT_ITERATION, name)) {
     REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(),
                        op_desc->GetName().c_str(), op_desc->GetType().c_str());
-    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(),
-           op_desc->GetName().c_str(), op_desc->GetType().c_str());
+    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_NEXT_ITERATION.c_str(), op_desc->GetName().c_str(),
+           op_desc->GetType().c_str());
     return FAILED;
   }

   return SUCCESS;
@@ -290,8 +290,8 @@ void SetControlFlowGroup(const NodePtr &node, int64_t group) {
   if (!AttrUtils::SetInt(op_desc, ATTR_NAME_CONTROL_FLOW_GROUP, group)) {
     REPORT_INNER_ERROR("E19999", "Set Attr:%s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(),
                        node->GetName().c_str(), node->GetType().c_str());
-    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(),
-           node->GetName().c_str(), node->GetType().c_str());
+    GELOGE(FAILED, "[Set][Attr] %s fail for op:%s(%s)", ATTR_NAME_CONTROL_FLOW_GROUP.c_str(), node->GetName().c_str(),
+           node->GetType().c_str());
   }
 }
 }  // namespace ge

diff --git a/ge/graph/common/omg_util.h b/ge/common/omg_util.h
similarity index 100%
rename from ge/graph/common/omg_util.h
rename to ge/common/omg_util.h
attr_map)} FMK_FUNC_DEV_VISIBILITY void \ + AddModelAttrList(KEY_TYPE map_key, VALUE_TYPE value, ModelDef *model_def) { \ + GE_CHECK_NOTNULL_JUST_RETURN(model_def); \ + auto attr = model_def->mutable_attr(); \ + ADD_TO_ATTR_MAP_LIST(map_key, value, attr) \ } DEFINE_ADD_ATTR_VALUE(const std::string &, const std::string &); @@ -127,46 +123,42 @@ DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const bool); DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const int64_t); DEFINE_ADD_ATTR_VALUE_LIST(const std::string &, const std::string &); -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpAttr(const std::string &map_key, AttrDef &attr, - OpDef *op_def) { +void AddOpAttr(const std::string &map_key, AttrDef &attr, OpDef *op_def) { GE_CHECK_NOTNULL_JUST_RETURN(op_def); GE_CHECK_NOTNULL_JUST_RETURN(op_def->mutable_attr()); (void)op_def->mutable_attr()->insert(AttrDefPair(map_key, attr)); } -#define DEFINE_GET_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ - const AttrDefMap &attr) { \ - auto it = attr.find(map_key); \ - if (it != attr.end()) { \ - *value = it->second.FIELD(); \ - return true; \ - } \ - return false; \ +#define DEFINE_GET_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ + bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const AttrDefMap &attr) { \ + auto it = attr.find(map_key); \ + if (it != attr.end()) { \ + *value = it->second.FIELD(); \ + return true; \ + } \ + return false; \ } -#define DEFINE_GET_ATTR_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE *&value, \ - AttrDefMap *attr) { \ - GE_RT_FALSE_CHECK_NOTNULL(attr); \ - auto it = attr->find(map_key); \ - if (it != attr->end()) { \ - value = it->second.mutable_##FIELD(); \ - return true; \ - } \ - return false; \ +#define DEFINE_GET_ATTR_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ + bool GetAttrDefValue(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE *&value, AttrDefMap *attr) { \ + GE_RT_FALSE_CHECK_NOTNULL(attr); \ + auto it = attr->find(map_key); \ + if (it != attr->end()) { \ + value = it->second.mutable_##FIELD(); \ + return true; \ + } \ + return false; \ } -#define DEFINE_GET_ATTR_CONST_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetAttrDefValue( \ - ARG_TYPE_KEY map_key, const ARG_TYPE_VALUE *&value, const AttrDefMap &attr) { \ - auto it = attr.find(map_key); \ - if (it == attr.end()) { \ - return false; \ - } \ - \ - value = &(it->second.FIELD()); \ - return true; \ +#define DEFINE_GET_ATTR_CONST_POINT_REF(ARG_TYPE_KEY, ARG_TYPE_VALUE, FIELD) \ + bool GetAttrDefValue(ARG_TYPE_KEY map_key, const ARG_TYPE_VALUE *&value, const AttrDefMap &attr) { \ + auto it = attr.find(map_key); \ + if (it == attr.end()) { \ + return false; \ + } \ + \ + value = &(it->second.FIELD()); \ + return true; \ } #define DEFINE_GET_BYTES_ATTR_VALUE(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ @@ -216,16 +208,14 @@ DEFINE_GET_ATTR_CONST_POINT_REF(const std::string &, NamedAttrs, func); DEFINE_GET_BYTES_ATTR_VALUE(const std::string &, std::string *); -#define DEFINE_GET_OP_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetOpAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ - const OpDef *op_def) { \ - GE_RT_FALSE_CHECK_NOTNULL(op_def); \ - return GetAttrDefValue(map_key, value, op_def->attr()); \ - } \ - 
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetModelAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, \ - const ModelDef *model_def) { \ - GE_RT_FALSE_CHECK_NOTNULL(model_def); \ - return GetAttrDefValue(map_key, value, model_def->attr()); \ +#define DEFINE_GET_OP_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ + bool GetOpAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const OpDef *op_def) { \ + GE_RT_FALSE_CHECK_NOTNULL(op_def); \ + return GetAttrDefValue(map_key, value, op_def->attr()); \ + } \ + bool GetModelAttr(ARG_TYPE_KEY map_key, ARG_TYPE_VALUE value, const ModelDef *model_def) { \ + GE_RT_FALSE_CHECK_NOTNULL(model_def); \ + return GetAttrDefValue(map_key, value, model_def->attr()); \ } DEFINE_GET_OP_ATTR(const std::string &, std::string *); @@ -238,8 +228,7 @@ DEFINE_GET_OP_ATTR(const std::string &, bool *); DEFINE_GET_OP_ATTR(const std::string &, AttrDef_ListValue *); #define DEFINE_GET_BT_ATTR(ARG_TYPE_KEY, ARG_TYPE_VALUE) \ - FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool GetBytesAttr(ARG_TYPE_KEY key, ARG_TYPE_VALUE value, \ - const OpDef *op_def) { \ + bool GetBytesAttr(ARG_TYPE_KEY key, ARG_TYPE_VALUE value, const OpDef *op_def) { \ GE_RT_FALSE_CHECK_NOTNULL(op_def); \ return GetBytesValue(key, value, op_def->attr()); \ } \ @@ -250,7 +239,7 @@ DEFINE_GET_OP_ATTR(const std::string &, AttrDef_ListValue *); DEFINE_GET_BT_ATTR(const std::string &, std::string *); -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool HasOpAttr(const OpDef *op_def, const std::string &attr_name) { +bool HasOpAttr(const OpDef *op_def, const std::string &attr_name) { if (op_def == nullptr) { return false; } @@ -263,8 +252,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool HasOpAttr(const OpDef *op_ return false; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddModelAttr(const std::string &map_key, const void *value, - size_t size, ModelDef *model_def) { +void AddModelAttr(const std::string &map_key, const void *value, size_t size, ModelDef *model_def) { if (model_def == nullptr) { return; } @@ -280,8 +268,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddModelAttr(const std::st } } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void AddOpBytesAttr(const std::string &key, const void *value, - size_t size, OpDef *op_def) { +void AddOpBytesAttr(const std::string &key, const void *value, size_t size, OpDef *op_def) { if (op_def == nullptr) { return; } diff --git a/ge/common/op/ge_op_utils.cc b/ge/common/op/ge_op_utils.cc index 99b5733c..429ce909 100644 --- a/ge/common/op/ge_op_utils.cc +++ b/ge/common/op/ge_op_utils.cc @@ -115,8 +115,7 @@ const int NORMAL_TENSOR_SIZE = 4; #define AIPP_CONVERT_LIST_FLOAT(KEY, REQUIRED) AIPP_CONVERT_LIST_FORMAT(KEY, float, REQUIRED, GeAttrValue::FLOAT) -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status -OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::AippOpParams *aipp_params) { +Status OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::AippOpParams *aipp_params) { GE_CHECK_NOTNULL(aipp_params); AIPP_CONVERT_FORMAT_EX(aipp_mode, domi::AippOpParams::AippMode, int32_t, GeAttrValue::INT); AIPP_CONVERT_INT(related_input_rank); @@ -178,8 +177,7 @@ OpUtils::ConvertAippParams(const GeAttrValue::NAMED_ATTRS &aipp_attr, domi::Aipp return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::TransferDim(const std::vector &dim, - std::vector &dim_vector) { +Status OpUtils::TransferDim(const std::vector &dim, std::vector &dim_vector) { size_t input_shape_size = 
dim.size(); std::list new_dim_list; for (auto dim_temp : dim) { @@ -301,9 +299,9 @@ Status OpUtils::SetOutputSliceDataByDataType(void *data, int64_t data_size, cons return ret; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetOutputSliceData( - void *data, int64_t data_size, int32_t data_type, std::vector &input_dims, std::vector &begin, - std::vector &output_dims, GeTensor *output, std::vector &stride) { +Status OpUtils::SetOutputSliceData(void *data, int64_t data_size, int32_t data_type, std::vector &input_dims, + std::vector &begin, std::vector &output_dims, GeTensor *output, + std::vector &stride) { if (data == nullptr || output == nullptr) { GELOGE(PARAM_INVALID, "[Check][Param]Input param is nullptr"); REPORT_INNER_ERROR("E19999", "Input param is nullptr"); @@ -352,9 +350,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetOutputSliceD return ret; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataHWCK2KCHW(const void *input, int64_t h, - int64_t w, int64_t c, int64_t k, - void **output) { +void OpUtils::TransDataHWCK2KCHW(const void *input, int64_t h, int64_t w, int64_t c, int64_t k, void **output) { if (input == nullptr) { return; } @@ -386,9 +382,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataHWCK2KCH *output = buf; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataKCHW2HWCK(const void *input, int64_t k, - int64_t c, int64_t h, int64_t w, - void *output) { +void OpUtils::TransDataKCHW2HWCK(const void *input, int64_t k, int64_t c, int64_t h, int64_t w, void *output) { if ((input == nullptr) || (output == nullptr)) { GELOGD("%s[%d]: input param is nullptr.", __FILE__, __LINE__); return; @@ -417,31 +411,22 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void OpUtils::TransDataKCHW2HWC vector OpUtils::GetWeights(const ge::Node &node) { return OpDescUtils::GetWeights(node); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector OpUtils::GetWeights(ge::ConstNodePtr node) { - return OpDescUtils::GetWeights(node); -} +vector OpUtils::GetWeights(ge::ConstNodePtr node) { return OpDescUtils::GetWeights(node); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector OpUtils::MutableWeights(const ge::Node &node) { - return OpDescUtils::MutableWeights(node); -} +vector OpUtils::MutableWeights(const ge::Node &node) { return OpDescUtils::MutableWeights(node); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY vector OpUtils::MutableWeights(const ge::NodePtr node) { - return OpDescUtils::MutableWeights(node); -} +vector OpUtils::MutableWeights(const ge::NodePtr node) { return OpDescUtils::MutableWeights(node); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetWeights(ge::Node &node, - const vector &weights) { +Status OpUtils::SetWeights(ge::Node &node, const vector &weights) { return OpDescUtils::SetWeights(node, weights); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status OpUtils::SetWeights(ge::NodePtr node, - const vector &weights) { +Status OpUtils::SetWeights(ge::NodePtr node, const vector &weights) { return OpDescUtils::SetWeights(node, weights); } // The caller guarantees that the input sensor is constant -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status -OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector &dims) { +Status OpUtils::GetShapeDataFromConstTensor(const ConstGeTensorPtr &tensor, DataType type, std::vector &dims) { if (tensor == nullptr) { GELOGE(PARAM_INVALID, 
"[Check][Param]Input tensor is nullptr"); REPORT_INNER_ERROR("E19999", "Input tensor is nullptr"); diff --git a/ge/common/profiling/ge_profiling.cc b/ge/common/profiling/ge_profiling.cc index fcd01a12..a5857b35 100644 --- a/ge/common/profiling/ge_profiling.cc +++ b/ge/common/profiling/ge_profiling.cc @@ -23,7 +23,7 @@ #include "graph/ge_context.h" #include "init/gelib.h" #include "framework/common/ge_inner_error_codes.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "framework/omg/omg_inner_types.h" namespace { diff --git a/ge/common/profiling/profiling_manager.cc b/ge/common/profiling/profiling_manager.cc index 0464491d..e8f41cc4 100644 --- a/ge/common/profiling/profiling_manager.cc +++ b/ge/common/profiling/profiling_manager.cc @@ -77,12 +77,12 @@ ProfilingManager::ProfilingManager() ProfilingManager::~ProfilingManager() {} -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager &ProfilingManager::Instance() { +ProfilingManager &ProfilingManager::Instance() { static ProfilingManager profiling_manager; return profiling_manager; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ge::Status ProfilingManager::Init(const Options &options) { +ge::Status ProfilingManager::Init(const Options &options) { #ifdef DAVINCI_SUPPORT_PROFILING vector().swap(device_id_); subscribe_count_ = 0; @@ -221,7 +221,7 @@ ge::Status ProfilingManager::ParseOptions(const std::string &options) { return ge::SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProfiling() { +void ProfilingManager::StopProfiling() { #ifdef DAVINCI_SUPPORT_PROFILING uint64_t module = GetProfilingModule(); // The following if case will not be executed in normal case, inc case of ProfStopProfiling is abnormal @@ -259,8 +259,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::StopProf #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ProfilingOpInputOutInfo( - const TaskDescInfo &task, Json &task_json) { +void ProfilingManager::ProfilingOpInputOutInfo(const TaskDescInfo &task, Json &task_json) { #ifdef DAVINCI_SUPPORT_PROFILING for (size_t i = 0; i < task.input_format.size(); i++) { Json tmp_input; @@ -286,8 +285,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::Profilin #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ProfilingTaskDescInfo( - uint32_t model_id, const std::vector &task_desc_info, const int32_t &device_id) { +void ProfilingManager::ProfilingTaskDescInfo(uint32_t model_id, const std::vector &task_desc_info, + const int32_t &device_id) { #ifdef DAVINCI_SUPPORT_PROFILING for (const auto &task : task_desc_info) { Json task_info; @@ -324,8 +323,8 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::Profilin #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfileStepInfo( - uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, int32_t device_id) { +Status ProfilingManager::ProfileStepInfo(uint64_t index_id, uint64_t model_id, uint16_t tag_id, rtStream_t stream, + int32_t device_id) { #ifdef DAVINCI_SUPPORT_PROFILING if (!is_load_profiling_ && subscribe_count_ == 0) { GELOGD("Profiling is not turned on, no need to profile step info."); @@ -385,8 +384,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Profil return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportData( - const int32_t &device_id, 
const string &data, const string &tag_name) { +void ProfilingManager::ReportData(const int32_t &device_id, const string &data, const string &tag_name) { #ifdef DAVINCI_SUPPORT_PROFILING ReporterData reporter_data{}; int ret = -1; @@ -426,8 +424,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportDa #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportProfilingData( - uint32_t model_id, const std::vector &task_desc_info) { +void ProfilingManager::ReportProfilingData(uint32_t model_id, const std::vector &task_desc_info) { #ifdef DAVINCI_SUPPORT_PROFILING int32_t logic_device_id = 0; rtError_t rt_ret = rtGetDevice(&logic_device_id); @@ -443,7 +440,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::ReportPr #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint64_t ProfilingManager::GetProfilingModule() { +uint64_t ProfilingManager::GetProfilingModule() { uint64_t module = PROF_MODEL_EXECUTE_MASK | PROF_RUNTIME_API_MASK | PROF_RUNTIME_TRACE_MASK | @@ -485,8 +482,7 @@ void ProfilingManager::UpdateSubscribeDeviceModuleMap(std::string prof_type, uin #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfModelSubscribe( - uint64_t module, void *model) { +Status ProfilingManager::ProfModelSubscribe(uint64_t module, void *model) { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); uint64_t model_load_mask = module & PROF_MODEL_LOAD_MASK; @@ -526,8 +522,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfModelUnsubscribe( - void *model) { +Status ProfilingManager::ProfModelUnsubscribe(void *model) { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); if (subscribe_count_ == 0) { @@ -568,7 +563,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfMo return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfInit(uint64_t module) { +Status ProfilingManager::ProfInit(uint64_t module) { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); uint64_t model_load_mask = module & PROF_MODEL_LOAD_MASK; @@ -602,7 +597,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfIn return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfFinalize() { +Status ProfilingManager::ProfFinalize() { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); is_load_profiling_ = false; @@ -697,8 +692,8 @@ Status ProfilingManager::ProfParseDeviceId(const std::map &config_para, - int32_t &device_num, vector &device_list) { +Status ProfilingManager::ProfParseParam(const std::map &config_para, int32_t &device_num, + vector &device_list) { #ifdef DAVINCI_SUPPORT_PROFILING // device num auto iter = config_para.find(kConfigNumsdev); @@ -747,8 +742,7 @@ Status ProfilingManager::ProfParseParam(const std::map return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfStartProfiling( - uint64_t module, const std::map &config_para) { +Status ProfilingManager::ProfStartProfiling(uint64_t module, const std::map &config_para) { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); uint64_t training_trace_mask = module & PROF_TRAINING_TRACE_MASK; @@ -803,8 +797,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt return SUCCESS; } 
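The ProfilingManager changes above preserve a common C++ pattern: Instance() returns a function-local static (a Meyers singleton, thread-safe since C++11), and the Prof* entry points serialize shared state behind a member mutex via std::lock_guard. A self-contained sketch of that pattern; Manager, SetLoaded, and IsLoaded are illustrative names, not the real API:

#include <mutex>

class Manager {
 public:
  static Manager &Instance() {
    static Manager instance;  // constructed once; C++11 guarantees thread-safe init
    return instance;
  }

  void SetLoaded(bool on) {
    std::lock_guard<std::mutex> lock(mutex_);  // serialize writers and readers
    is_loaded_ = on;
  }

  bool IsLoaded() {
    std::lock_guard<std::mutex> lock(mutex_);
    return is_loaded_;
  }

 private:
  Manager() = default;
  Manager(const Manager &) = delete;
  Manager &operator=(const Manager &) = delete;

  std::mutex mutex_;
  bool is_loaded_ = false;
};

int main() {
  Manager::Instance().SetLoaded(true);
  return Manager::Instance().IsLoaded() ? 0 : 1;  // exits 0
}

The function-local static avoids the static-initialization-order problem a namespace-scope singleton object would have, which matters for a manager that other translation units may touch during startup.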
-FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfStopProfiling(uint64_t module, - const std::map &config_para) { +Status ProfilingManager::ProfStopProfiling(uint64_t module, const std::map &config_para) { #ifdef DAVINCI_SUPPORT_PROFILING std::lock_guard lock(mutex_); int32_t device_num = 0; @@ -855,8 +848,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::ProfSt return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::UpdateDeviceIdModuleMap(string prof_type, - uint64_t module, const vector &device_list) { +void ProfilingManager::UpdateDeviceIdModuleMap(string prof_type, uint64_t module, const vector &device_list) { #ifdef DAVINCI_SUPPORT_PROFILING if (prof_type == kProfStart) { for (uint32_t i = 0; i < device_list.size(); i++) { @@ -886,7 +878,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::UpdateDe #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::ProfilingModelExecuteOn() const { +bool ProfilingManager::ProfilingModelExecuteOn() const { int32_t logic_device_id = 0; rtError_t rt_ret = rtGetDevice(&logic_device_id); if (rt_ret != RT_ERROR_NONE) { @@ -904,7 +896,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ProfilingManager::Profilin return execute_model_prof_on; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::PluginInit() { +Status ProfilingManager::PluginInit() { if (prof_cb_.msprofReporterCallback == nullptr) { GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr"); @@ -933,7 +925,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::Plugin return SUCCESS; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUnInit() const { +void ProfilingManager::PluginUnInit() const { #ifdef DAVINCI_SUPPORT_PROFILING if (prof_cb_.msprofReporterCallback == nullptr) { GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); @@ -950,8 +942,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::PluginUn #endif } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ProfilingManager::CallMsprofReport( - ReporterData &reporter_data) const { +Status ProfilingManager::CallMsprofReport(ReporterData &reporter_data) const { if (prof_cb_.msprofReporterCallback == nullptr) { GELOGE(ge::PARAM_INVALID, "[Check][Param]MsprofReporterCallback callback is nullptr"); REPORT_INNER_ERROR("E19999", "MsprofReporterCallback callback is nullptr"); @@ -1007,14 +998,12 @@ void ProfilingManager::GetOpOutputInfo(const OpDescPtr &op, TaskDescInfo &task_d task_desc_info.output_data_type = output_data_type.empty() ? 
data_type_default : output_data_type; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetOpInputOutputInfo( - const OpDescPtr &op, TaskDescInfo &task_desc_info) const { +void ProfilingManager::GetOpInputOutputInfo(const OpDescPtr &op, TaskDescInfo &task_desc_info) const { GetOpInputInfo(op, task_desc_info); GetOpOutputInfo(op, task_desc_info); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpPoint( - std::string &fp_point, std::string &bp_point) { +void ProfilingManager::GetFpBpPoint(std::string &fp_point, std::string &bp_point) { // Env or options mode, fp_point_/bp_point_ have initiliazed on profiling init if (!fp_point_.empty() && !bp_point_.empty()) { fp_point = fp_point_; @@ -1025,7 +1014,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void ProfilingManager::GetFpBpP } // ProfApi mode and training trace is set // Parse options first - char env_profiling_options[MSPROF_OPTIONS_DEF_LEN_MAX] = { 0x00 }; + char env_profiling_options[MSPROF_OPTIONS_DEF_LEN_MAX] = {0x00}; bool is_profiling_valid = false; std::string profiling_options; if (ge::GetContext().GetOption(OPTION_EXEC_PROFILING_OPTIONS, profiling_options) == SUCCESS && diff --git a/ge/common/profiling/profiling_manager.h b/ge/common/profiling/profiling_manager.h index e5137562..86371d51 100755 --- a/ge/common/profiling/profiling_manager.h +++ b/ge/common/profiling/profiling_manager.h @@ -73,7 +73,7 @@ struct MsprofCallback { MsprofReporterCallback msprofReporterCallback; }; -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ProfilingManager { +class ProfilingManager { public: ProfilingManager(); virtual ~ProfilingManager(); diff --git a/ge/common/properties_manager.cc b/ge/common/properties_manager.cc index 0c5ef1fe..aeabb008 100644 --- a/ge/common/properties_manager.cc +++ b/ge/common/properties_manager.cc @@ -35,13 +35,13 @@ PropertiesManager::PropertiesManager() : is_inited_(false), delimiter("=") {} PropertiesManager::~PropertiesManager() {} // singleton -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY PropertiesManager &PropertiesManager::Instance() { +PropertiesManager &PropertiesManager::Instance() { static PropertiesManager instance; return instance; } // Initialize property configuration -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool PropertiesManager::Init(const std::string &file_path) { +bool PropertiesManager::Init(const std::string &file_path) { std::lock_guard lock(mutex_); if (is_inited_) { GELOGW("Already inited, will be initialized again"); @@ -139,8 +139,7 @@ std::string PropertiesManager::Trim(const std::string &str) { } // Get property value, if not found, return "" -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string PropertiesManager::GetPropertyValue( - const std::string &map_key) { +std::string PropertiesManager::GetPropertyValue(const std::string &map_key) { std::lock_guard lock(mutex_); auto iter = properties_map_.find(map_key); if (properties_map_.end() != iter) { @@ -151,21 +150,19 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string PropertiesManager:: } // Set property value -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void PropertiesManager::SetPropertyValue(const std::string &map_key, - const std::string &value) { +void PropertiesManager::SetPropertyValue(const std::string &map_key, const std::string &value) { std::lock_guard lock(mutex_); properties_map_[map_key] = value; } // return properties_map_ -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::map -PropertiesManager::GetPropertyMap() { 
+std::map PropertiesManager::GetPropertyMap() { std::lock_guard lock(mutex_); return properties_map_; } // Set separator -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY void PropertiesManager::SetPropertyDelimiter(const std::string &de) { +void PropertiesManager::SetPropertyDelimiter(const std::string &de) { std::lock_guard lock(mutex_); delimiter = de; } diff --git a/ge/common/tbe_kernel_store.h b/ge/common/tbe_kernel_store.h index 6304af50..1492bdd9 100755 --- a/ge/common/tbe_kernel_store.h +++ b/ge/common/tbe_kernel_store.h @@ -21,7 +21,7 @@ namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY TBEKernelStore : public KernelStore { +class TBEKernelStore : public KernelStore { public: TBEKernelStore(); ~TBEKernelStore() {} diff --git a/ge/common/thread_pool.cc b/ge/common/thread_pool.cc index f9b7bb99..56f8ee60 100644 --- a/ge/common/thread_pool.cc +++ b/ge/common/thread_pool.cc @@ -26,7 +26,7 @@ #include "external/register/register_types.h" namespace ge { -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ThreadPool::ThreadPool(uint32_t size) : is_stoped_(false) { +ThreadPool::ThreadPool(uint32_t size) : is_stoped_(false) { idle_thrd_num_ = size < 1 ? 1 : size; for (uint32_t i = 0; i < idle_thrd_num_; ++i) { @@ -34,7 +34,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ThreadPool::ThreadPool(uint32_t } } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ThreadPool::~ThreadPool() { +ThreadPool::~ThreadPool() { is_stoped_.store(true); { std::unique_lock lock{m_lock_}; diff --git a/ge/common/thread_pool.h b/ge/common/thread_pool.h index 7e52edcc..777a3c9b 100755 --- a/ge/common/thread_pool.h +++ b/ge/common/thread_pool.h @@ -37,7 +37,7 @@ namespace ge { using ThreadTask = std::function; -class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ThreadPool { +class ThreadPool { public: explicit ThreadPool(uint32_t size = 4); ~ThreadPool(); diff --git a/ge/graph/common/transop_util.cc b/ge/common/transop_util.cc similarity index 97% rename from ge/graph/common/transop_util.cc rename to ge/common/transop_util.cc index 871ecdb1..914e80aa 100755 --- a/ge/graph/common/transop_util.cc +++ b/ge/common/transop_util.cc @@ -14,9 +14,9 @@ * limitations under the License. */ -#include "graph/common/transop_util.h" +#include "common/transop_util.h" -#include "framework/common/types.h" +#include "common/types.h" #include "graph/utils/type_utils.h" #include "framework/common/debug/ge_log.h" diff --git a/ge/graph/common/transop_util.h b/ge/common/transop_util.h similarity index 95% rename from ge/graph/common/transop_util.h rename to ge/common/transop_util.h index 883ae41b..57e4adad 100644 --- a/ge/graph/common/transop_util.h +++ b/ge/common/transop_util.h @@ -23,7 +23,7 @@ #include "graph/node.h" namespace ge { -class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY TransOpUtil { +class TransOpUtil { public: static bool IsTransOp(const NodePtr &node); diff --git a/ge/common/util.cc b/ge/common/util.cc index dfb5bac4..6d77dbc8 100644 --- a/ge/common/util.cc +++ b/ge/common/util.cc @@ -70,7 +70,7 @@ static bool ReadProtoFromCodedInputStream(CodedInputStream &coded_stream, Messag return proto->ParseFromCodedStream(&coded_stream); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromArray(const void *data, int size, Message *proto) { +bool ReadProtoFromArray(const void *data, int size, Message *proto) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((proto == nullptr || data == nullptr || size == 0), return false, "incorrect parameter. 
proto is nullptr || data is nullptr || size is 0"); @@ -112,8 +112,7 @@ long GetFileLength(const std::string &input_file) { * @return false fail * @return true success */ -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(const char *file_name, char **buffer, - int &length) { +bool ReadBytesFromBinaryFile(const char *file_name, char **buffer, int &length) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((file_name == nullptr), return false, "incorrect parameter. file is nullptr"); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((buffer == nullptr), return false, "incorrect parameter. buffer is nullptr"); @@ -141,8 +140,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(co return true; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(const char *file_name, - std::vector &buffer) { +bool ReadBytesFromBinaryFile(const char *file_name, std::vector &buffer) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((file_name == nullptr), return false, "incorrect parameter. file path is null"); std::string real_path = RealPath(file_name); @@ -177,7 +175,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadBytesFromBinaryFile(co * @return -1 fail * @return 0 success */ -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int CreateDirectory(const std::string &directory_path) { +int CreateDirectory(const std::string &directory_path) { GE_CHK_BOOL_EXEC(!directory_path.empty(), return -1, "directory path is empty."); auto dir_path_len = directory_path.length(); if (dir_path_len >= MMPA_MAX_PATH) { @@ -219,7 +217,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY int CreateDirectory(const std:: return 0; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string CurrentTimeInStr() { +std::string CurrentTimeInStr() { std::time_t now = std::time(nullptr); std::tm *ptm = std::localtime(&now); if (ptm == nullptr) { @@ -235,8 +233,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string CurrentTimeInStr() return std::string(buffer); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromText(const char *file, - google::protobuf::Message *message) { +bool ReadProtoFromText(const char *file, google::protobuf::Message *message) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((file == nullptr || message == nullptr), return false, "incorrect parameter. nullptr == file || nullptr == message"); @@ -266,8 +263,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromText(const ch return ret; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromMem(const char *data, int size, - google::protobuf::Message *message) { +bool ReadProtoFromMem(const char *data, int size, google::protobuf::Message *message) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((data == nullptr || message == nullptr), return false, "incorrect parameter. 
data is nullptr || message is nullptr"); std::string str(data, static_cast(size)); @@ -281,7 +277,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool ReadProtoFromMem(const cha return ret; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint64_t GetCurrentTimestamp() { +uint64_t GetCurrentTimestamp() { mmTimeval tv{}; int ret = mmGetTimeOfDay(&tv, nullptr); GE_LOGE_IF(ret != EN_OK, "Func gettimeofday may failed, ret:%d, errmsg:%s", ret, strerror(errno)); @@ -289,7 +285,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint64_t GetCurrentTimestamp() return static_cast(total_use_time); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint32_t GetCurrentSecondTimestap() { +uint32_t GetCurrentSecondTimestap() { mmTimeval tv{}; int ret = mmGetTimeOfDay(&tv, nullptr); GE_LOGE_IF(ret != EN_OK, "Func gettimeofday may failed, ret:%d, errmsg:%s", ret, strerror(errno)); @@ -297,7 +293,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY uint32_t GetCurrentSecondTimest return static_cast(total_use_time); } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInt64MulOverflow(int64_t a, int64_t b) { +bool CheckInt64MulOverflow(int64_t a, int64_t b) { if (a > 0) { if (b > 0) { if (a > (INT64_MAX / b)) { @@ -322,7 +318,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInt64MulOverflow(int6 return true; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY std::string RealPath(const char *path) { +std::string RealPath(const char *path) { GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(path == nullptr, return "", "path pointer is NULL."); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(strlen(path) >= MMPA_MAX_PATH, ErrorManager::GetInstance().ATCReportErrMessage("E19002", {"filepath", "size"}, @@ -349,8 +345,7 @@ void PathValidErrReport(const std::string &file_path, const std::string &atc_par } } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const std::string &file_path, - const std::string &atc_param) { +bool CheckInputPathValid(const std::string &file_path, const std::string &atc_param) { // The specified path is empty std::map args_map; if (file_path.empty()) { @@ -395,8 +390,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckInputPathValid(const return true; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY bool CheckOutputPathValid(const std::string &file_path, - const std::string &atc_param) { +bool CheckOutputPathValid(const std::string &file_path, const std::string &atc_param) { // The specified path is empty if (file_path.empty()) { if (!atc_param.empty()) { @@ -552,7 +546,7 @@ FMK_FUNC_HOST_VISIBILITY bool IsValidFile(const char *file_path) { return true; } -FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status CheckPath(const char *path, size_t length) { +Status CheckPath(const char *path, size_t length) { if (path == nullptr) { GELOGE(PARAM_INVALID, "[Check][Param]Config path is invalid"); REPORT_CALL_ERROR("E19999", "Config path is invalid"); diff --git a/ge/executor/CMakeLists.txt b/ge/executor/CMakeLists.txt index 44ba3131..54cb7639 100755 --- a/ge/executor/CMakeLists.txt +++ b/ge/executor/CMakeLists.txt @@ -1,13 +1,9 @@ set(SRC_LIST "ge_executor.cc" "../common/profiling/profiling_manager.cc" - "../common/ge/plugin_manager.cc" - "../common/ge/op_tiling_manager.cc" - "../common/dump/dump_properties.cc" - "../common/dump/exception_dumper.cc" - "../common/dump/dump_manager.cc" "../common/dump/dump_op.cc" "../common/dump/opdebug_register.cc" + "../common/dump/exception_dumper.cc" "../common/profiling/ge_profiling.cc" 
"../graph/load/graph_loader.cc" "../graph/execute/graph_execute.cc" @@ -22,8 +18,6 @@ set(SRC_LIST "../graph/manager/rdma_pool_allocator.cc" "../graph/manager/host_mem_allocator.cc" "../hybrid/node_executor/aicpu/aicpu_ext_info.cc" - "../model/ge_model.cc" - "../model/ge_root_model.cc" "../graph/load/model_manager/davinci_model.cc" "../graph/load/model_manager/model_manager.cc" "../graph/load/model_manager/tbe_handle_store.cc" @@ -55,7 +49,6 @@ set(SRC_LIST "../graph/load/model_manager/task_info/model_exit_task_info.cc" "../graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc" "../graph/load/model_manager/task_info/super_kernel/super_kernel.cc" - "../graph/common/local_context.cc" "../opskernel_manager/ops_kernel_builder_manager.cc" "../single_op/single_op_manager.cc" "../single_op/single_op_model.cc" @@ -102,7 +95,6 @@ set(SRC_LIST "../hybrid/node_executor/task_context.cc" "../hybrid/hybrid_davinci_model.cc" "../ge_local_engine/engine/host_cpu_engine.cc" - "../graph/common/omg_util.cc" "../graph/manager/host_mem_manager.cc" "../graph/build/memory/var_mem_assign_util.cc" "../host_kernels/transpose_kernel.cc" @@ -144,10 +136,6 @@ set(SRC_LIST "../host_kernels/transdata_kernel.cc" "../host_kernels/unpack_kernel.cc" "../graph/passes/pass_utils.cc" - "../graph/common/bcast.cc" - "../common/fp16_t.cc" - "../common/formats/format_transfers/format_transfer_transpose.cc" - "../common/formats/utils/formats_trans_utils.cc" ) ######## libge_executor.a ######## diff --git a/ge/executor/module.mk b/ge/executor/module.mk index 7a7e2b51..430efa75 100644 --- a/ge/executor/module.mk +++ b/ge/executor/module.mk @@ -63,7 +63,7 @@ local_ge_executor_src_files := \ ../single_op/task/aicpu_task_builder.cc \ ../single_op/task/aicpu_kernel_task_builder.cc \ ../hybrid/node_executor/aicpu/aicpu_ext_info.cc \ - ../graph/common/local_context.cc \ + ../common/local_context.cc \ ../hybrid/common/tensor_value.cc \ ../hybrid/common/npu_memory_allocator.cc \ ../hybrid/executor/rt_callback_manager.cc \ @@ -102,7 +102,7 @@ local_ge_executor_src_files := \ ../hybrid/node_executor/task_context.cc \ ../hybrid/hybrid_davinci_model.cc \ ../ge_local_engine/engine/host_cpu_engine.cc \ - ../graph/common/omg_util.cc \ + ../common/omg_util.cc \ ../graph/manager/host_mem_manager.cc \ ../graph/build/memory/var_mem_assign_util.cc \ ../host_kernels/transpose_kernel.cc \ @@ -144,7 +144,7 @@ local_ge_executor_src_files := \ ../host_kernels/transdata_kernel.cc \ ../host_kernels/unpack_kernel.cc \ ../graph/passes/pass_utils.cc \ - ../graph/common/bcast.cc \ + ../common/bcast.cc \ ../common/fp16_t.cc \ ../common/formats/format_transfers/format_transfer_transpose.cc \ ../common/formats/utils/formats_trans_utils.cc \ diff --git a/ge/ge_inference.mk b/ge/ge_inference.mk index a56eaadf..3fd8be1a 100755 --- a/ge/ge_inference.mk +++ b/ge/ge_inference.mk @@ -80,7 +80,7 @@ ANALYZER_SRC_FILES:= \ OMG_HOST_SRC_FILES := \ model/ge_model.cc \ model/ge_root_model.cc \ - graph/common/transop_util.cc \ + common/transop_util.cc \ graph/passes/pass_manager.cc \ graph/passes/resource_pair_add_control_pass.cc \ graph/passes/resource_pair_remove_control_pass.cc \ @@ -115,9 +115,9 @@ OMG_HOST_SRC_FILES := \ graph/passes/mark_graph_unknown_status_pass.cc \ graph/passes/mark_node_unknown_shape_pass.cc \ graph/passes/mark_agnostic_pass.cc \ - graph/common/omg_util.cc \ - graph/common/bcast.cc \ - graph/common/local_context.cc \ + common/omg_util.cc \ + common/bcast.cc \ + common/local_context.cc \ graph/passes/dimension_compute_pass.cc \ 
graph/passes/dimension_adjust_pass.cc \ graph/passes/get_original_format_pass.cc \ diff --git a/ge/ge_runner.mk b/ge/ge_runner.mk index 8ca8572c..d6462542 100644 --- a/ge/ge_runner.mk +++ b/ge/ge_runner.mk @@ -43,10 +43,10 @@ LIBGE_LOCAL_SRC_FILES := \ graph/build/stream_allocator.cc \ graph/build/stream_graph_optimizer.cc \ graph/build/task_generator.cc \ - graph/common/bcast.cc \ - graph/common/local_context.cc \ - graph/common/omg_util.cc \ - graph/common/transop_util.cc \ + common/bcast.cc \ + common/local_context.cc \ + common/omg_util.cc \ + common/transop_util.cc \ graph/execute/graph_execute.cc \ graph/label/case_label_maker.cc \ graph/label/if_label_maker.cc \ diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 07355ab5..45eaed59 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -36,7 +36,7 @@ #include "graph/utils/graph_utils.h" #include "graph/utils/type_utils.h" #include "init/gelib.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "analyzer/analyzer.h" using std::map; diff --git a/ge/graph/build/graph_builder.cc b/ge/graph/build/graph_builder.cc index 96dea02e..e1398d1f 100644 --- a/ge/graph/build/graph_builder.cc +++ b/ge/graph/build/graph_builder.cc @@ -21,14 +21,14 @@ #include "graph/build/logical_stream_allocator.h" #include "graph/build/run_context.h" #include "graph/build/stream_graph_optimizer.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/ge_context.h" #include "graph/manager/graph_var_manager.h" #include "graph/passes/mark_same_addr_pass.h" #include "graph/utils/node_utils.h" #include "graph/utils/type_utils.h" #include "init/gelib.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "graph/ge_context.h" #include "opskernel_manager/ops_kernel_builder_manager.h" #include "graph/utils/op_desc_utils.h" diff --git a/ge/graph/build/graph_builder.h b/ge/graph/build/graph_builder.h index c4b16814..6ed14dae 100644 --- a/ge/graph/build/graph_builder.h +++ b/ge/graph/build/graph_builder.h @@ -38,7 +38,7 @@ #include "graph/partition/graph_partition.h" #include "graph/utils/graph_utils.h" #include "graph/utils/tensor_utils.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" namespace ge { class GraphBuilder { diff --git a/ge/graph/build/logical_stream_allocator.cc b/ge/graph/build/logical_stream_allocator.cc index 58763aa9..3d6ca74a 100644 --- a/ge/graph/build/logical_stream_allocator.cc +++ b/ge/graph/build/logical_stream_allocator.cc @@ -22,7 +22,7 @@ #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" using std::map; using std::set; diff --git a/ge/graph/build/memory/block_mem_assigner.cc b/ge/graph/build/memory/block_mem_assigner.cc index 159e68a7..7d0db676 100755 --- a/ge/graph/build/memory/block_mem_assigner.cc +++ b/ge/graph/build/memory/block_mem_assigner.cc @@ -34,7 +34,7 @@ #include "graph/debug/ge_attr_define.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/optimize/common/params.h" #include "framework/omg/omg_inner_types.h" #include "runtime/mem.h" diff --git a/ge/graph/build/memory/buffer_pool_mem_assigner.cc b/ge/graph/build/memory/buffer_pool_mem_assigner.cc index d66fe038..ca197e02 100644 --- a/ge/graph/build/memory/buffer_pool_mem_assigner.cc +++ b/ge/graph/build/memory/buffer_pool_mem_assigner.cc 
@@ -15,7 +15,7 @@ */ #include "graph/build/memory/buffer_pool_mem_assigner.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/tensor_utils.h" #include "framework/common/util.h" #include "graph/compute_graph.h" diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index e086940a..f8878383 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -24,7 +24,7 @@ #include "graph/build/memory/hybrid_mem_assigner.h" #include "graph/build/memory/var_mem_assign_util.h" #include "graph/build/memory/block_mem_assigner.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_attr_value.h" #include "graph/manager/graph_var_manager.h" diff --git a/ge/graph/build/memory/var_mem_assign_util.cc b/ge/graph/build/memory/var_mem_assign_util.cc index adddf6bd..dc7c3b01 100755 --- a/ge/graph/build/memory/var_mem_assign_util.cc +++ b/ge/graph/build/memory/var_mem_assign_util.cc @@ -18,7 +18,7 @@ #include #include "framework/common/types.h" #include "framework/common/debug/ge_log.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/graph_mem_allocator.h" #include "graph/manager/graph_var_manager.h" diff --git a/ge/graph/build/model_builder.cc b/ge/graph/build/model_builder.cc index 2816f170..897be1f8 100755 --- a/ge/graph/build/model_builder.cc +++ b/ge/graph/build/model_builder.cc @@ -25,9 +25,9 @@ #include "external/graph/attr_value.h" #include "graph/buffer.h" #include "graph/build/stream_allocator.h" -#include "graph/common/omg_util.h" -#include "graph/common/ge_call_wrapper.h" -#include "graph/common/local_context.h" +#include "common/omg_util.h" +#include "common/ge_call_wrapper.h" +#include "common/local_context.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_attr_value.h" #include "graph/ge_context.h" diff --git a/ge/graph/build/model_builder.h b/ge/graph/build/model_builder.h index 151e6006..d87976dd 100644 --- a/ge/graph/build/model_builder.h +++ b/ge/graph/build/model_builder.h @@ -32,7 +32,7 @@ #include "graph/manager/graph_manager_utils.h" #include "graph/model.h" #include "graph/node.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "framework/omg/omg_inner_types.h" namespace ge { diff --git a/ge/graph/build/run_context.cc b/ge/graph/build/run_context.cc index e7f07c0a..e629bddc 100644 --- a/ge/graph/build/run_context.cc +++ b/ge/graph/build/run_context.cc @@ -18,7 +18,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { RunContextUtil::~RunContextUtil() { DestroyRtModelResources(); } diff --git a/ge/graph/build/stream_allocator.cc b/ge/graph/build/stream_allocator.cc index bc34a228..987a77f7 100644 --- a/ge/graph/build/stream_allocator.cc +++ b/ge/graph/build/stream_allocator.cc @@ -22,7 +22,7 @@ #include "framework/common/fmk_error_codes.h" #include "framework/common/types.h" #include "graph/build/logical_stream_allocator.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_context.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/build/task_generator.cc b/ge/graph/build/task_generator.cc index 67289f73..7bb2e2f6 100755 --- 
a/ge/graph/build/task_generator.cc +++ b/ge/graph/build/task_generator.cc @@ -29,7 +29,7 @@ #include "graph/utils/node_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "init/gelib.h" #include "graph/ge_local_context.h" #include "external/ge/ge_api_types.h" diff --git a/ge/graph/execute/model_executor.cc b/ge/graph/execute/model_executor.cc index d1683f1d..993ba8c3 100644 --- a/ge/graph/execute/model_executor.cc +++ b/ge/graph/execute/model_executor.cc @@ -18,8 +18,8 @@ #include "graph/ge_context.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/ge_call_wrapper.h" -#include "graph/common/local_context.h" +#include "common/ge_call_wrapper.h" +#include "common/local_context.h" #include "graph/manager/graph_var_manager.h" #include "graph/utils/tensor_adapter.h" #include "graph/load/graph_loader.h" diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index 1d6f7aff..aba06173 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -32,7 +32,7 @@ #include "common/thread_pool.h" #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_context.h" @@ -57,9 +57,9 @@ #include "runtime/rt_model.h" #include "runtime/stream.h" #include "securec.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "common/formats/utils/formats_trans_utils.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/build/memory/block_mem_assigner.h" #include "graph/manager/session_scope_mem_allocator.h" #include "framework/omg/omg_inner_types.h" diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 4ff36677..fe89f66f 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -49,7 +49,7 @@ #include "mmpa/mmpa_api.h" #include "proto/task.pb.h" #include "graph/load/model_manager/task_info/task_info.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" using std::mutex; using std::thread; diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 5af503b2..d0d88e66 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -23,9 +23,9 @@ #include "common/dump/dump_manager.h" #include "framework/common/l2_cache_optimize.h" #include "common/profiling/profiling_manager.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/load/model_manager/davinci_model.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" #include "common/formats/utils/formats_trans_utils.h" namespace ge { diff --git a/ge/graph/load/model_manager/model_manager.h b/ge/graph/load/model_manager/model_manager.h index 63a03dd7..6389d6db 100755 --- a/ge/graph/load/model_manager/model_manager.h +++ b/ge/graph/load/model_manager/model_manager.h @@ -17,7 +17,7 @@ #ifndef GE_GRAPH_LOAD_NEW_MODEL_MANAGER_MODEL_MANAGER_H_ #define GE_GRAPH_LOAD_NEW_MODEL_MANAGER_MODEL_MANAGER_H_ -#include +#include #include #include #include diff --git a/ge/graph/manager/graph_manager.cc 
b/ge/graph/manager/graph_manager.cc index 84ed3ab0..7d72d85b 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -29,9 +29,9 @@ #include "common/dump/dump_manager.h" #include "ge_opt_info/ge_opt_info.h" #include "analyzer/analyzer.h" -#include "graph/common/ge_call_wrapper.h" -#include "graph/common/local_context.h" -#include "graph/common/transop_util.h" +#include "common/ge_call_wrapper.h" +#include "common/local_context.h" +#include "common/transop_util.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" #include "graph/manager/util/rt_context_util.h" @@ -103,8 +103,8 @@ #include "inc/pass_manager.h" #include "init/gelib.h" #include "ir_build/option_utils.h" -#include "graph/common/local_context.h" -#include "graph/common/omg_util.h" +#include "common/local_context.h" +#include "common/omg_util.h" #include "common/formats/utils/formats_trans_utils.h" #include "register/custom_pass_helper.h" #include "external/graph/types.h" diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index 763654bd..84d2b11e 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -38,7 +38,7 @@ #include "graph/partition/graph_partition.h" #include "graph/preprocess/graph_preprocess.h" #include "graph/tuning_utils.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "common/executor.h" namespace ge { diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h index 9cec6b6d..14eb67f2 100644 --- a/ge/graph/manager/graph_manager_utils.h +++ b/ge/graph/manager/graph_manager_utils.h @@ -33,11 +33,11 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "graph/compute_graph.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "external/graph/graph.h" #include "graph/model.h" -#include "model/ge_model.h" -#include "model/ge_root_model.h" +#include "common/model/ge_model.h" +#include "common/model/ge_root_model.h" #include "external/register/register_fmk_types.h" #include "external/ge/ge_api_types.h" diff --git a/ge/graph/optimize/graph_optimize.cc b/ge/graph/optimize/graph_optimize.cc index 55f374eb..a321ed43 100644 --- a/ge/graph/optimize/graph_optimize.cc +++ b/ge/graph/optimize/graph_optimize.cc @@ -17,7 +17,7 @@ #include "graph/optimize/graph_optimize.h" #include "graph/ge_context.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/passes/dimension_adjust_pass.h" #include "inc/pass_manager.h" #include "init/gelib.h" diff --git a/ge/graph/optimize/mem_rw_conflict_optimize.cc b/ge/graph/optimize/mem_rw_conflict_optimize.cc index 7e7ab908..2edb1828 100644 --- a/ge/graph/optimize/mem_rw_conflict_optimize.cc +++ b/ge/graph/optimize/mem_rw_conflict_optimize.cc @@ -17,7 +17,7 @@ #include #include "common/ge/ge_util.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/optimize/graph_optimize.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/partition/dynamic_shape_partition.cc b/ge/graph/partition/dynamic_shape_partition.cc index 8fc19ff2..cd98b6c5 100755 --- a/ge/graph/partition/dynamic_shape_partition.cc +++ b/ge/graph/partition/dynamic_shape_partition.cc @@ -31,7 +31,7 @@ #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/utils/op_desc_utils.h" -#include "graph/common/omg_util.h" +#include 
"common/omg_util.h" #define REQUIRE(cond, ...) \ do { \ diff --git a/ge/graph/partition/graph_partition.cc b/ge/graph/partition/graph_partition.cc index 6f221d97..86c9f1fd 100755 --- a/ge/graph/partition/graph_partition.cc +++ b/ge/graph/partition/graph_partition.cc @@ -28,7 +28,7 @@ #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" #include "graph/manager/graph_manager_utils.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/utils/graph_utils.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/partition/graph_partition.h b/ge/graph/partition/graph_partition.h index 560aa9e7..6c21fabe 100644 --- a/ge/graph/partition/graph_partition.h +++ b/ge/graph/partition/graph_partition.h @@ -70,6 +70,8 @@ class GraphPartitioner { // Return all subgraphs const Graph2SubGraphInfoList &GetSubGraphMap(); + const Graph2InputNodesSubGraphInfo &GetSubGraphInfoMap() {return graph_2_input_subgraph_; } + private: Status MergeSubGraph(ge::ComputeGraphPtr &output_merged_compute_graph, const ge::ComputeGraphPtr &original_compute_graph); diff --git a/ge/graph/passes/atomic_addr_clean_pass.cc b/ge/graph/passes/atomic_addr_clean_pass.cc index cc22d126..13700e2e 100755 --- a/ge/graph/passes/atomic_addr_clean_pass.cc +++ b/ge/graph/passes/atomic_addr_clean_pass.cc @@ -24,7 +24,7 @@ #include "framework/common/ge_inner_error_codes.h" #include "common/ge/ge_util.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/node_utils.h" #include "init/gelib.h" diff --git a/ge/graph/passes/attach_stream_label_pass.cc b/ge/graph/passes/attach_stream_label_pass.cc index bcf86bc2..71d74500 100644 --- a/ge/graph/passes/attach_stream_label_pass.cc +++ b/ge/graph/passes/attach_stream_label_pass.cc @@ -16,7 +16,7 @@ #include "graph/passes/attach_stream_label_pass.h" #include "external/ge/ge_api_types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" using std::string; diff --git a/ge/graph/passes/buffer_pool_memory_pass.cc b/ge/graph/passes/buffer_pool_memory_pass.cc index 8a64da59..deb25325 100644 --- a/ge/graph/passes/buffer_pool_memory_pass.cc +++ b/ge/graph/passes/buffer_pool_memory_pass.cc @@ -18,7 +18,7 @@ #include #include -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/node_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/op_desc_utils.h" diff --git a/ge/graph/passes/cast_remove_pass.cc b/ge/graph/passes/cast_remove_pass.cc index 564b311d..1e1f4eb4 100644 --- a/ge/graph/passes/cast_remove_pass.cc +++ b/ge/graph/passes/cast_remove_pass.cc @@ -18,7 +18,7 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/cast_translate_pass.cc b/ge/graph/passes/cast_translate_pass.cc index d49424c8..704faeda 100644 --- a/ge/graph/passes/cast_translate_pass.cc +++ b/ge/graph/passes/cast_translate_pass.cc @@ -22,7 +22,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/passes/pass_utils.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/compile_nodes_pass.cc 
b/ge/graph/passes/compile_nodes_pass.cc index 1e734178..c5976f11 100755 --- a/ge/graph/passes/compile_nodes_pass.cc +++ b/ge/graph/passes/compile_nodes_pass.cc @@ -22,7 +22,7 @@ #include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/op_desc.h" using domi::ImplyType; diff --git a/ge/graph/passes/control_trigger_pass.cc b/ge/graph/passes/control_trigger_pass.cc index 85505dc5..d81edefd 100644 --- a/ge/graph/passes/control_trigger_pass.cc +++ b/ge/graph/passes/control_trigger_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/control_trigger_pass.h" #include #include "common/ge/ge_util.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/type_utils.h" namespace ge { diff --git a/ge/graph/passes/dimension_adjust_pass.h b/ge/graph/passes/dimension_adjust_pass.h index a84f0d8d..cba283ed 100755 --- a/ge/graph/passes/dimension_adjust_pass.h +++ b/ge/graph/passes/dimension_adjust_pass.h @@ -21,7 +21,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/passes/base_pass.h" #include "graph/utils/attr_utils.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/flow_ctrl_pass.cc b/ge/graph/passes/flow_ctrl_pass.cc index 87896dc3..e75a4592 100755 --- a/ge/graph/passes/flow_ctrl_pass.cc +++ b/ge/graph/passes/flow_ctrl_pass.cc @@ -22,7 +22,7 @@ #include "framework/common/debug/ge_log.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "common/ge/ge_util.h" #include "graph/manager/graph_var_manager.h" #include "graph/passes/pass_utils.h" diff --git a/ge/graph/passes/get_original_format_pass.cc b/ge/graph/passes/get_original_format_pass.cc index 4b27dd0e..0da4c5cc 100644 --- a/ge/graph/passes/get_original_format_pass.cc +++ b/ge/graph/passes/get_original_format_pass.cc @@ -25,7 +25,7 @@ #include "framework/omg/omg_inner_types.h" #include "graph/utils/attr_utils.h" #include "graph/utils/op_desc_utils.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" using domi::DOMI_TENSOR_NCHW; using domi::DOMI_TENSOR_NHWC; diff --git a/ge/graph/passes/guarantee_const_pass.cc b/ge/graph/passes/guarantee_const_pass.cc index b1df73a9..06bc821c 100644 --- a/ge/graph/passes/guarantee_const_pass.cc +++ b/ge/graph/passes/guarantee_const_pass.cc @@ -21,7 +21,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/attr_utils.h" #include "graph/utils/graph_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/hccl_tailing_optimization_pass.cc b/ge/graph/passes/hccl_tailing_optimization_pass.cc index d952885d..fe606067 100644 --- a/ge/graph/passes/hccl_tailing_optimization_pass.cc +++ b/ge/graph/passes/hccl_tailing_optimization_pass.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "graph/passes/hccl_tailing_optimization_pass.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" namespace ge { Status HcclTailingOptimizationPass::Run(ComputeGraphPtr graph) { diff --git a/ge/graph/passes/identity_pass.cc b/ge/graph/passes/identity_pass.cc index f0653983..0a346bb1 100755 --- a/ge/graph/passes/identity_pass.cc +++ b/ge/graph/passes/identity_pass.cc @@ -19,7 +19,7 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/node_utils.h" #include "graph/utils/attr_utils.h" #include "graph/debug/ge_attr_define.h" diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc index 60a2f09a..a5e64519 100755 --- a/ge/graph/passes/infershape_pass.cc +++ b/ge/graph/passes/infershape_pass.cc @@ -22,7 +22,7 @@ #include "graph/shape_refiner.h" #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/iterator_op_pass.cc b/ge/graph/passes/iterator_op_pass.cc index d1de809d..57416017 100644 --- a/ge/graph/passes/iterator_op_pass.cc +++ b/ge/graph/passes/iterator_op_pass.cc @@ -26,7 +26,7 @@ #include "common/ge/ge_util.h" #include "framework/common/debug/ge_log.h" #include "graph/anchor.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "external/graph/graph.h" #include "graph/node.h" #include "graph/passes/pass_utils.h" diff --git a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc index 67b6c617..3989e54f 100644 --- a/ge/graph/passes/mark_force_unknown_for_cond_pass.cc +++ b/ge/graph/passes/mark_force_unknown_for_cond_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/mark_force_unknown_for_cond_pass.h" #include "graph/utils/node_utils.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { namespace { diff --git a/ge/graph/passes/mark_node_unknown_shape_pass.cc b/ge/graph/passes/mark_node_unknown_shape_pass.cc index c040e846..eadd3ca7 100644 --- a/ge/graph/passes/mark_node_unknown_shape_pass.cc +++ b/ge/graph/passes/mark_node_unknown_shape_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/mark_node_unknown_shape_pass.h" #include "graph/utils/node_utils.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" namespace ge { namespace { diff --git a/ge/graph/passes/merge_input_memcpy_pass.cc b/ge/graph/passes/merge_input_memcpy_pass.cc index 044d4ad9..97a17d99 100644 --- a/ge/graph/passes/merge_input_memcpy_pass.cc +++ b/ge/graph/passes/merge_input_memcpy_pass.cc @@ -18,7 +18,7 @@ #include "common/ge/ge_util.h" #include "external/ge/ge_api_types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { Status MergeInputMemcpyPass::Run(ComputeGraphPtr graph) { diff --git a/ge/graph/passes/merge_pass.cc b/ge/graph/passes/merge_pass.cc index fec9c6d0..2ddfcaab 100644 --- a/ge/graph/passes/merge_pass.cc +++ b/ge/graph/passes/merge_pass.cc @@ -22,7 +22,7 @@ #include "framework/common/debug/ge_log.h" #include "common/ge/ge_util.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/passes/pass_utils.h" diff --git 
a/ge/graph/passes/merge_to_stream_merge_pass.cc b/ge/graph/passes/merge_to_stream_merge_pass.cc index c58def59..e91410e1 100644 --- a/ge/graph/passes/merge_to_stream_merge_pass.cc +++ b/ge/graph/passes/merge_to_stream_merge_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/merge_to_stream_merge_pass.h" #include "common/ge/ge_util.h" #include "external/ge/ge_api_types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { Status MergeToStreamMergePass::Run(ComputeGraphPtr graph) { diff --git a/ge/graph/passes/multi_batch_clone_pass.cc b/ge/graph/passes/multi_batch_clone_pass.cc index d36b4186..b25239b1 100755 --- a/ge/graph/passes/multi_batch_clone_pass.cc +++ b/ge/graph/passes/multi_batch_clone_pass.cc @@ -18,14 +18,14 @@ #include "common/formats/utils/formats_trans_utils.h" #include "common/ge/ge_util.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/preprocess/multi_batch_options.h" #include "graph/utils/node_utils.h" #include "graph/utils/op_desc_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" #include "register/op_registry.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { namespace { diff --git a/ge/graph/passes/multi_batch_pass.cc b/ge/graph/passes/multi_batch_pass.cc index 25d629fa..9fba362c 100644 --- a/ge/graph/passes/multi_batch_pass.cc +++ b/ge/graph/passes/multi_batch_pass.cc @@ -19,7 +19,7 @@ #include #include #include "common/ge/ge_util.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/type_utils.h" #include "common/formats/utils/formats_trans_utils.h" diff --git a/ge/graph/passes/net_output_pass.cc b/ge/graph/passes/net_output_pass.cc index 30455fa0..9aea4863 100644 --- a/ge/graph/passes/net_output_pass.cc +++ b/ge/graph/passes/net_output_pass.cc @@ -27,7 +27,7 @@ #include "framework/common/ge_inner_error_codes.h" #include "framework/omg/omg_inner_types.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/passes/pass_utils.h" #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/next_iteration_pass.cc b/ge/graph/passes/next_iteration_pass.cc index 1c2d7218..af3e4d2d 100644 --- a/ge/graph/passes/next_iteration_pass.cc +++ b/ge/graph/passes/next_iteration_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/next_iteration_pass.h" #include "common/ge/ge_util.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/node_utils.h" using std::string; diff --git a/ge/graph/passes/pass_manager.cc b/ge/graph/passes/pass_manager.cc index 7c9aa414..afd2e4a7 100644 --- a/ge/graph/passes/pass_manager.cc +++ b/ge/graph/passes/pass_manager.cc @@ -19,7 +19,7 @@ #include "framework/common/types.h" #include "framework/common/util.h" #include "graph/utils/node_utils.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "framework/omg/omg_inner_types.h" namespace ge { diff --git a/ge/graph/passes/pass_utils.cc b/ge/graph/passes/pass_utils.cc index d5306f5f..0e056a0f 100644 --- a/ge/graph/passes/pass_utils.cc +++ b/ge/graph/passes/pass_utils.cc @@ -27,7 +27,7 @@ #include "common/ge/ge_util.h" #include "framework/common/op/ge_op_utils.h" #include "framework/common/types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_tensor.h" #include 
"graph/manager/graph_var_manager.h" diff --git a/ge/graph/passes/permute_pass.cc b/ge/graph/passes/permute_pass.cc index 21222b2c..f3045b1a 100644 --- a/ge/graph/passes/permute_pass.cc +++ b/ge/graph/passes/permute_pass.cc @@ -24,7 +24,7 @@ #include "inc/kernel.h" #include "inc/kernel_factory.h" #include "framework/omg/omg_inner_types.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" using domi::DOMI_TENSOR_ND; using domi::DOMI_TENSOR_NHWC; diff --git a/ge/graph/passes/placeholder_with_default_pass.cc b/ge/graph/passes/placeholder_with_default_pass.cc index 893ee798..bc51b217 100644 --- a/ge/graph/passes/placeholder_with_default_pass.cc +++ b/ge/graph/passes/placeholder_with_default_pass.cc @@ -18,7 +18,7 @@ #include #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { Status PlaceholderWithDefaultPass::Run(NodePtr &node) { diff --git a/ge/graph/passes/prevent_gradient_pass.cc b/ge/graph/passes/prevent_gradient_pass.cc index c531fd2f..8b8b17bd 100644 --- a/ge/graph/passes/prevent_gradient_pass.cc +++ b/ge/graph/passes/prevent_gradient_pass.cc @@ -19,7 +19,7 @@ #include #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { Status PreventGradientPass::Run(NodePtr &node) { diff --git a/ge/graph/passes/print_op_pass.h b/ge/graph/passes/print_op_pass.h index 7ee19d5d..96501dc5 100755 --- a/ge/graph/passes/print_op_pass.h +++ b/ge/graph/passes/print_op_pass.h @@ -20,7 +20,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/types.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "external/graph/graph.h" #include "graph/passes/base_pass.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/ref_identity_delete_op_pass.cc b/ge/graph/passes/ref_identity_delete_op_pass.cc index 7bc5804b..46bc7467 100644 --- a/ge/graph/passes/ref_identity_delete_op_pass.cc +++ b/ge/graph/passes/ref_identity_delete_op_pass.cc @@ -17,7 +17,7 @@ #include "graph/passes/ref_identity_delete_op_pass.h" #include #include -#include "graph/common/transop_util.h" +#include "common/transop_util.h" namespace ge { Status RefIdentityDeleteOpPass::Run(ComputeGraphPtr graph) { diff --git a/ge/graph/passes/replace_transshape_pass.cc b/ge/graph/passes/replace_transshape_pass.cc index c7844619..0e1701ab 100644 --- a/ge/graph/passes/replace_transshape_pass.cc +++ b/ge/graph/passes/replace_transshape_pass.cc @@ -21,7 +21,7 @@ #include "common/ge/ge_util.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/debug/ge_log.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/utils/graph_utils.h" namespace ge { diff --git a/ge/graph/passes/snapshot_pass.cc b/ge/graph/passes/snapshot_pass.cc index 95733e67..a6cd79a3 100644 --- a/ge/graph/passes/snapshot_pass.cc +++ b/ge/graph/passes/snapshot_pass.cc @@ -18,7 +18,7 @@ #include #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { Status SnapshotPass::Run(NodePtr &node) { diff --git a/ge/graph/passes/stop_gradient_pass.h b/ge/graph/passes/stop_gradient_pass.h index 5132b889..5f022200 100755 --- a/ge/graph/passes/stop_gradient_pass.h +++ 
b/ge/graph/passes/stop_gradient_pass.h @@ -20,7 +20,7 @@ #include "framework/common/debug/ge_log.h" #include "framework/common/types.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/passes/base_pass.h" namespace ge { diff --git a/ge/graph/passes/switch_dead_branch_elimination.cc b/ge/graph/passes/switch_dead_branch_elimination.cc index 3c6c57d0..284111ba 100644 --- a/ge/graph/passes/switch_dead_branch_elimination.cc +++ b/ge/graph/passes/switch_dead_branch_elimination.cc @@ -19,7 +19,7 @@ #include #include #include "framework/common/debug/ge_log.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/passes/pass_utils.h" #include "graph/utils/graph_utils.h" diff --git a/ge/graph/passes/switch_to_stream_switch_pass.cc b/ge/graph/passes/switch_to_stream_switch_pass.cc index 7fecae31..acbf27e3 100644 --- a/ge/graph/passes/switch_to_stream_switch_pass.cc +++ b/ge/graph/passes/switch_to_stream_switch_pass.cc @@ -18,7 +18,7 @@ #include #include "common/ge/ge_util.h" #include "external/ge/ge_api_types.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/ge_context.h" #include "graph/utils/type_utils.h" diff --git a/ge/graph/passes/transop_breadth_fusion_pass.cc b/ge/graph/passes/transop_breadth_fusion_pass.cc index 5b8e1940..88db9501 100644 --- a/ge/graph/passes/transop_breadth_fusion_pass.cc +++ b/ge/graph/passes/transop_breadth_fusion_pass.cc @@ -20,7 +20,7 @@ #include #include "framework/common/types.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/utils/node_utils.h" namespace ge { diff --git a/ge/graph/passes/transop_depth_fusion_pass.cc b/ge/graph/passes/transop_depth_fusion_pass.cc index 66ce346a..3ce54e50 100755 --- a/ge/graph/passes/transop_depth_fusion_pass.cc +++ b/ge/graph/passes/transop_depth_fusion_pass.cc @@ -23,7 +23,7 @@ #include "graph/ge_tensor.h" #include "graph/op_desc.h" #include "graph/utils/graph_utils.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/utils/node_utils.h" namespace ge { diff --git a/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc b/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc index 483575a4..437926ef 100644 --- a/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc +++ b/ge/graph/passes/transop_nearby_allreduce_fusion_pass.cc @@ -19,7 +19,7 @@ #include "framework/common/debug/log.h" #include "framework/common/types.h" #include "graph/utils/graph_utils.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" namespace ge { Status TransOpNearbyAllreduceFusionPass::Run(NodePtr &node) { diff --git a/ge/graph/passes/transop_symmetry_elimination_pass.cc b/ge/graph/passes/transop_symmetry_elimination_pass.cc index fe0e48f9..2bd00206 100644 --- a/ge/graph/passes/transop_symmetry_elimination_pass.cc +++ b/ge/graph/passes/transop_symmetry_elimination_pass.cc @@ -18,7 +18,7 @@ #include "common/formats/utils/formats_trans_utils.h" #include "framework/common/debug/ge_log.h" #include "framework/common/util.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/utils/graph_utils.h" #include "graph/utils/node_utils.h" diff --git a/ge/graph/passes/transop_without_reshape_fusion_pass.cc b/ge/graph/passes/transop_without_reshape_fusion_pass.cc index 10e619b9..58145fe7 100644 --- 
a/ge/graph/passes/transop_without_reshape_fusion_pass.cc +++ b/ge/graph/passes/transop_without_reshape_fusion_pass.cc @@ -22,7 +22,7 @@ #include "common/ge/ge_util.h" #include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "graph/compute_graph.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_tensor.h" diff --git a/ge/graph/passes/variable_op_pass.h b/ge/graph/passes/variable_op_pass.h index d442fdf4..e314fd12 100755 --- a/ge/graph/passes/variable_op_pass.h +++ b/ge/graph/passes/variable_op_pass.h @@ -18,7 +18,7 @@ #define GE_GRAPH_PASSES_VARIABLE_OP_PASS_H_ #include #include -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "external/graph/graph.h" #include "graph/manager/graph_var_manager.h" #include "graph/manager/util/variable_accelerate_ctrl.h" diff --git a/ge/graph/passes/variable_prepare_op_pass.cc b/ge/graph/passes/variable_prepare_op_pass.cc index 3bb9a2fa..288ff185 100644 --- a/ge/graph/passes/variable_prepare_op_pass.cc +++ b/ge/graph/passes/variable_prepare_op_pass.cc @@ -21,7 +21,7 @@ #include "common/ge/ge_util.h" #include "external/graph/graph.h" #include "framework/common/debug/ge_log.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/debug/ge_attr_define.h" #include "graph/node.h" #include "graph/utils/tensor_utils.h" diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index 8d59d9f9..2efe623e 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -28,9 +28,9 @@ #include "common/math/math_util.h" #include "framework/common/op/ge_op_utils.h" #include "ir_build/option_utils.h" -#include "graph/common/ge_call_wrapper.h" -#include "graph/common/local_context.h" -#include "graph/common/transop_util.h" +#include "common/ge_call_wrapper.h" +#include "common/local_context.h" +#include "common/transop_util.h" #include "graph/ge_context.h" #include "graph/shape_refiner.h" #include "graph/manager/graph_var_manager.h" diff --git a/ge/graph/preprocess/insert_op/ge_aipp_op.cc b/ge/graph/preprocess/insert_op/ge_aipp_op.cc index 48bfa3e6..7a89a1f4 100755 --- a/ge/graph/preprocess/insert_op/ge_aipp_op.cc +++ b/ge/graph/preprocess/insert_op/ge_aipp_op.cc @@ -39,7 +39,7 @@ #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" #include "proto/insert_op.pb.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #define SAVE_AIPP_ATTR(KEY, SAVE_TYPE) \ do { \ diff --git a/ge/graph/preprocess/multi_batch_copy_graph.cc b/ge/graph/preprocess/multi_batch_copy_graph.cc index fd3a4e91..d88cf6cd 100644 --- a/ge/graph/preprocess/multi_batch_copy_graph.cc +++ b/ge/graph/preprocess/multi_batch_copy_graph.cc @@ -38,8 +38,8 @@ #include "graph/utils/tensor_utils.h" #include "graph/utils/type_utils.h" #include "inc/pass_manager.h" -#include "graph/common/local_context.h" -#include "graph/common/omg_util.h" +#include "common/local_context.h" +#include "common/omg_util.h" using std::set; using std::string; diff --git a/ge/graph/preprocess/multi_batch_options.cc b/ge/graph/preprocess/multi_batch_options.cc index 21cbc0c2..9cda6194 100644 --- a/ge/graph/preprocess/multi_batch_options.cc +++ b/ge/graph/preprocess/multi_batch_options.cc @@ -25,11 +25,11 @@ #include "graph/debug/ge_attr_define.h" #include "graph/utils/node_utils.h" #include "graph/ge_context.h" -#include 
"graph/common/local_context.h" +#include "common/local_context.h" #include "framework/common/types.h" #include "graph/compute_graph.h" #include "graph/utils/graph_utils.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { namespace multibatch { diff --git a/ge/host_kernels/add_kernel.cc b/ge/host_kernels/add_kernel.cc index 1c206018..eb0ea86d 100644 --- a/ge/host_kernels/add_kernel.cc +++ b/ge/host_kernels/add_kernel.cc @@ -19,7 +19,7 @@ #include #include "common/math/math_util.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/broadcast_args_kernel.cc b/ge/host_kernels/broadcast_args_kernel.cc index 796142f4..660717ad 100644 --- a/ge/host_kernels/broadcast_args_kernel.cc +++ b/ge/host_kernels/broadcast_args_kernel.cc @@ -22,7 +22,7 @@ #include "framework/common/types.h" #include "framework/common/util.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/passes/pass_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/broadcast_gradient_args_kernel.cc b/ge/host_kernels/broadcast_gradient_args_kernel.cc index 59993171..8b9e3fb5 100644 --- a/ge/host_kernels/broadcast_gradient_args_kernel.cc +++ b/ge/host_kernels/broadcast_gradient_args_kernel.cc @@ -22,7 +22,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/passes/pass_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/cast_kernel.cc b/ge/host_kernels/cast_kernel.cc index 3f09974f..2d2f463c 100644 --- a/ge/host_kernels/cast_kernel.cc +++ b/ge/host_kernels/cast_kernel.cc @@ -28,7 +28,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/floormod_kernel.cc b/ge/host_kernels/floormod_kernel.cc index bef6d014..1d101667 100644 --- a/ge/host_kernels/floormod_kernel.cc +++ b/ge/host_kernels/floormod_kernel.cc @@ -23,7 +23,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/greater_kernel.cc b/ge/host_kernels/greater_kernel.cc index 3e62db04..0cc895c4 100644 --- a/ge/host_kernels/greater_kernel.cc +++ b/ge/host_kernels/greater_kernel.cc @@ -25,7 +25,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/maximum_kernel.cc b/ge/host_kernels/maximum_kernel.cc index 314bc7be..0e28fcdc 100644 --- a/ge/host_kernels/maximum_kernel.cc +++ b/ge/host_kernels/maximum_kernel.cc @@ -25,7 +25,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" 
#include "inc/kernel_factory.h" diff --git a/ge/host_kernels/mul_kernel.cc b/ge/host_kernels/mul_kernel.cc index e3657197..608f351d 100644 --- a/ge/host_kernels/mul_kernel.cc +++ b/ge/host_kernels/mul_kernel.cc @@ -25,7 +25,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/permute_kernel.cc b/ge/host_kernels/permute_kernel.cc index 93d56415..9e9462b6 100755 --- a/ge/host_kernels/permute_kernel.cc +++ b/ge/host_kernels/permute_kernel.cc @@ -24,7 +24,7 @@ #include "framework/common/op/ge_op_utils.h" #include "framework/common/types.h" #include "framework/common/util.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" #include "common/formats/formats.h" diff --git a/ge/host_kernels/sub_kernel.cc b/ge/host_kernels/sub_kernel.cc index 84c334b0..0aebb946 100644 --- a/ge/host_kernels/sub_kernel.cc +++ b/ge/host_kernels/sub_kernel.cc @@ -23,7 +23,7 @@ #include "framework/common/debug/log.h" #include "common/math/math_util.h" #include "framework/common/op/ge_op_utils.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/host_kernels/transdata_kernel.cc b/ge/host_kernels/transdata_kernel.cc index a06db78b..7d44fdae 100644 --- a/ge/host_kernels/transdata_kernel.cc +++ b/ge/host_kernels/transdata_kernel.cc @@ -28,7 +28,7 @@ #include "framework/common/util.h" #include "framework/common/debug/ge_log.h" #include "framework/common/ge_inner_error_codes.h" -#include "graph/common/bcast.h" +#include "common/bcast.h" #include "host_kernels/kernel_utils.h" #include "graph/utils/type_utils.h" #include "inc/kernel_factory.h" diff --git a/ge/hybrid/hybrid_davinci_model.h b/ge/hybrid/hybrid_davinci_model.h index 34503b01..abab74f6 100644 --- a/ge/hybrid/hybrid_davinci_model.h +++ b/ge/hybrid/hybrid_davinci_model.h @@ -20,7 +20,7 @@ #include #include "external/ge/ge_api_error_codes.h" #include "graph/load/model_manager/data_inputer.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/model/hybrid_model.h b/ge/hybrid/model/hybrid_model.h index 77246e20..3cb936f6 100644 --- a/ge/hybrid/model/hybrid_model.h +++ b/ge/hybrid/model/hybrid_model.h @@ -27,7 +27,7 @@ #include "hybrid/common/tensor_value.h" #include "hybrid/model/node_item.h" #include "hybrid/model/graph_item.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" namespace ge { namespace hybrid { diff --git a/ge/hybrid/model/hybrid_model_builder.cc b/ge/hybrid/model/hybrid_model_builder.cc index c722d269..44115240 100755 --- a/ge/hybrid/model/hybrid_model_builder.cc +++ b/ge/hybrid/model/hybrid_model_builder.cc @@ -21,7 +21,7 @@ #include "graph/ge_context.h" #include "graph/build/memory/var_mem_assign_util.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" #include "graph/load/model_manager/model_utils.h" #include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_var_manager.h" diff --git a/ge/hybrid/model/hybrid_model_builder.h b/ge/hybrid/model/hybrid_model_builder.h index 05830e82..3592d3d2 100644 --- a/ge/hybrid/model/hybrid_model_builder.h +++ 
b/ge/hybrid/model/hybrid_model_builder.h @@ -25,7 +25,7 @@ #include "graph/node.h" #include "hybrid/model/hybrid_model.h" #include "hybrid/model/node_item.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" namespace ge { class VarManager; diff --git a/ge/init/gelib.cc b/ge/init/gelib.cc index 1a2f0d5b..2491715b 100644 --- a/ge/init/gelib.cc +++ b/ge/init/gelib.cc @@ -34,7 +34,7 @@ #include "analyzer/analyzer.h" #include "external/ge/ge_api_types.h" #include "ge_local_engine/engine/host_cpu_engine.h" -#include "graph/common/ge_call_wrapper.h" +#include "common/ge_call_wrapper.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" #include "graph/manager/graph_mem_manager.h" diff --git a/ge/ir_build/attr_options/utils.cc b/ge/ir_build/attr_options/utils.cc index 5398c220..23bb0b7b 100644 --- a/ge/ir_build/attr_options/utils.cc +++ b/ge/ir_build/attr_options/utils.cc @@ -17,7 +17,7 @@ #include #include "graph/debug/ge_attr_define.h" #include "framework/common/debug/ge_log.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { namespace { const std::string CFG_PRE_OPTYPE = "OpType::"; diff --git a/ge/ir_build/ge_ir_build.cc b/ge/ir_build/ge_ir_build.cc index e1cd5d29..cafc534d 100644 --- a/ge/ir_build/ge_ir_build.cc +++ b/ge/ir_build/ge_ir_build.cc @@ -33,7 +33,7 @@ #include "graph/ge_global_options.h" #include "init/gelib.h" #include "ir_build/option_utils.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #include "graph/shape_refiner.h" #include "graph/opsproto_manager.h" #include "inc/pass_manager.h" diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index 1dcc2996..b9c44ef1 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -29,7 +29,7 @@ #include "graph/ge_context.h" #include "graph/ge_global_options.h" #include "graph/ge_local_context.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/manager/graph_var_manager.h" #include "graph/manager/graph_mem_manager.h" #include "graph/utils/tensor_adapter.h" diff --git a/inc/framework/common/helper/model_helper.h b/inc/framework/common/helper/model_helper.h index e25d5d6f..2a63291c 100644 --- a/inc/framework/common/helper/model_helper.h +++ b/inc/framework/common/helper/model_helper.h @@ -22,10 +22,10 @@ #include "common/fmk_types.h" #include "common/helper/om_file_helper.h" +#include "common/model/ge_model.h" +#include "common/model/ge_root_model.h" #include "common/types.h" #include "graph/model.h" -#include "model/ge_model.h" -#include "model/ge_root_model.h" namespace ge { class GE_FUNC_VISIBILITY ModelHelper { @@ -42,13 +42,21 @@ class GE_FUNC_VISIBILITY ModelHelper { Status LoadRootModel(const ge::ModelData &model_data); Status GetModelBufferData(ge::ModelBufferData &model); - const ModelFileHeader *GetFileHeader() const { return file_header_; } + const ModelFileHeader *GetFileHeader() const { + return file_header_; + } GeModelPtr GetGeModel(); GeRootModelPtr GetGeRootModel(); - void SetSaveMode(bool val) { is_offline_ = val; } - bool GetSaveMode(void) const { return is_offline_; } - bool GetModelType() const { return is_unknown_shape_model_; }; + void SetSaveMode(bool val) { + is_offline_ = val; + } + bool GetSaveMode(void) const { + return is_offline_; + } + bool GetModelType() const { + return is_unknown_shape_model_; + }; Status GetBaseNameFromFileName(const std::string &file_name, std::string &base_name); Status GetModelNameFromMergedGraphName(const std::string 
&graph_name, std::string &model_name); diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 42fa6128..856d9d43 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -104,8 +104,8 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/opskernel_manager/ops_kernel_manager.cc" "${GE_CODE_DIR}/ge/generator/ge_generator.cc" "${GE_CODE_DIR}/ge/generator/generator_api.cc" - "${GE_CODE_DIR}/ge/graph/common/omg_util.cc" - "${GE_CODE_DIR}/ge/graph/common/bcast.cc" + "${GE_CODE_DIR}/ge/common/omg_util.cc" + "${GE_CODE_DIR}/ge/common/bcast.cc" "${GE_CODE_DIR}/ge/common/util.cc" "${GE_CODE_DIR}/ge/common/ge/op_tiling_manager.cc" "${GE_CODE_DIR}/ge/init/gelib.cc" @@ -124,12 +124,12 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/common/dump/opdebug_register.cc" "${GE_CODE_DIR}/ge/common/dump/dump_op.cc" "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" - "${GE_CODE_DIR}/ge/model/ge_root_model.cc" + "${GE_CODE_DIR}/ge/common/model/ge_root_model.cc" "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" "${GE_CODE_DIR}/ge/common/dump/dump_server.cc" "${GE_CODE_DIR}/ge/graph/preprocess/multi_batch_copy_graph.cc" "${GE_CODE_DIR}/ge/graph/optimize/mem_rw_conflict_optimize.cc" - "${GE_CODE_DIR}/ge/model/ge_model.cc" + "${GE_CODE_DIR}/ge/common/model/ge_model.cc" "${GE_CODE_DIR}/ge/common/cust_aicpu_kernel_store.cc" "${GE_CODE_DIR}/ge/common/kernel_store.cc" "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" @@ -169,10 +169,10 @@ set(COMMON_SRC_FILES "${GE_CODE_DIR}/ge/graph/manager/graph_var_manager.cc" "${GE_CODE_DIR}/ge/analyzer/analyzer.cc" "${GE_CODE_DIR}/ge/common/thread_pool.cc" - "${GE_CODE_DIR}/ge/graph/common/transop_util.cc" + "${GE_CODE_DIR}/ge/common/transop_util.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_manager_utils.cc" "${GE_CODE_DIR}/ge/graph/manager/trans_var_data_utils.cc" - "${GE_CODE_DIR}/ge/graph/common/local_context.cc" + "${GE_CODE_DIR}/ge/common/local_context.cc" "${GE_CODE_DIR}/ge/graph/manager/graph_caching_allocator.cc" "${GE_CODE_DIR}/ge/graph/manager/session_scope_mem_allocator.cc" "${GE_CODE_DIR}/ge/graph/manager/rdma_pool_allocator.cc" @@ -648,6 +648,7 @@ set(MULTI_PARTS_TEST_FILES "graph/transop_util_unittest.cc" "common/datatype_transfer_unittest.cc" "common/util_unittest.cc" + "common/fp16_unittest.cc" "common/dump_manager_unittest.cc" "common/dump_op_unittest.cc" "common/dump_properties_unittest.cc" diff --git a/tests/ut/ge/common/fp16_unittest.cc b/tests/ut/ge/common/fp16_unittest.cc new file mode 100644 index 00000000..a9590fe2 --- /dev/null +++ b/tests/ut/ge/common/fp16_unittest.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "common/fp16_t.h" + +namespace ge { +namespace formats { +class UtestFP16 : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UtestFP16, fp16_to_other) { + fp16_t test; + float num = test.ToFloat(); + EXPECT_EQ(num, 0.0); + + double num2 = test.ToDouble(); + EXPECT_EQ(num2, 0); + + int16_t num3 = test.ToInt16(); + EXPECT_EQ(num3, 0); + + int32_t num4 = test.ToInt32(); + EXPECT_EQ(num4, 0); + + int8_t num5 = test.ToInt8(); + EXPECT_EQ(num5, 0); + + uint16_t num6 = test.ToUInt16(); + EXPECT_EQ(num6, 0); + + uint32_t num7 = test.ToUInt32(); + EXPECT_EQ(num7, 0); + + uint8_t num8 = test.ToUInt8(); + EXPECT_EQ(num8, 0); +} +} // namespace formats +} // namespace ge diff --git a/tests/ut/ge/graph/build/model_builder_unittest.cc b/tests/ut/ge/graph/build/model_builder_unittest.cc index d544e1a3..4f061e27 100644 --- a/tests/ut/ge/graph/build/model_builder_unittest.cc +++ b/tests/ut/ge/graph/build/model_builder_unittest.cc @@ -17,7 +17,7 @@ #include #include -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/anchor.h" #include "graph/attr_value.h" #include "graph/debug/ge_attr_define.h" diff --git a/tests/ut/ge/graph/graph_load_unittest.cc b/tests/ut/ge/graph/graph_load_unittest.cc index cbcefd03..93282a5e 100644 --- a/tests/ut/ge/graph/graph_load_unittest.cc +++ b/tests/ut/ge/graph/graph_load_unittest.cc @@ -36,7 +36,7 @@ #include "framework/common/ge_inner_error_codes.h" #include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_manager_utils.h" -#include "model/ge_model.h" +#include "common/model/ge_model.h" #undef private #undef protected diff --git a/tests/ut/ge/graph/load/model_helper_unittest.cc b/tests/ut/ge/graph/load/model_helper_unittest.cc index 8fd8f014..8af329ed 100644 --- a/tests/ut/ge/graph/load/model_helper_unittest.cc +++ b/tests/ut/ge/graph/load/model_helper_unittest.cc @@ -20,7 +20,7 @@ #include "framework/common/helper/model_helper.h" #include "framework/omg/model_tool.h" #include "framework/omg/ge_init.h" -#include "ge/model/ge_model.h" +#include "ge/common/model/ge_model.h" #undef private #undef protected diff --git a/tests/ut/ge/graph/manager/graph_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_manager_unittest.cc index 9663e90f..518cfdcd 100644 --- a/tests/ut/ge/graph/manager/graph_manager_unittest.cc +++ b/tests/ut/ge/graph/manager/graph_manager_unittest.cc @@ -39,9 +39,9 @@ #include "common/thread_pool.h" #include "common/dump/dump_manager.h" #include "analyzer/analyzer.h" -#include "graph/common/ge_call_wrapper.h" -#include "graph/common/local_context.h" -#include "graph/common/transop_util.h" +#include "common/ge_call_wrapper.h" +#include "common/local_context.h" +#include "common/transop_util.h" #include "graph/ge_context.h" #include "graph/ge_global_options.h" #include "graph/manager/util/rt_context_util.h" @@ -108,8 +108,8 @@ #include "graph/utils/tensor_adapter.h" #include "inc/pass_manager.h" #include "ir_build/option_utils.h" -#include "graph/common/local_context.h" -#include "graph/common/omg_util.h" +#include "common/local_context.h" +#include "common/omg_util.h" #include "common/formats/utils/formats_trans_utils.h" #include "../passes/graph_builder_utils.h" #include "register/custom_pass_helper.h" diff --git a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc index da1abd0f..1d19a8bd 100644 ---
a/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc +++ b/tests/ut/ge/graph/partition/dynamic_shape_partition_unittest.cc @@ -24,7 +24,7 @@ #include "inc/framework/common/types.h" #include "utils/graph_utils.h" #include "graph/debug/ge_attr_define.h" -#include "graph/common/omg_util.h" +#include "common/omg_util.h" namespace ge { namespace { diff --git a/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc b/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc index c7d36582..7d4663b3 100644 --- a/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/mark_node_unknown_shape_pass_unittest.cc @@ -24,7 +24,7 @@ #include "common/ge_inner_error_codes.h" #include "inc/pass_manager.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #undef private namespace ge { diff --git a/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc b/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc index c752cea4..9ec254d7 100644 --- a/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/multi_batch_clone_pass_unittest.cc @@ -22,7 +22,7 @@ #include "inc/pass_manager.h" #include "graph/utils/tensor_utils.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/passes/multi_batch_pass.h" #include "graph/preprocess/multi_batch_copy_graph.h" #include "graph/preprocess/insert_op/util_insert_aipp_op.h" diff --git a/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc b/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc index c633c0e1..6565295c 100644 --- a/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/subgraph_const_migration_pass_unittest.cc @@ -20,7 +20,7 @@ #include #include "framework/omg/omg_inner_types.h" -#include "graph/common/local_context.h" +#include "common/local_context.h" #include "graph/passes/subgraph_const_migration_pass.h" #include "inc/pass_manager.h" #include "register/op_registry.h" diff --git a/tests/ut/ge/graph/transop_util_unittest.cc b/tests/ut/ge/graph/transop_util_unittest.cc index 9f645c22..02aa97bf 100644 --- a/tests/ut/ge/graph/transop_util_unittest.cc +++ b/tests/ut/ge/graph/transop_util_unittest.cc @@ -16,7 +16,7 @@ #include -#include "graph/common/transop_util.h" +#include "common/transop_util.h" #include "common/debug/log.h" #include "common/types.h" diff --git a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc index b09211cb..782a06d6 100644 --- a/tests/ut/ge/hybrid/ge_hybrid_unittest.cc +++ b/tests/ut/ge/hybrid/ge_hybrid_unittest.cc @@ -25,8 +25,8 @@ #include "hybrid/model/hybrid_model_builder.h" #include "hybrid/model/hybrid_model.h" #include "hybrid/node_executor/node_executor.h" -#include "model/ge_model.h" -#include "model/ge_root_model.h" +#include "common/model/ge_model.h" +#include "common/model/ge_root_model.h" #include "hybrid/node_executor/aicore/aicore_op_task.h" #include "framework/common/taskdown_common.h" #include "framework/common/debug/log.h" diff --git a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc index 10f7c0fe..eb6030dc 100644 --- a/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc +++ b/tests/ut/ge/hybrid/model/hybrid_model_builder_unittest.cc @@ -29,7 +29,7 @@ #include "graph/utils/graph_utils.h" #include "graph/debug/ge_attr_define.h" #include "graph/ge_local_context.h" 
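// Editor's note, not part of the patch: the unit-test hunks here and below simply mirror the production include moves made earlier in this patch:
//   graph/common/omg_util.h      -> common/omg_util.h
//   graph/common/local_context.h -> common/local_context.h
//   graph/common/transop_util.h  -> common/transop_util.h
//   model/ge_model.h             -> common/model/ge_model.h
//   model/ge_root_model.h        -> common/model/ge_root_model.h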
-#include "graph/common/omg_util.h" +#include "common/omg_util.h" using namespace std; using namespace testing; diff --git a/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc index e4d211f9..53b28762 100644 --- a/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc @@ -22,7 +22,7 @@ #define protected public #include "hybrid/executor/subgraph_context.h" #include "hybrid/node_executor/ge_local/ge_local_node_executor.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" #undef protected #undef private diff --git a/tests/ut/ge/hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc b/tests/ut/ge/hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc index b113fa9b..bb134175 100644 --- a/tests/ut/ge/hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc @@ -22,7 +22,7 @@ #define protected public #include "hybrid/executor/subgraph_context.h" #include "hybrid/node_executor/host_cpu/host_cpu_node_executor.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" #include "graph/passes/graph_builder_utils.h" #include "aicpu/common/aicpu_task_struct.h" #include "graph/manager/graph_mem_manager.h" diff --git a/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc b/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc index 109e5192..d21ae1e0 100644 --- a/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc +++ b/tests/ut/ge/hybrid/node_executor/rts/rts_node_task_unittest.cc @@ -22,7 +22,7 @@ #define protected public #include "hybrid/executor/subgraph_context.h" #include "hybrid/node_executor/rts/rts_node_executor.h" -#include "model/ge_root_model.h" +#include "common/model/ge_root_model.h" using namespace std; using namespace testing; From b8882fd650f8080026a31d4721fc7fbab75fc783 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Tue, 13 Jul 2021 20:36:08 +0800 Subject: [PATCH 195/226] Sort common\CMakeLists.txt --- ge/common/CMakeLists.txt | 102 +++++++++++++++++++-------------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/ge/common/CMakeLists.txt b/ge/common/CMakeLists.txt index 0d41b86f..99d6ead3 100755 --- a/ge/common/CMakeLists.txt +++ b/ge/common/CMakeLists.txt @@ -1,55 +1,55 @@ set(SRC_LIST - "context/ctx.cc" - "model_saver.cc" - "ge/datatype_util.cc" - "ge/plugin_manager.cc" - "ge/op_tiling_manager.cc" - "helper/om_file_helper.cc" - "helper/model_helper.cc" - "model/ge_model.cc" - "model/ge_root_model.cc" - "bcast.cc" - "local_context.cc" - "omg_util.cc" - "transop_util.cc" - "auth/file_saver.cc" - "fp16_t.cc" - "math/fp16_math.cc" - "debug/memory_dumper.cc" - "formats/utils/formats_trans_utils.cc" - "dump/dump_properties.cc" - "dump/dump_manager.cc" - "formats/format_transfers/datatype_transfer.cc" - "formats/format_transfers/format_transfer_transpose.cc" - "formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" - "formats/format_transfers/format_transfer_fractal_z.cc" - "formats/format_transfers/format_transfer_fractal_nz.cc" - "formats/format_transfers/format_transfer_fractal_zz.cc" - "formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" - "formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" - "formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" - 
"formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" - "formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" - "formats/format_transfers/format_transfer_fracz_nchw.cc" - "formats/format_transfers/format_transfer_fracz_nhwc.cc" - "formats/format_transfers/format_transfer_fracz_hwcn.cc" - "formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" - "formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" - "formats/format_transfers/format_transfer_nchw_fz_c04.cc" - "formats/formats.cc" - "ge_format_util.cc" - "fmk_error_codes.cc" - "util.cc" - "properties_manager.cc" - "types.cc" - "model_parser/model_parser.cc" - "kernel_store.cc" - "tbe_kernel_store.cc" - "cust_aicpu_kernel_store.cc" - "op/attr_value_util.cc" - "op/ge_op_utils.cc" - "thread_pool.cc" - "ge/tbe_plugin_manager.cc" + "${GE_CODE_DIR}/ge/common/auth/file_saver.cc" + "${GE_CODE_DIR}/ge/common/bcast.cc" + "${GE_CODE_DIR}/ge/common/context/ctx.cc" + "${GE_CODE_DIR}/ge/common/cust_aicpu_kernel_store.cc" + "${GE_CODE_DIR}/ge/common/debug/memory_dumper.cc" + "${GE_CODE_DIR}/ge/common/dump/dump_manager.cc" + "${GE_CODE_DIR}/ge/common/dump/dump_properties.cc" + "${GE_CODE_DIR}/ge/common/fmk_error_codes.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/datatype_transfer.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_nz.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_z.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fractal_zz.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_hwcn.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nchw.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_fracz_nhwc.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" + "${GE_CODE_DIR}/ge/common/formats/format_transfers/format_transfer_transpose.cc" + "${GE_CODE_DIR}/ge/common/formats/formats.cc" + "${GE_CODE_DIR}/ge/common/formats/utils/formats_trans_utils.cc" + "${GE_CODE_DIR}/ge/common/fp16_t.cc" + "${GE_CODE_DIR}/ge/common/ge/datatype_util.cc" + "${GE_CODE_DIR}/ge/common/ge/op_tiling_manager.cc" + "${GE_CODE_DIR}/ge/common/ge/plugin_manager.cc" + "${GE_CODE_DIR}/ge/common/ge/tbe_plugin_manager.cc" + "${GE_CODE_DIR}/ge/common/ge_format_util.cc" + "${GE_CODE_DIR}/ge/common/helper/model_helper.cc" + "${GE_CODE_DIR}/ge/common/helper/om_file_helper.cc" + "${GE_CODE_DIR}/ge/common/kernel_store.cc" + "${GE_CODE_DIR}/ge/common/local_context.cc" + "${GE_CODE_DIR}/ge/common/math/fp16_math.cc" + "${GE_CODE_DIR}/ge/common/model/ge_model.cc" + "${GE_CODE_DIR}/ge/common/model/ge_root_model.cc" + "${GE_CODE_DIR}/ge/common/model_parser/model_parser.cc" + "${GE_CODE_DIR}/ge/common/model_saver.cc" + "${GE_CODE_DIR}/ge/common/omg_util.cc" + 
"${GE_CODE_DIR}/ge/common/op/attr_value_util.cc" + "${GE_CODE_DIR}/ge/common/op/ge_op_utils.cc" + "${GE_CODE_DIR}/ge/common/properties_manager.cc" + "${GE_CODE_DIR}/ge/common/tbe_kernel_store.cc" + "${GE_CODE_DIR}/ge/common/thread_pool.cc" + "${GE_CODE_DIR}/ge/common/transop_util.cc" + "${GE_CODE_DIR}/ge/common/types.cc" + "${GE_CODE_DIR}/ge/common/util.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL) From 3128929306fcc3983fa1f6e230814df2cf917c3d Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 14 Jul 2021 12:45:41 +0800 Subject: [PATCH 196/226] aicore_task_compiler.cc to runner --- ge/CMakeLists.txt | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index cb4c84b1..d1a0da0f 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -109,7 +109,6 @@ endif () ################################################################## set(EXECUTOR_SRC_LIST - "analyzer/analyzer.cc" "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/exception_dumper.cc" @@ -121,7 +120,6 @@ set(EXECUTOR_SRC_LIST "common/ge/plugin_manager.cc" "common/profiling/ge_profiling.cc" "common/profiling/profiling_manager.cc" - "engine_manager/dnnengine_manager.cc" "executor/ge_executor.cc" "ge_local_engine/engine/host_cpu_engine.cc" "graph/build/memory/var_mem_assign_util.cc" @@ -236,7 +234,6 @@ set(EXECUTOR_SRC_LIST "hybrid/node_executor/aicore/aicore_node_executor.cc" "hybrid/node_executor/aicore/aicore_op_task.cc" "hybrid/node_executor/aicore/aicore_task_builder.cc" - "hybrid/node_executor/aicore/aicore_task_compiler.cc" "hybrid/node_executor/aicpu/aicpu_ext_info.cc" "hybrid/node_executor/aicpu/aicpu_node_executor.cc" "hybrid/node_executor/compiledsubgraph/known_node_executor.cc" @@ -250,9 +247,7 @@ set(EXECUTOR_SRC_LIST "hybrid/node_executor/rts/rts_node_task.cc" "hybrid/node_executor/rts/rts_task_factory.cc" "hybrid/node_executor/task_context.cc" - "init/gelib.cc" "opskernel_manager/ops_kernel_builder_manager.cc" - "opskernel_manager/ops_kernel_manager.cc" "single_op/single_op.cc" "single_op/single_op_manager.cc" "single_op/single_op_model.cc" @@ -510,6 +505,7 @@ set(RUNNER_SRC_LIST "graph/manager/util/hcom_util.cc" "graph/load/model_manager/task_info/hccl_task_info.cc" "hybrid/node_executor/hccl/hccl_node_executor.cc" + "hybrid/node_executor/aicore/aicore_task_compiler.cc" ) if (NOT ENABLE_D AND NOT ENABLE_ACL AND NOT ENABLE_MS_TESTCASES) @@ -750,7 +746,6 @@ target_link_libraries(ge_executor PRIVATE $<$>:$> $<$>:$> json - ge_proto_client ascend_protobuf_static c_sec $<$>:-lrt> @@ -813,7 +808,6 @@ target_link_libraries(ge_executor_shared PRIVATE $<$>:$> -Wl,--no-as-needed ge_common - ge_proto_client runtime slog graph From 5bf4d4c4245b801b754efd46c3c865f8659260f8 Mon Sep 17 00:00:00 2001 From: lichun Date: Wed, 14 Jul 2021 15:38:58 +0800 Subject: [PATCH 197/226] assign graph memory max size and variable memory max size adaptively --- ge/graph/manager/graph_var_manager.cc | 47 +++++++++++++++++++---- ge/graph/manager/graph_var_manager.h | 3 ++ tests/depends/runtime/src/runtime_stub.cc | 6 +++ 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/ge/graph/manager/graph_var_manager.cc b/ge/graph/manager/graph_var_manager.cc index 89a4e45b..5138a0f5 100755 --- a/ge/graph/manager/graph_var_manager.cc +++ b/ge/graph/manager/graph_var_manager.cc @@ -20,6 +20,7 @@ #include "graph/manager/graph_mem_manager.h" #include "graph/manager/trans_var_data_utils.h" #include "graph/utils/type_utils.h" +#include "graph/ge_context.h" using std::map; using 
std::string; @@ -767,24 +768,54 @@ Status VarManager::GetChangedGraphId(const std::string &var_name, uint32_t &grap return var_resource_->GetChangedGraphId(var_name, graph_id); } +Status VarManager::GetTotalMemorySize(size_t &total_mem_size) { + rtError_t rt_ret = rtSetDevice(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, ret:0x%X", + GetContext().DeviceId(), rt_ret); + GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); + return RT_FAILED; + } + size_t free_mem = 0; + rt_ret = rtMemGetInfoEx(RT_MEMORYINFO_HBM, &free_mem, &total_mem_size); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtMemGetInfo failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtMemGetInfo] failed, ret:0x%X", rt_ret); + return RT_FAILED; + } + rt_ret = rtDeviceReset(GetContext().DeviceId()); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, ret:0x%X", + GetContext().DeviceId(), rt_ret); + GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret); + return RT_FAILED; + } + return SUCCESS; +} + Status VarManager::SetMemoryMallocSize(const map<string, string> &options) { + size_t total_mem_size = 0; + Status ret = VarManager::GetTotalMemorySize(total_mem_size); + if (ret != SUCCESS) { + return ret; + } + GEEVENT("Total memory size is %zu", total_mem_size); + + graph_mem_max_size_ = floor(total_mem_size * kGraphMemoryManagerMallocRatio); + var_mem_max_size_ = floor(total_mem_size * kVarMemoryManagerMallocRatio); + auto it = options.find(GRAPH_MEMORY_MAX_SIZE); - if (it == options.end()) { - graph_mem_max_size_ = kGraphMemoryManagerMallocMaxSize; - } else { + if (it != options.end()) { string graph_memory_manager_malloc_max_size = it->second; ge::Status ret = ParseMemoryMallocSize(graph_memory_manager_malloc_max_size, graph_mem_max_size_); if (ret != SUCCESS) { GELOGE(ge::GE_GRAPH_OPTIONS_INVALID, "[Call][ParseMemoryMallocSize] failed, session id:%lu.", session_id_); return ge::GE_GRAPH_OPTIONS_INVALID; } - GELOGI("The max size for graph mem is set to %zu", graph_mem_max_size_); } it = options.find(VARIABLE_MEMORY_MAX_SIZE); - if (it == options.end()) { - var_mem_max_size_ = kMemoryVarManagerMallocSize; - } else { + if (it != options.end()) { string memory_var_manager_malloc_size = it->second; ge::Status ret = ParseMemoryMallocSize(memory_var_manager_malloc_size, var_mem_max_size_); if (ret != SUCCESS) { @@ -793,6 +824,8 @@ Status VarManager::SetMemoryMallocSize(const map<string, string> &options) { } } + GEEVENT("The graph_mem_max_size is %zu and the var_mem_max_size is %zu", graph_mem_max_size_, var_mem_max_size_); + var_mem_logic_base_ = graph_mem_max_size_ + kGraphMemoryBuffer; if (var_mem_logic_base_ > kMaxMemorySize) { REPORT_INNER_ERROR("E19999", "var_login_base:%zu can not exeed limit:%zu, session_id:%lu, check invalid", diff --git a/ge/graph/manager/graph_var_manager.h b/ge/graph/manager/graph_var_manager.h index f2b68e79..a1b45959 100755 --- a/ge/graph/manager/graph_var_manager.h +++ b/ge/graph/manager/graph_var_manager.h @@ -43,6 +43,8 @@ const size_t kMaxMemorySize = 256UL * 1024UL * 1024UL * 1024UL; const char kEnvGeuseStaticMemory[] = "GE_USE_STATIC_MEMORY"; const uint64_t kSessionMemAlignSize = 512; const size_t kSessionMemAlignUnit = 2; +const double kGraphMemoryManagerMallocRatio = 26.0 / 32.0; +const double kVarMemoryManagerMallocRatio = 5.0 / 32.0; enum MemStatus { NORMAL =
0, @@ -301,6 +303,7 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager { mutable std::recursive_mutex mutex_; Status ParseMemoryMallocSize(std::string &memory_size, size_t &my_size); + Status GetTotalMemorySize(size_t &total_mem_size); }; class VarManagerPool { diff --git a/tests/depends/runtime/src/runtime_stub.cc b/tests/depends/runtime/src/runtime_stub.cc index 0c9e2c27..a8f7e59a 100644 --- a/tests/depends/runtime/src/runtime_stub.cc +++ b/tests/depends/runtime/src/runtime_stub.cc @@ -193,6 +193,12 @@ rtError_t rtMemGetInfo(size_t *free, size_t *total) { return RT_ERROR_NONE; } +rtError_t rtMemGetInfoEx(rtMemInfoType_t memInfoType, size_t *free, size_t *total) { + *free = 512UL * 1024UL * 1024UL; + *total = 1024UL * 1024UL * 1024UL; + return RT_ERROR_NONE; +} + rtError_t rtMemAllocManaged(void **ptr, uint64_t size, uint32_t flag) { return RT_ERROR_NONE; } rtError_t rtMemFreeManaged(void *ptr) { return RT_ERROR_NONE; } From e271d40b9ce1e673b2d550a70a27b2b328ce4433 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Wed, 14 Jul 2021 15:41:17 +0800 Subject: [PATCH 198/226] delete common files from compiler --- ge/CMakeLists.txt | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index d1a0da0f..f83d2607 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -109,15 +109,9 @@ endif () ################################################################## set(EXECUTOR_SRC_LIST - "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" - "common/formats/format_transfers/format_transfer_transpose.cc" - "common/formats/utils/formats_trans_utils.cc" - "common/fp16_t.cc" - "common/ge/op_tiling_manager.cc" - "common/ge/plugin_manager.cc" "common/profiling/ge_profiling.cc" "common/profiling/profiling_manager.cc" "executor/ge_executor.cc" @@ -264,29 +258,6 @@ set(COMPILER_SRC_LIST "analyzer/analyzer.cc" "common/dump/dump_op.cc" - "common/dump/dump_properties.cc" - "common/formats/format_transfers/datatype_transfer.cc" - "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" - "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" - "common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" - "common/formats/format_transfers/format_transfer_fractal_nz.cc" - "common/formats/format_transfers/format_transfer_fractal_z.cc" - "common/formats/format_transfers/format_transfer_fractal_zz.cc" - "common/formats/format_transfers/format_transfer_fracz_hwcn.cc" - "common/formats/format_transfers/format_transfer_fracz_nchw.cc" - "common/formats/format_transfers/format_transfer_fracz_nhwc.cc" - "common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" - "common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" - "common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_transpose.cc" - "common/formats/formats.cc" - "common/formats/utils/formats_trans_utils.cc" - "common/fp16_t.cc" - "common/ge/op_tiling_manager.cc" - "common/ge/plugin_manager.cc" "common/helper/model_cache_helper.cc" "common/profiling/profiling_manager.cc" "engine_manager/dnnengine_manager.cc"
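As a quick cross-check of the adaptive sizing introduced in PATCH 197 and exercised by the unit tests in the following patch, here is a standalone sketch (an editor's addition, not part of the series; the 32 GiB device total is an assumed figure, where the real code obtains it from rtMemGetInfoEx):

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
  // Ratios added to graph_var_manager.h by PATCH 197.
  const double kGraphMemoryManagerMallocRatio = 26.0 / 32.0;
  const double kVarMemoryManagerMallocRatio = 5.0 / 32.0;
  const std::size_t total_mem_size = 32UL * 1024UL * 1024UL * 1024UL;  // assumed 32 GiB HBM
  // Defaults applied when ge.graphMemoryMaxSize / ge.variableMemoryMaxSize are not set.
  const auto graph_mem_max = static_cast<std::size_t>(std::floor(total_mem_size * kGraphMemoryManagerMallocRatio));
  const auto var_mem_max = static_cast<std::size_t>(std::floor(total_mem_size * kVarMemoryManagerMallocRatio));
  std::printf("graph max: %zu bytes (26 GiB), var max: %zu bytes (5 GiB)\n", graph_mem_max, var_mem_max);
  return 0;
}

From fec3176277626c8e80c69a2d76ed36f08cd7ae43 Mon Sep 17 00:00:00 2001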
From: lichun Date: Wed, 14 Jul 2021 16:31:09 +0800 Subject: [PATCH 199/226] assign graph memory max size and variable memory max size adaptively --- tests/ut/ge/CMakeLists.txt | 1 + .../manager/graph_var_manager_unittest.cc | 63 +++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 tests/ut/ge/graph/manager/graph_var_manager_unittest.cc diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 856d9d43..773a2686 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -690,6 +690,7 @@ set(MULTI_PARTS_TEST_FILES "graph/manager/run_graph_unittest.cc" "graph/partition/dynamic_shape_partition_unittest.cc" "graph/manager/graph_manager_unittest.cc" + "graph/manager/graph_var_manager_unittest.cc" "graph/optimize/mem_rw_conflict_optimize_unittest.cc" "graph/optimize/graph_optimize_unittest.cc" "session/omg_omg_unittest.cc" diff --git a/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc new file mode 100644 index 00000000..3eda6c47 --- /dev/null +++ b/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include <gtest/gtest.h> +#include <map> + +#define protected public +#define private public +#include "graph/manager/graph_var_manager.h" +#include "graph/ge_context.h" +#undef protected +#undef private + +namespace ge { +class UtestGraphVarManagerTest : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +TEST_F(UtestGraphVarManagerTest, test_get_total_memory_size) { + size_t total_mem_size = 0; + Status ret = VarManager::Instance(0)->GetTotalMemorySize(total_mem_size); + EXPECT_EQ(total_mem_size, 1024UL * 1024UL * 1024UL); + EXPECT_EQ(ret, SUCCESS); +} + +TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_no_related_option) { + const map<string, string> options{}; + Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); + EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (26.0f / 32.0f))); + EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (5.0f / 32.0f))); + EXPECT_EQ(ret, SUCCESS); +} + +TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_graph_mem_max_size) { + const map<string, string> options{{"ge.graphMemoryMaxSize", 1024UL * 1024UL * 1024UL / 2}}; + Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); + EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2)); + EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (5.0f / 32.0f))); + EXPECT_EQ(ret, SUCCESS); +} + +TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_var_mem_max_size) { + const map<string, string> options{{"ge.variableMemoryMaxSize", 1024UL * 1024UL * 1024UL / 2}}; + Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options);
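+ // With the stubbed rtMemGetInfoEx reporting 1 GiB of HBM, the adaptive defaults
+ // are floor(1 GiB * 26/32) = 872415232 bytes for graph memory and
+ // floor(1 GiB * 5/32) = 167772160 bytes for variable memory; only the option
+ // set explicitly in each test is expected to override its default.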
+ EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (26.0f / 32.0f))); + EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2)); + EXPECT_EQ(ret, SUCCESS); +} +} // namespace ge From 3fd107039848df8f7e6a403beee4cf73ca30ee1c Mon Sep 17 00:00:00 2001 From: lichun Date: Wed, 14 Jul 2021 16:59:49 +0800 Subject: [PATCH 200/226] assign graph memory max size and variable memory max size adaptively --- tests/ut/ge/graph/manager/graph_var_manager_unittest.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc index 3eda6c47..c20e786d 100644 --- a/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc +++ b/tests/ut/ge/graph/manager/graph_var_manager_unittest.cc @@ -46,7 +46,7 @@ TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_no_related_option) } TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_graph_mem_max_size) { - const map<string, string> options{{"ge.graphMemoryMaxSize", 1024UL * 1024UL * 1024UL / 2}}; + const map<string, string> options{{"ge.graphMemoryMaxSize", "536870912"}}; Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2)); EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (5.0f / 32.0f))); @@ -54,7 +54,7 @@ TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_g } TEST_F(UtestGraphVarManagerTest, test_set_memory_malloc_size_with_user_specify_var_mem_max_size) { - const map<string, string> options{{"ge.variableMemoryMaxSize", 1024UL * 1024UL * 1024UL / 2}}; + const map<string, string> options{{"ge.variableMemoryMaxSize", "536870912"}}; Status ret = VarManager::Instance(0)->SetMemoryMallocSize(options); EXPECT_EQ(VarManager::Instance(0)->graph_mem_max_size_, floor(1024UL * 1024UL * 1024UL * (26.0f / 32.0f))); EXPECT_EQ(VarManager::Instance(0)->var_mem_max_size_, floor(1024UL * 1024UL * 1024UL / 2));
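The next patch routes AICPU launches through runtime entry points that carry the operator name alongside the .so and kernel names. A minimal sketch of the new call shape, assuming the rtKernelLaunchNames_t aggregate used by the runtime stubs added in that patch ({soName, kernelName, opName}) and an already-prepared argument buffer:

    // Bundle the .so, kernel and op names so the runtime can tag the launch.
    rtKernelLaunchNames_t launch_name = {so_name.c_str(), kernel_name.c_str(), op_name.c_str()};
    // blockDim is a reserved parameter at these call sites and stays 1.
    rtError_t rt_ret = rtAicpuKernelLaunchWithFlag(&launch_name, 1, args, args_size,
                                                   nullptr, stream, RT_KERNEL_DEFAULT);
    if (rt_ret != RT_ERROR_NONE) {
      return RT_FAILED;
    }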
From 191f381cc5251711c4f65ef11f7262f47e583068 Mon Sep 17 00:00:00 2001 From: wangxiaotian22 Date: Tue, 13 Jul 2021 11:49:50 +0800 Subject: [PATCH 201/226] runtime api transfer --- .../task_info/kernel_ex_task_info.cc | 3 +- .../task_info/kernel_ex_task_info.h | 1 + .../task_info/kernel_task_info.cc | 7 +- .../aicpu/aicpu_node_executor.cc | 15 +- inc/external/OWNERS | 10 ++ tests/depends/runtime/src/runtime_stub.cc | 15 ++ tests/ut/ge/CMakeLists.txt | 1 + .../aicpu/aicpu_node_executor_unittest.cc | 168 ++++++++++++++++++ 8 files changed, 209 insertions(+), 11 deletions(-) create mode 100644 inc/external/OWNERS create mode 100644 tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index a4b3de75..ee358b5c 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -106,6 +106,7 @@ Status KernelExTaskInfo::Init(const domi::TaskDef &task_def, DavinciModel *davin // 1. Copy context from kernelExDef.private to workspace uint32_t op_index = kernel_ex_def.op_index(); OpDescPtr op_desc = davinci_model_->GetOpByIndex(op_index); + op_desc_ = op_desc; if (op_desc == nullptr) { REPORT_INNER_ERROR("E19999", "Can't get op_desc from davinci_model by index:%u", op_index); GELOGE(INTERNAL_ERROR, "[Get][Op] by index failed, index:%u is out of range!", op_index); @@ -422,7 +423,7 @@ Status KernelExTaskInfo::Distribute() { if (topic_type_flag_ > 0) { dump_flag_ = dump_flag_ | topic_type_flag_; } - rtError_t rt_ret = rtKernelLaunchEx(kernel_buf_, kernel_buf_size_, dump_flag_, stream_); + rtError_t rt_ret = rtKernelLaunchFwk(op_desc_->GetName().c_str(), kernel_buf_, kernel_buf_size_, dump_flag_, stream_); if (rt_ret != RT_ERROR_NONE) { REPORT_CALL_ERROR("E19999", "Call rtKernelLaunchEx failed, ret:0x%X", rt_ret); GELOGE(RT_FAILED, "[Call][RtKernelLaunchEx] failed, ret:0x%X", rt_ret); diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h index 1b77b715..7d07eb7f 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h @@ -70,6 +70,7 @@ class KernelExTaskInfo : public TaskInfo { uint32_t dump_flag_; uint32_t kernel_buf_size_; DavinciModel *davinci_model_; + OpDescPtr op_desc_; void *kernel_buf_; void *input_output_addr_; void *ext_info_addr_; diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 07ad63ca..63f4257c 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -440,9 +440,10 @@ Status KernelTaskInfo::Distribute() { } GELOGI("distribute task info kernel_type %d, flag %d", kernel_type_, dump_flag_); // blockDim is reserved parameter, set to 1 - rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(so_name_.c_str()), - reinterpret_cast<const void *>(kernel_name_.c_str()), 1, args_, args_size_, - nullptr, stream_, dump_flag_); + std::string op_name = op_desc_->GetName(); + rtKernelLaunchNames_t launch_name = {so_name_.c_str(), kernel_name_.c_str(), op_name.c_str()}; + rt_ret = rtAicpuKernelLaunchWithFlag(&launch_name, 1, args_, args_size_, + nullptr, stream_, dump_flag_); call_save_dump_ = true; } else { /* default: not skt launch */ diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index 820c9b56..cf20303c 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -477,7 +477,7 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context, GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm)); RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start"); - GE_CHK_RT_RET(rtKernelLaunchEx(copy_task_args_buf_->GetData(), sizeof(STR_FWK_OP_KERNEL), + GE_CHK_RT_RET(rtKernelLaunchFwk(node_name_.c_str(), copy_task_args_buf_->GetData(), sizeof(STR_FWK_OP_KERNEL), RT_KERNEL_DEFAULT, context.GetStream())); RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End"); @@ -638,7 +638,8 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) { GELOGD("Node[%s] launch task start, unknown_type=%d.", node_name_.c_str(), unknown_type_); uint32_t flag = RT_KERNEL_DEFAULT; RECORD_EXECUTION_EVENT(context.GetExecutionContext(), node_name_.c_str(),
"[AicpuTfNodertKernelLaunchEx] Start"); - GE_CHK_RT_RET(rtKernelLaunchEx(kernel_buf_->GetData(), kernel_buf_->GetSize(), flag, context.GetStream())); + GE_CHK_RT_RET(rtKernelLaunchFwk(node_name_.c_str(), kernel_buf_->GetData(), + kernel_buf_->GetSize(), flag, context.GetStream())); RECORD_EXECUTION_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[AicpuTfNodertKernelLaunchEx] End"); GELOGD("Node[%s] launch end.", node_name_.c_str()); if (need_sync_) { @@ -819,11 +820,11 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) { if (kernel_type == ccKernelType::CUST_AI_CPU) { flag |= static_cast(RT_KERNEL_CUSTOM_AICPU); } - auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast(so_name.c_str()), - reinterpret_cast(kernel_name.c_str()), - 1, // default core dim is 1 - args_.get(), args_size_, - nullptr, context.GetStream(), flag); + rtKernelLaunchNames_t launch_name = {so_name.c_str(), kernel_name.c_str(), node_name_.c_str()}; + auto rt_ret = rtAicpuKernelLaunchWithFlag(&launch_name, + 1, // default core dim is 1 + args_.get(), args_size_, + nullptr, context.GetStream(), flag); GE_CHK_RT_RET(rt_ret); GELOGD("Node[%s] launch task end.", node_name_.c_str()); return SUCCESS; diff --git a/inc/external/OWNERS b/inc/external/OWNERS new file mode 100644 index 00000000..934272a6 --- /dev/null +++ b/inc/external/OWNERS @@ -0,0 +1,10 @@ +approvers: +- gegenhua +reviewers: +- wqtshg +- ji_chen +- xchu42 +- sheng-nan +- wangxiaotian22 +- zhangxiaokun9 +- tangqunzhang diff --git a/tests/depends/runtime/src/runtime_stub.cc b/tests/depends/runtime/src/runtime_stub.cc index 0c9e2c27..510eb1ad 100644 --- a/tests/depends/runtime/src/runtime_stub.cc +++ b/tests/depends/runtime/src/runtime_stub.cc @@ -460,6 +460,21 @@ rtError_t rtDebugUnRegisterForStream(rtStream_t stream) { rtError_t rtFftsTaskLaunch(rtFftsTaskInfo_t *fftsTaskInfo, rtStream_t stream) { return RT_ERROR_NONE; } + +rtError_t rtKernelLaunchFwk(const char *opName, void *args, uint32_t argSize, uint32_t flags, rtStream_t rtStream) { + return RT_ERROR_NONE; +} + +rtError_t rtAicpuKernelLaunchWithFlag(const rtKernelLaunchNames_t *launchNames, uint32_t blockDim, const void *args, + uint32_t argSize, rtSmDesc_t *smDesc, rtStream_t stream, uint32_t flags) { + return RT_ERROR_NONE; +} + +rtError_t rtAicpuKernelLaunch(const rtKernelLaunchNames_t *launchNames, uint32_t blockDim, const void *args, + uint32_t argSize, rtSmDesc_t *smDesc, rtStream_t stream) { + return RT_ERROR_NONE; +} + #ifdef __cplusplus } #endif diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index 42fa6128..ebaee921 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -735,6 +735,7 @@ set(HYBRID_TEST_FILES "hybrid/node_executor/host_cpu/host_cpu_node_task_unittest.cc" "hybrid/node_executor/ge_local/ge_local_node_executor_unittest.cc" "hybrid/node_executor/hccl/hccl_node_executor_unittest.cc" + "hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc" "hybrid/executor/hybrid_model_async_executor_unittest.cc" "hybrid/executor/hybrid_model_pipeline_executor_unittest.cc" "hybrid/node_executor/aicore/aicore_task_compiler_unittest.cc" diff --git a/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc new file mode 100644 index 00000000..b225949b --- /dev/null +++ b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc @@ -0,0 +1,168 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include + +#define private public +#define protected public +#include "graph/runtime_inference_context.h" +#include "aicpu/common/aicpu_task_struct.h" +#include "hybrid/executor/subgraph_context.h" +#include "hybrid/node_executor/aicpu/aicpu_node_executor.h" +#undef protected +#undef private + +using namespace std; +using namespace testing; + +namespace { +struct AicpuTaskStruct { + aicpu::AicpuParamHead head; + uint64_t io_addrp[6]; +}__attribute__((packed)); +} // namespace + +namespace ge { +using namespace hybrid; + +class UtestAicpuNodeExecutor : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +static NodePtr CreateNode(ComputeGraphPtr graph, const string &name, const string &type, int in_num, int out_num) { + OpDescPtr op_desc = std::make_shared(name, type); + op_desc->SetStreamId(0); + static int32_t index = 0; + op_desc->SetId(index++); + + GeTensorDesc tensor(GeShape(), FORMAT_ND, DT_INT64); + TensorUtils::SetSize(tensor, 64); + vector input_offset; + for (int i = 0; i < in_num; i++) { + op_desc->AddInputDesc(tensor); + input_offset.emplace_back(i * 64); + } + op_desc->SetInputOffset(input_offset); + + vector output_offset; + for (int i = 0; i < out_num; i++) { + op_desc->AddOutputDesc(tensor); + output_offset.emplace_back(in_num * 64 + i * 64); + } + op_desc->SetOutputOffset(output_offset); + + return graph->AddNode(op_desc); +} + +TEST_F(UtestAicpuNodeExecutor, aicpu_tf_node_task) { + ComputeGraphPtr graph = std::make_shared("test"); + GeModelPtr ge_sub_model = std::make_shared(); + GeRootModelPtr ge_root_model = std::make_shared(graph); + ge_root_model->SetModelName("test_name"); + ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model); + HybridModel hybrid_model(ge_root_model); + + NodePtr node = CreateNode(graph, "frameworkop", FRAMEWORK_OP_TYPE, 4, 2); + + std::unique_ptr new_node; + ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS); + NodeItem *node_item = new_node.get(); + hybrid_model.node_items_[node] = std::move(new_node); + node_item->input_start = 0; + node_item->output_start = 0; + node_item->is_dynamic = true; + node_item->shape_inference_type = DEPEND_COMPUTE; + + GraphItem graph_item; + graph_item.node_items_.emplace_back(node_item); + graph_item.total_inputs_ = 4; + graph_item.total_outputs_ = 2; + + GraphExecutionContext graph_context; + SubgraphContext subgraph_context(&graph_item, &graph_context); + ASSERT_EQ(subgraph_context.Init(), SUCCESS); + graph_context.callback_manager = std::unique_ptr(new CallbackManager()); + + auto node_state = subgraph_context.GetOrCreateNodeState(node_item); + ASSERT_NE(node_state, nullptr); + + for (int i=0; i<4; ++i) { + uint64_t value_0 = 512; + TensorValue in_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetInput(*node_item, 0, in_tensor0); + } + + uint64_t value_0 = 512; + TensorValue out_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetOutput(*node_item, 0, out_tensor0); + + uint64_t value_1 = 512; + TensorValue 
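+ // Second output tensor; like the first it is backed by plain host memory so the
+ // DEPEND_COMPUTE launch below can run entirely against the runtime stubs.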
out_tensor1(&value_1, sizeof(value_1)); + subgraph_context.SetOutput(*node_item, 1, out_tensor1); + + // task + domi::TaskDef task_def; + domi::KernelExDef *kernel_ex_def = task_def.mutable_kernel_ex(); + kernel_ex_def->set_kernel_ext_info_size(12); + + AicpuExtInfo aicpu_ext_info; + aicpu_ext_info.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE; + aicpu_ext_info.infoLen = sizeof(int32_t); + int32_t type = node_item->shape_inference_type; + memcpy_s(aicpu_ext_info.infoMsg, sizeof(int32_t), &type, sizeof(int32_t)); + char *ext_mem = (char*)malloc(sizeof(AicpuExtInfo) + sizeof(int32_t)); + memcpy_s(ext_mem, sizeof(AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info, sizeof(AicpuExtInfo) + sizeof(int32_t)); + std::string ext_info(ext_mem, sizeof(AicpuExtInfo) + sizeof(int32_t)); + + std::string *mutable_ext_info = kernel_ex_def->mutable_kernel_ext_info(); + (*mutable_ext_info) = ext_info; + + hybrid_model.task_defs_[node] = std::vector({task_def, task_def}); + + AicpuTfNodeTask aicpu_tf_node_task(node_item, task_def); + + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS); + ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); + + AicpuTaskStruct args; + args.head.length = sizeof(args); + args.head.ioAddrNum = 6; + + domi::TaskDef task_def2; + task_def2.set_type(RT_MODEL_TASK_ALL_KERNEL); + task_def2.mutable_kernel()->set_args(reinterpret_cast(&args), args.head.length); + task_def2.mutable_kernel()->set_args_size(args.head.length); + + hybrid_model.task_defs_[node] = std::vector({task_def2}); + + AicpuNodeTask aicpu_node_task(node_item, task_def); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); + + + //kernel_ex_def->set_allocated_kernel_ext_info(nullptr); + + free(ext_mem); + +} + +} // namespace ge + From 45be54175221c42c1bcb7e46b230fc79c8aa2d02 Mon Sep 17 00:00:00 2001 From: lichun Date: Wed, 14 Jul 2021 19:59:55 +0800 Subject: [PATCH 202/226] assign graph memory max size and variable memory max size adaptively --- ge/graph/manager/graph_var_manager.cc | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/ge/graph/manager/graph_var_manager.cc b/ge/graph/manager/graph_var_manager.cc index 5138a0f5..d0669254 100755 --- a/ge/graph/manager/graph_var_manager.cc +++ b/ge/graph/manager/graph_var_manager.cc @@ -795,18 +795,15 @@ Status VarManager::GetTotalMemorySize(size_t &total_mem_size) { Status VarManager::SetMemoryMallocSize(const map &options) { size_t total_mem_size = 0; - Status ret = VarManager::GetTotalMemorySize(total_mem_size); - if (ret != SUCCESS) { - return ret; - } + GE_CHK_STATUS_RET_NOLOG(VarManager::GetTotalMemorySize(total_mem_size)); GEEVENT("Total memory size is %zu", total_mem_size); graph_mem_max_size_ = floor(total_mem_size * kGraphMemoryManagerMallocRatio); var_mem_max_size_ = floor(total_mem_size * kVarMemoryManagerMallocRatio); - auto it = options.find(GRAPH_MEMORY_MAX_SIZE); - if (it != options.end()) { - string graph_memory_manager_malloc_max_size = it->second; + auto it1 = options.find(GRAPH_MEMORY_MAX_SIZE); + if (it1 != options.end()) { + string graph_memory_manager_malloc_max_size = it1->second; ge::Status ret = ParseMemoryMallocSize(graph_memory_manager_malloc_max_size, graph_mem_max_size_); if (ret != SUCCESS) { GELOGE(ge::GE_GRAPH_OPTIONS_INVALID, "[Call][ParseMemoryMallocSize] failed, session id:%lu.", session_id_); @@ -814,9 +811,9 @@ Status VarManager::SetMemoryMallocSize(const map &options) { } } - 
it = options.find(VARIABLE_MEMORY_MAX_SIZE); - if (it != options.end()) { - string memory_var_manager_malloc_size = it->second; + auto it2 = options.find(VARIABLE_MEMORY_MAX_SIZE); + if (it2 != options.end()) { + string memory_var_manager_malloc_size = it2->second; ge::Status ret = ParseMemoryMallocSize(memory_var_manager_malloc_size, var_mem_max_size_); if (ret != SUCCESS) { GELOGE(ge::GE_GRAPH_OPTIONS_INVALID, "[Call][ParseMemoryMallocSize] failed, session id:%lu.", session_id_); From f6755b5681a5ed5a3618b3aa79d77b1e8c1680c2 Mon Sep 17 00:00:00 2001 From: chenyemeng Date: Thu, 15 Jul 2021 18:51:46 +0800 Subject: [PATCH 203/226] revert --- ge/CMakeLists.txt | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index f83d2607..d1a0da0f 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -109,9 +109,15 @@ endif () ################################################################## set(EXECUTOR_SRC_LIST + "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" + "common/formats/format_transfers/format_transfer_transpose.cc" + "common/formats/utils/formats_trans_utils.cc" + "common/fp16_t.cc" + "common/ge/op_tiling_manager.cc" + "common/ge/plugin_manager.cc" "common/profiling/ge_profiling.cc" "common/profiling/profiling_manager.cc" "executor/ge_executor.cc" @@ -258,6 +264,29 @@ set(EXECUTOR_SRC_LIST set(COMPILER_SRC_LIST "analyzer/analyzer.cc" "common/dump/dump_op.cc" + "common/dump/dump_properties.cc" + "common/formats/format_transfers/datatype_transfer.cc" + "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" + "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" + "common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" + "common/formats/format_transfers/format_transfer_fractal_nz.cc" + "common/formats/format_transfers/format_transfer_fractal_z.cc" + "common/formats/format_transfers/format_transfer_fractal_zz.cc" + "common/formats/format_transfers/format_transfer_fracz_hwcn.cc" + "common/formats/format_transfers/format_transfer_fracz_nchw.cc" + "common/formats/format_transfers/format_transfer_fracz_nhwc.cc" + "common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" + "common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" + "common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" + "common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" + "common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" + "common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" + "common/formats/format_transfers/format_transfer_transpose.cc" + "common/formats/formats.cc" + "common/formats/utils/formats_trans_utils.cc" + "common/fp16_t.cc" + "common/ge/op_tiling_manager.cc" + "common/ge/plugin_manager.cc" "common/helper/model_cache_helper.cc" "common/profiling/profiling_manager.cc" "engine_manager/dnnengine_manager.cc" From 4be882056625080d270df9e9eecebb428da3bed6 Mon Sep 17 00:00:00 2001 From: wqtshg Date: Thu, 15 Jul 2021 19:05:14 +0800 Subject: [PATCH 204/226] delete compiling macros --- CMakeLists.txt | 8 ++------ cmake/external_libs/gflags.cmake | 15 ++++++++++----- ge/CMakeLists.txt | 4 +--- ge/offline/CMakeLists.txt | 6 ++---- metadef | 2 +- 5 files changed, 16 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ac0240d9..60509838 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -88,11 +88,9 @@ else () find_module(hccl libhccl.so 
${GE_LIB_PATH}) find_module(adump_server libadump_server.a ${GE_LIB_PATH}) find_module(runtime libruntime.so ${GE_LIB_PATH}) - find_module(runtime_compile libruntime_compile.so ${GE_LIB_PATH}) find_module(resource libresource.so ${GE_LIB_PATH}) find_module(ascend_hal_stub libascend_hal.so ${GE_LIB_PATH}) find_module(msprofiler_fwk_ext libmsprofiler_fwk.a ${GE_LIB_PATH}) - #find_module(ascendcl_static libascendcl.a ${GE_LIB_PATH}) else() find_module(slog libalog.so ${ASCEND_ATC_DIR}) find_module(opt_feature libopt_feature.so ${ASCEND_ATC_DIR}) @@ -108,7 +106,6 @@ else () elseif(PLATFORM STREQUAL "inference") find_module(adump_server libadump_server.a ${ASCEND_ACL_DIR}) find_module(runtime libruntime.so ${ASCEND_ACL_DIR}) - find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) if(PRODUCT STREQUAL "flr3") elseif(PRODUCT STREQUAL "flr1") @@ -120,10 +117,9 @@ else () endif() elseif(PLATFORM STREQUAL "all") find_module(adump_server libadump_server.a ${ASCEND_RUNTIME_DIR}) - find_module(runtime libruntime.so ${ASCEND_RUNTIME_DIR}) + find_module(runtime libruntime.so ${ASCEND_ATC_DIR}) find_module(msprofiler_fwk_ext libmsprofiler_fwk.a ${ASCEND_RUNTIME_DIR}) - find_module(ascend_hal_stub libascend_hal.so ${ASCEND_DRIVER_DIR}) - find_module(runtime_compile libruntime_compile.so ${ASCEND_ATC_DIR}) + find_module(ascend_hal_stub libascend_hal.so ${ASCEND_ATC_DIR}/stub) find_module(msprofiler_ext libmsprofiler.a ${ASCEND_ACL_DIR}) else() message(STATUS "PLATFORM param is invalid, should be train or inference, you choose nothing!") diff --git a/cmake/external_libs/gflags.cmake b/cmake/external_libs/gflags.cmake index 50cfb2bc..b4b57dd7 100755 --- a/cmake/external_libs/gflags.cmake +++ b/cmake/external_libs/gflags.cmake @@ -10,12 +10,17 @@ if ((${CMAKE_INSTALL_PREFIX} STREQUAL /usr/local) OR message(STATUS "No install prefix selected, default to ${CMAKE_INSTALL_PREFIX}.") endif() -if (ENABLE_GITEE) - set(REQ_URL "https://gitee.com/mirrors/gflags/repository/archive/v2.2.2.tar.gz") - set(MD5 "") +if (GE_PB_PKG) + set(REQ_URL "${GE_PB_PKG}/libs/gflags/v2.2.2.tar.gz") + set(MD5 "1a865b93bacfa963201af3f75b7bd64c") else() - set(REQ_URL "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz") - set(MD5 "") + if (ENABLE_GITEE) + set(REQ_URL "https://gitee.com/mirrors/gflags/repository/archive/v2.2.2.tar.gz") + set(MD5 "") + else() + set(REQ_URL "https://github.com/gflags/gflags/archive/v2.2.2.tar.gz") + set(MD5 "1a865b93bacfa963201af3f75b7bd64c") + endif () endif () set (gflags_CXXFLAGS "-D_GLIBCXX_USE_CXX11_ABI=0 -Dgoogle=ascend_private") diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index f83d2607..cd255c79 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -593,7 +593,6 @@ target_compile_definitions(ge_compiler PRIVATE REUSE_MEMORY=1 FMK_SUPPORT_DUMP FMK_HOST_INFER - COMPILE_OMG_PACKAGE google=ascend_private FUNC_VISIBILITY $<$:ONLY_COMPILE_OPEN_SRC> @@ -655,8 +654,7 @@ target_link_libraries(ge_compiler PRIVATE c_sec error_manager slog - $<$>:$> - $<$:$> + runtime opt_feature -Wl,--as-needed json diff --git a/ge/offline/CMakeLists.txt b/ge/offline/CMakeLists.txt index e11e4a03..935d8a30 100644 --- a/ge/offline/CMakeLists.txt +++ b/ge/offline/CMakeLists.txt @@ -22,7 +22,6 @@ target_compile_options(atc_atc.bin PRIVATE target_compile_definitions(atc_atc.bin PRIVATE PROTOBUF_INLINE_NOT_IN_HEADERS=0 - COMPILE_OMG_PACKAGE google=ascend_private LOG_CPP FUNC_VISIBILITY @@ -48,6 +47,7 @@ target_include_directories(atc_atc.bin 
PRIVATE target_link_options(atc_atc.bin PRIVATE -Wl,-Bsymbolic + -Wl,-rpath-link,${ASCEND_ATC_DIR}/stub ) target_link_libraries(atc_atc.bin PRIVATE @@ -62,8 +62,7 @@ target_link_libraries(atc_atc.bin PRIVATE parser_common gflags json - $<$>:$> - $<$:$> + runtime slog static_mmpa -lrt @@ -92,7 +91,6 @@ target_compile_options(fwk_atc.bin PRIVATE target_compile_definitions(fwk_atc.bin PRIVATE PROTOBUF_INLINE_NOT_IN_HEADERS=0 - COMPILE_OMG_PACKAGE google=ascend_private LOG_CPP FUNC_VISIBILITY diff --git a/metadef b/metadef index 5a9605f6..a725349b 160000 --- a/metadef +++ b/metadef @@ -1 +1 @@ -Subproject commit 5a9605f6cb1204a729a51fe36bc614cf1d94a496 +Subproject commit a725349b65aef2940555af2ddb7b9461fbe0d5fd From 207bf69c20a5953ae01499434922244161e67206 Mon Sep 17 00:00:00 2001 From: "gengchao4@huawei.com" Date: Thu, 15 Jul 2021 20:01:58 +0800 Subject: [PATCH 205/226] bugfix for taskdef's random variation in offline case --- ge/graph/build/task_generator.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ge/graph/build/task_generator.cc b/ge/graph/build/task_generator.cc index 7bb2e2f6..1adcd0aa 100755 --- a/ge/graph/build/task_generator.cc +++ b/ge/graph/build/task_generator.cc @@ -50,6 +50,7 @@ const char *const kIsInputVar = "INPUT_IS_VAR"; const char *const kIsOutputVar = "OUTPUT_IS_VAR"; const char *const kProfilingMode = "PROFILING_MODE"; const char *const kIteratorV2 = "IteratorV2"; +const char *const kKernelInfoNameHccl = "ops_kernel_info_hccl"; const uint32_t kProfilingArStep = 2; const uint64_t kProfilingFpStartLogid = 1; const uint64_t kProfilingBpEndLogid = 2; @@ -437,14 +438,15 @@ Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &gra } // Reset stream id to ge stream id, as graph load must use ge stream to reassign stream - void *ops_kernel_info_store_ptr = kernel_info_store.get(); for (size_t idx = task_list_size_before; idx < task_list_size_after; ++idx) { task_def_list[idx].set_stream_id(static_cast<uint32_t>(stream_id)); op_name_map[idx] = name; - // Set opsKernelInfoStorePtr and op_index, the two fields be use in DistributeTask and InitTaskInfo TaskDef *task_def_ptr = &task_def_list[idx]; GE_CHECK_NOTNULL(task_def_ptr); - task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr)); + // Set opsKernelInfoStorePtr for hccl which will be used in DistributeTask and InitTaskInfo + if (op_kernel_lib_name == kKernelInfoNameHccl) { + task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(kernel_info_store.get())); + } } GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task finished, generate %zu task(s).", op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id, From 4132d6dcd22bad7c9c73a5f3e12a62051478a528 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Thu, 15 Jul 2021 20:19:28 +0800 Subject: [PATCH 206/226] Delete common format_transfers files --- ge/CMakeLists.txt | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index d9ef5eef..0236e8bd 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -109,13 +109,9 @@ endif () ################################################################## set(EXECUTOR_SRC_LIST - "common/dump/dump_manager.cc" "common/dump/dump_op.cc" "common/dump/exception_dumper.cc" "common/dump/opdebug_register.cc" - "common/formats/format_transfers/format_transfer_transpose.cc" - "common/formats/utils/formats_trans_utils.cc" - "common/fp16_t.cc" "common/ge/op_tiling_manager.cc"
"common/ge/plugin_manager.cc" "common/profiling/ge_profiling.cc" @@ -264,27 +260,6 @@ set(EXECUTOR_SRC_LIST set(COMPILER_SRC_LIST "analyzer/analyzer.cc" "common/dump/dump_op.cc" - "common/dump/dump_properties.cc" - "common/formats/format_transfers/datatype_transfer.cc" - "common/formats/format_transfers/format_transfer_c1hwncoc0_hwcn.cc" - "common/formats/format_transfers/format_transfer_dhwcn_fracz3D.cc" - "common/formats/format_transfers/format_transfer_dhwnc_fracz3D_transpose.cc" - "common/formats/format_transfers/format_transfer_fractal_nz.cc" - "common/formats/format_transfers/format_transfer_fractal_z.cc" - "common/formats/format_transfers/format_transfer_fractal_zz.cc" - "common/formats/format_transfers/format_transfer_fracz_hwcn.cc" - "common/formats/format_transfers/format_transfer_fracz_nchw.cc" - "common/formats/format_transfers/format_transfer_fracz_nhwc.cc" - "common/formats/format_transfers/format_transfer_hwcn_c1hwncoc0.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nchw.cc" - "common/formats/format_transfers/format_transfer_nc1hwc0_nhwc.cc" - "common/formats/format_transfers/format_transfer_nchw_fz_c04.cc" - "common/formats/format_transfers/format_transfer_nchw_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_nhwc_nc1hwc0.cc" - "common/formats/format_transfers/format_transfer_transpose.cc" - "common/formats/formats.cc" - "common/formats/utils/formats_trans_utils.cc" - "common/fp16_t.cc" "common/ge/op_tiling_manager.cc" "common/ge/plugin_manager.cc" "common/helper/model_cache_helper.cc" From 051d0e9fab55a2530b364ecea1e98c1705e308de Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Thu, 15 Jul 2021 20:43:09 +0800 Subject: [PATCH 207/226] Fix bug of single_op. --- ge/single_op/single_op.cc | 4 ++- ge/single_op/task/op_task.cc | 25 ++++++++++++++++--- ge/single_op/task/op_task.h | 5 ++-- ge/single_op/task/tbe_task_builder.cc | 2 +- .../ge/single_op/single_op_task_unittest.cc | 20 +++++++++++++++ 5 files changed, 48 insertions(+), 8 deletions(-) diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc index a82c30ba..23f4cfad 100755 --- a/ge/single_op/single_op.cc +++ b/ge/single_op/single_op.cc @@ -433,11 +433,13 @@ Status DynamicSingleOp::ExecuteAsync(const vector &input_desc, if (!inputs_size.empty()) { StreamResource *stream_resource = SingleOpManager::GetInstance().GetResource(resource_id_, stream_); GE_CHK_STATUS_RET_NOLOG(UpdateInputsBufferAddr(stream_resource, stream_, inputs_size, update_buffers)); - GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers)); } if (hybrid_model_executor_ != nullptr) { GELOGD("Execute multi-task dynamic single op by hybrid model executor"); + if (!inputs_size.empty()) { + GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers)); + } hybrid::HybridModelExecutor::ExecuteArgs args; GE_CHK_STATUS_RET_NOLOG(InitHybridModelArgs(update_buffers, output_buffers, input_desc, args)); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index dbc90ac5..fd6639a5 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -294,16 +294,15 @@ Status TbeOpTask::UpdateNodeByShape(const vector &input_desc, cons Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { if (tiling_buffer != nullptr) { - uintptr_t *arg_base = nullptr; - size_t arg_num = 0; - GetIoAddr(arg_base, arg_num); + uintptr_t *arg_base = reinterpret_cast(args_.get()); + size_t arg_num = arg_size_ / sizeof(void *); 
GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); uint32_t inputs_num = node->GetOpDesc()->GetInputsSize(); uint32_t outputs_num = node->GetOpDesc()->GetOutputsSize(); uint32_t workspace_nums = node->GetOpDesc()->GetWorkspace().size(); uint32_t tiling_index = inputs_num + outputs_num + workspace_nums; - if (arg_num == 0 || arg_num < tiling_index) { + if (arg_num == 0 || arg_num <= tiling_index) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.", tiling_index, arg_num); return ACL_ERROR_GE_INTERNAL_ERROR; @@ -481,6 +480,24 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) { } } +Status AtomicAddrCleanOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { + if (tiling_buffer != nullptr) { + uintptr_t *arg_base = reinterpret_cast<uintptr_t *>(args_.get()); + size_t arg_num = arg_size_ / sizeof(void *); + uint32_t tiling_index = atomic_output_indices_.size(); + if (arg_num == 0 || arg_num <= tiling_index) { + GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.", + tiling_index, arg_num); + return ACL_ERROR_GE_INTERNAL_ERROR; + } + arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer); + } + node_ = node; + tiling_buffer_ = tiling_buffer; + max_tiling_size_ = max_tiling_size; + return SUCCESS; +} + Status AtomicAddrCleanOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) { return SUCCESS; diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h index 132672b0..4a839389 100644 --- a/ge/single_op/task/op_task.h +++ b/ge/single_op/task/op_task.h @@ -97,7 +97,7 @@ class TbeOpTask : public OpTask { const void *GetArgs() const; size_t GetArgSize() const; const std::string &GetStubName() const; - Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size); + virtual Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size); const std::string &GetTaskType() const override; void SetHandle(void *handle); @@ -149,6 +149,7 @@ class TbeOpTask : public OpTask { class AtomicAddrCleanOpTask : public TbeOpTask { public: Status InitAtomicAddrCleanIndices(); + Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) override; private: Status UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) override; Status UpdateIoAddr(const vector<DataBuffer> &inputs, const vector<DataBuffer> &outputs) override; Status UpdateTilingArgs(rtStream_t stream) override; Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override; - std::vector<int> atomic_output_indices_; + std::vector<int> atomic_output_indices_; }; class AiCpuBaseTask : public OpTask { diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc index 017dac25..f947ca57 100644 --- a/ge/single_op/task/tbe_task_builder.cc +++ b/ge/single_op/task/tbe_task_builder.cc @@ -425,7 +425,7 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) { GELOGD("[%s] Done allocating tiling buffer, size=%ld.", op_desc_->GetName().c_str(), max_size); } - task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size)); + GE_CHK_STATUS_RET_NOLOG(task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size))); return SUCCESS; } diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 8964df74..5960fbbc 100644 ---
a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -237,3 +237,23 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) { ASSERT_EQ(ret, PARAM_INVALID); } } + +TEST_F(UtestSingleOpTask, test_dynamic_support) { + auto graph = make_shared<ComputeGraph>("graph"); + auto op_desc = make_shared<OpDesc>("Add", "Add"); + auto node = graph->AddNode(op_desc); + AtomicAddrCleanOpTask atomic_task; + TbeOpTask tbe_task; + + ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR); + ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR); + + tbe_task.arg_size_ = sizeof(void *); + tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]); + atomic_task.arg_size_ = sizeof(void *); + atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]); + ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS); + ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS); + tbe_task.tiling_buffer_ = nullptr; + atomic_task.tiling_buffer_ = nullptr; +} From 927439cb92722d36401af139899288d824f333c2 Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 16 Jul 2021 11:14:50 +0800 Subject: [PATCH 208/226] fix error code and add complex128 support --- ge/generator/ge_generator.cc | 1 - ge/graph/build/memory/graph_mem_assigner.cc | 2 +- ge/graph/build/memory/memory_assigner.cc | 5 +++-- ge/graph/manager/graph_manager.cc | 4 ++-- ge/offline/single_op_parser.cc | 3 ++- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 45eaed59..d35d7d6e 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -1157,7 +1157,6 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector if (ret != SUCCESS) { REPORT_CALL_ERROR("E19999", "build graph failed, graph id:%u, ret:%d", graph_id, ret); GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "[Build][Graph] fail, graph id: %u", graph_id); - ret = GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED; } RtContextUtil::GetInstance().DestroyRtContexts(session_id); diff --git a/ge/graph/build/memory/graph_mem_assigner.cc b/ge/graph/build/memory/graph_mem_assigner.cc index f8878383..542b6215 100755 --- a/ge/graph/build/memory/graph_mem_assigner.cc +++ b/ge/graph/build/memory/graph_mem_assigner.cc @@ -275,7 +275,7 @@ Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, map<uint64_t, si std::vector<std::string>({"size", "item", "maxsize"}), std::vector<std::string>({std::to_string(total_mem_offset), "featuremap", std::to_string(VarManager::Instance(session_id)->GetGraphMemoryMaxSize())})); - return ge::FAILED; + return ACL_ERROR_GE_MEMORY_ALLOCATION; } return SUCCESS; } diff --git a/ge/graph/build/memory/memory_assigner.cc b/ge/graph/build/memory/memory_assigner.cc index 6e49827f..5846e922 100755 --- a/ge/graph/build/memory/memory_assigner.cc +++ b/ge/graph/build/memory/memory_assigner.cc @@ -29,9 +29,10 @@ Status MemoryAssigner::AssignMemory(bool is_loop_graph, map<uint64_t, size_t> &m } // Reassign memory for special nodes - if (graph_mem_assigner.ReAssignMemory(is_loop_graph, mem_offset) != ge::SUCCESS) { + Status ret = graph_mem_assigner.ReAssignMemory(is_loop_graph, mem_offset) + if (ret != ge::SUCCESS) { GELOGE(ge::FAILED, "[ReAssign][Memory] failed, graph:%s", compute_graph_->GetName().c_str()); - return ge::FAILED; + return ret; } // Assign memory (block and offset) for zero copy nodes diff --git a/ge/graph/manager/graph_manager.cc
b/ge/graph/manager/graph_manager.cc index 7d72d85b..9749010a 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -1482,8 +1482,8 @@ Status GraphManager::BuildGraph(const GraphId &graph_id, const std::vector<GeTensor> graph_node->SetRunFlag(false); if (ret != SUCCESS) { - GELOGE(GE_GRAPH_PRERUN_FAILED, "[Call][StartForRunGraph] failed! graph_id:%u.", graph_id); - return GE_GRAPH_PRERUN_FAILED; + GELOGE(ret, "[Call][StartForRunGraph] failed! graph_id:%u.", graph_id); + return ret; } GELOGI("[BuildGraph] build graph success, graph_id=%u.", graph_id); diff --git a/ge/offline/single_op_parser.cc b/ge/offline/single_op_parser.cc index 6bc5cb3d..aeb73116 100644 --- a/ge/offline/single_op_parser.cc +++ b/ge/offline/single_op_parser.cc @@ -89,7 +89,8 @@ map<string, DataType> kDataTypeDict = { {"float", DT_FLOAT}, {"float32", DT_FLOAT}, {"double", DT_DOUBLE}, - {"complex64", DT_COMPLEX64} + {"complex64", DT_COMPLEX64}, + {"complex128", DT_COMPLEX128} }; map<string, Format> kFormatDict = { From a5137fb87f65cb8bd6c940f4d153f430692b767f Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 16 Jul 2021 11:23:15 +0800 Subject: [PATCH 209/226] fix error code and add complex128 support --- ge/graph/build/memory/memory_assigner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ge/graph/build/memory/memory_assigner.cc b/ge/graph/build/memory/memory_assigner.cc index 5846e922..41171164 100755 --- a/ge/graph/build/memory/memory_assigner.cc +++ b/ge/graph/build/memory/memory_assigner.cc @@ -29,7 +29,7 @@ Status MemoryAssigner::AssignMemory(bool is_loop_graph, map<uint64_t, size_t> &m } // Reassign memory for special nodes - Status ret = graph_mem_assigner.ReAssignMemory(is_loop_graph, mem_offset) + Status ret = graph_mem_assigner.ReAssignMemory(is_loop_graph, mem_offset); if (ret != ge::SUCCESS) { GELOGE(ge::FAILED, "[ReAssign][Memory] failed, graph:%s", compute_graph_->GetName().c_str()); return ret; From 21886e608e12983bbe3aecf74e053ed1707ce121 Mon Sep 17 00:00:00 2001 From: zhaozhixuan Date: Fri, 16 Jul 2021 12:00:31 +0800 Subject: [PATCH 210/226] Fix review advice.
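Record node_, tiling_buffer_ and max_tiling_size_ before the argument-table check instead of after it, so the members stay populated for later tiling updates even when validation fails, and go back to reading the I/O address table through GetIoAddr() rather than casting args_ directly. A sketch of the guard both overrides now share, where tiling_index is inputs + outputs + workspaces for TbeOpTask and atomic_output_indices_.size() for the atomic-clean task:

    uintptr_t *arg_base = nullptr;
    size_t arg_num = 0;
    GetIoAddr(arg_base, arg_num);  // I/O address table and its length
    if (arg_num == 0 || arg_num < tiling_index) {
      return ACL_ERROR_GE_INTERNAL_ERROR;  // no room for a tiling slot
    }
    arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer);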
--- ge/single_op/task/op_task.cc | 26 ++++++++++--------- .../ge/single_op/single_op_task_unittest.cc | 8 ++++-- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index fd6639a5..ee752022 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -293,25 +293,26 @@ Status TbeOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, cons } Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { + node_ = node; + tiling_buffer_ = tiling_buffer; + max_tiling_size_ = max_tiling_size; if (tiling_buffer != nullptr) { - uintptr_t *arg_base = reinterpret_cast<uintptr_t *>(args_.get()); - size_t arg_num = arg_size_ / sizeof(void *); + uintptr_t *arg_base = nullptr; + size_t arg_num = 0; + GetIoAddr(arg_base, arg_num); GE_CHECK_NOTNULL(node); GE_CHECK_NOTNULL(node->GetOpDesc()); uint32_t inputs_num = node->GetOpDesc()->GetInputsSize(); uint32_t outputs_num = node->GetOpDesc()->GetOutputsSize(); uint32_t workspace_nums = node->GetOpDesc()->GetWorkspace().size(); uint32_t tiling_index = inputs_num + outputs_num + workspace_nums; - if (arg_num == 0 || arg_num <= tiling_index) { + if (arg_num == 0 || arg_num < tiling_index) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.", tiling_index, arg_num); return ACL_ERROR_GE_INTERNAL_ERROR; } arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer); } - node_ = node; - tiling_buffer_ = tiling_buffer; - max_tiling_size_ = max_tiling_size; return SUCCESS; } @@ -481,20 +482,21 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) { } Status AtomicAddrCleanOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { + node_ = node; + tiling_buffer_ = tiling_buffer; + max_tiling_size_ = max_tiling_size; if (tiling_buffer != nullptr) { - uintptr_t *arg_base = reinterpret_cast<uintptr_t *>(args_.get()); - size_t arg_num = arg_size_ / sizeof(void *); + uintptr_t *arg_base = nullptr; + size_t arg_num = 0; + GetIoAddr(arg_base, arg_num); uint32_t tiling_index = atomic_output_indices_.size(); - if (arg_num == 0 || arg_num <= tiling_index) { + if (arg_num == 0 || arg_num < tiling_index) { GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.", + tiling_index, arg_num); + return ACL_ERROR_GE_INTERNAL_ERROR; + } + arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer); + } return SUCCESS; } diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc index 5960fbbc..9a0381cd 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -245,12 +245,16 @@ TEST_F(UtestSingleOpTask, test_dynamic_support) { AtomicAddrCleanOpTask atomic_task; TbeOpTask tbe_task; + tbe_task.arg_size_ = sizeof(void *) * 1; + tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]); + atomic_task.arg_size_ = sizeof(void *) * 1; + atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]); ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR); ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR); - tbe_task.arg_size_ = sizeof(void *); + tbe_task.arg_size_ = sizeof(void *) * 2; tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]); -
atomic_task.arg_size_ = sizeof(void *); + atomic_task.arg_size_ = sizeof(void *) * 2; atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]); ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS); ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS); tbe_task.tiling_buffer_ = nullptr; atomic_task.tiling_buffer_ = nullptr; From a029050b65f62ee5f52643babc092ec99efc7e6d Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Tue, 13 Jul 2021 21:10:15 +0800 Subject: [PATCH 211/226] support v1 infershape modified: ge/graph/passes/base_pass.cc modified: ge/graph/passes/base_pass.h modified: ge/graph/passes/infer_base_pass.cc modified: ge/graph/passes/infershape_pass.cc modified: ge/graph/passes/infershape_pass.h modified: ge/graph/preprocess/graph_preprocess.cc modified: tests/ut/ge/graph/passes/addn_pass_unittest.cc modified: tests/ut/ge/graph/passes/base_pass_unittest.cc modified: tests/ut/ge/graph/passes/infershape_pass_unittest.cc
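The reworked base_pass.cc keeps the scheduler shape of the old implementation: a worklist seeded with nodes that have no input edges, where a node is queued only once all of its inputs have been seen, and where passes may request re-pass, suspend or resume of nodes. A minimal sketch of that loop, assuming the NodePtr/Node* containers the old code used:

    std::deque<NodePtr> nodes;              // worklist, seeded with input-edge-free nodes
    std::unordered_set<Node *> nodes_seen;  // a node is queued only after all inputs are seen
    while (!nodes.empty()) {
      NodePtr node = nodes.front();
      nodes.pop_front();
      // run every registered pass on the node and collect its
      // re-pass / suspend / resume / delete requests
    }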
--- ge/graph/passes/base_pass.cc | 849 +++++----- ge/graph/passes/base_pass.h | 121 +- ge/graph/passes/infer_base_pass.cc | 3 + ge/graph/passes/infershape_pass.cc | 545 +++++-- ge/graph/passes/infershape_pass.h | 94 +- ge/graph/preprocess/graph_preprocess.cc | 16 + .../ut/ge/graph/passes/addn_pass_unittest.cc | 2 +- .../ut/ge/graph/passes/base_pass_unittest.cc | 1426 +++++++++++------ .../graph/passes/infershape_pass_unittest.cc | 423 +++-- 9 files changed, 2187 insertions(+), 1292 deletions(-) diff --git a/ge/graph/passes/base_pass.cc b/ge/graph/passes/base_pass.cc index a1551eb2..8b4a8b88 100755 --- a/ge/graph/passes/base_pass.cc +++ b/ge/graph/passes/base_pass.cc @@ -1,374 +1,475 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "graph/passes/base_pass.h" - -#include <deque> -#include <unordered_set> - -#include "framework/common/debug/log.h" -#include "framework/common/debug/ge_log.h" -#include "graph/compute_graph.h" -#include "graph/utils/graph_utils.h" - -namespace ge { -namespace { -constexpr int kMaxRePassTimes = 10000; -constexpr size_t kMaxOneInNodes = 1000; -// Each iteration, we take about 0.3k memory on the stack, we should change the recursion to loop later -constexpr int kMaxRecursiveDepth = 20; -struct DuringPassNodeSets { - std::unordered_set<Node *> nodes_seen; - std::unordered_set<NodePtr> nodes_deleted; - std::unordered_set<NodePtr> nodes_re_pass; - std::unordered_set<NodePtr> nodes_re_pass_immediately; - std::unordered_set<NodePtr> nodes_last; - std::unordered_set<NodePtr> nodes_suspend; - std::unordered_set<NodePtr> nodes_resume; -}; - -void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph, std::deque<NodePtr> &input_edge_nodes, - std::unordered_set<Node *> &nodes_seen, std::unordered_set<NodePtr> &nodes_last) { - nodes_last.clear(); - for (auto &node : graph->GetDirectNode()) { - if (node == nullptr) { - continue; - } - size_t in_nums = node->GetInNodes().size(); - if (in_nums == 0) { - input_edge_nodes.push_back(node); - nodes_seen.insert(node.get()); - } else if (in_nums > kMaxOneInNodes) { - nodes_last.insert(node); - } - } -} - -bool IsAllInNodesAlive(const Node::Vistor<NodePtr> &nodes, const std::unordered_set<NodePtr> &nodes_suspend) { - return !std::any_of(nodes.begin(), nodes.end(), [&](const NodePtr &n) { return nodes_suspend.count(n) > 0; }); -} - -void AddNextIterNodes(const Node::Vistor<NodePtr> &nodes, std::deque<NodePtr> &nodes_to_pass, - DuringPassNodeSets &during_pass_node_set) { - auto &nodes_seen = during_pass_node_set.nodes_seen; - const auto &nodes_last = during_pass_node_set.nodes_last; - const auto &nodes_suspend = during_pass_node_set.nodes_suspend; - for (auto &node : nodes) { - if (node == nullptr) { - continue; - } - if (nodes_last.count(node) != 0) { - continue; - } - if (nodes_suspend.count(node) > 0) { - GELOGD("The node %s has suspend by pass, skip it.", node->GetName().c_str()); - continue; - } - - bool all_in_nodes_alive = IsAllInNodesAlive(node->GetInAllNodes(), nodes_suspend); - bool all_in_nodes_seen = node->IsAllInNodesSeen(nodes_seen); - if (all_in_nodes_seen && all_in_nodes_alive && nodes_seen.insert(node.get()).second) { - nodes_to_pass.push_back(node); - } - } -} - -void AddRepassNodes(DuringPassNodeSets &during_pass_node_set, std::deque<NodePtr> &nodes) { - for (const auto &node : during_pass_node_set.nodes_re_pass_immediately) { - GELOGD("The node %s will be re-pass immediately.", node->GetName().c_str()); - nodes.push_front(node); - } - during_pass_node_set.nodes_re_pass_immediately.clear(); -} - -void AddResumeNodes(DuringPassNodeSets &during_pass_node_set, std::deque<NodePtr> &nodes) { - for (auto &node : during_pass_node_set.nodes_resume) { - const auto &it = during_pass_node_set.nodes_suspend.find(node); - if (it != during_pass_node_set.nodes_suspend.end()) { - during_pass_node_set.nodes_suspend.erase(node); - GELOGD("The node %s resumed by pass.", node->GetName().c_str()); - nodes.push_back(node); - } else { - GELOGW("The node %s not suspend, drop from resumed", node->GetName().c_str()); - } - } - during_pass_node_set.nodes_resume.clear(); -} - -void PushToSuspendNodes(DuringPassNodeSets &during_pass_node_set, const std::string &pass_name, - const std::unordered_set<NodePtr> &nodes_suspend, - const std::unordered_set<NodePtr> &nodes_resume) { - for (const auto &node : nodes_suspend) { - GELOGD("The iteration suspend of node %s has been set by pass %s", node->GetName().c_str(), pass_name.c_str()); - during_pass_node_set.nodes_suspend.emplace(node); - } - - for (const auto &node : nodes_resume) { - GELOGD("The iteration suspend of node %s has been resumed by pass %s", node->GetName().c_str(), pass_name.c_str()); - during_pass_node_set.nodes_resume.emplace(node); - } -} - -void PushToRePassIfSeen(NodePtr &node, const std::pair<std::string, BaseNodePass *> &name_to_pass, - std::unordered_set<Node *> &nodes_seen, const std::unordered_set<NodePtr> &nodes_to_re_pass, - std::unordered_set<NodePtr> &nodes_re_pass) { - for (const auto &node_to_re_pass : nodes_to_re_pass) { - if (node_to_re_pass == nullptr) { - GELOGW("Found null re-pass node when executing %s on node %s type %s", name_to_pass.first.c_str(), - node->GetName().c_str(), node->GetType().c_str()); - continue; - } - if (nodes_seen.count(node_to_re_pass.get()) > 0 || node_to_re_pass->IsAllInNodesSeen(nodes_seen)) { - GELOGD("The node %s will be re-pass.", node_to_re_pass->GetName().c_str()); - nodes_re_pass.insert(node_to_re_pass); - } else { - GELOGD("The node %s are not all seen, don't set repass this time", node_to_re_pass->GetName().c_str()); - } - } -} - -Status RunPasses(NodePtr &node, const NamesToPass &names_to_passes, DuringPassNodeSets &during_pass_node_set) { - if (node == nullptr) { - REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid."); - GELOGE(FAILED, "[Check][Param] parameter node is nullptr."); - return FAILED; - } - GELOGD("Begin to run pass for node %s", node->GetName().c_str()); - for (const auto &name_to_pass : names_to_passes) { - if (name_to_pass.second == nullptr) { - GELOGE(INTERNAL_ERROR, "[Check][Param] There is null pointer in passes(%s), skip it", name_to_pass.first.c_str()); - continue; - } - - GELOGD("Begin to run pass %s for node %s", name_to_pass.first.c_str(), node->GetName().c_str()); - name_to_pass.second->init(); - auto result = name_to_pass.second->Run(node); - if (result != SUCCESS) { - REPORT_CALL_ERROR("E19999", "process pass %s on node:%s failed, ret:%u", - name_to_pass.first.c_str(), node->GetName().c_str(), result); - GELOGE(INTERNAL_ERROR, "[Process][Pass] %s on node %s failed, result " - "%u, the passes will be terminated immediately.", - name_to_pass.first.c_str(), node->GetName().c_str(), result); - return result; - } - - const auto &nodes_to_re_pass = name_to_pass.second->GetNodesNeedRePass(); - PushToRePassIfSeen(node, name_to_pass, during_pass_node_set.nodes_seen, nodes_to_re_pass, - during_pass_node_set.nodes_re_pass); - - const auto &nodes_to_re_pass_immediately = name_to_pass.second->GetNodesNeedRePassImmediately(); - PushToRePassIfSeen(node, name_to_pass, during_pass_node_set.nodes_seen, nodes_to_re_pass_immediately, - during_pass_node_set.nodes_re_pass_immediately); - - PushToSuspendNodes(during_pass_node_set, name_to_pass.first, - name_to_pass.second->GetNodesSuspend(), name_to_pass.second->GetNodesResume()); - - const auto &nodes_deleted_by_pass = name_to_pass.second->GetNodesDeleted(); - during_pass_node_set.nodes_deleted.insert(nodes_deleted_by_pass.begin(), nodes_deleted_by_pass.end()); - if (nodes_deleted_by_pass.count(node) > 0) { - GELOGD("The node %s was deleted by pass %s, stop the remain passes", node->GetName().c_str(), - name_to_pass.first.c_str()); - break; - } - } - - return SUCCESS; -} - -void SetFlagOption(NodePassOption option, NamesToPass names_to_pass) { - for (auto &name_to_pass : names_to_pass) { - name_to_pass.second->SetOption(option, ""); - } -} - -void ClearOption(NamesToPass names_to_pass) { - for (auto &name_to_pass : names_to_pass) { - name_to_pass.second->ClearOptions(); - } -} - -bool CheckNode(const NodePtr &node, const DuringPassNodeSets &during_pass_node_set) { - if (node == nullptr) { - GELOGW("node is null"); - return false; - } - if (during_pass_node_set.nodes_deleted.count(node) > 0) { - GELOGD("The node %s was deleted before, skip it.", node->GetName().c_str()); - return false; - } - if (during_pass_node_set.nodes_suspend.count(node) > 0) { - GELOGD("The node %s has been added to suspend-iteration nodes list, the iteration of it will be suspend.", - node->GetName().c_str()); - return false; - } - - return true; -} -} // namespace - -Status BaseNodePass::IsolateAndDeleteNode(NodePtr &node, const std::vector<int> &io_map) { - if (node == nullptr) { - REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid."); - GELOGE(FAILED, "[Check][Param] parameter node is nullptr."); - return FAILED; - } - GELOGI("Prepare to isolate and delete node, name:%s, type:%s.", node->GetName().c_str(), - node->GetType().c_str()); - ComputeGraphPtr graph = node->GetOwnerComputeGraph(); - if (graph == nullptr) { - REPORT_INNER_ERROR("E19999", "The owner graph of node:%s must not be null.", node->GetName().c_str()); - GELOGE(FAILED, "[Get][OwnerComputeGraph] failed, The owner graph of node:%s must not be null.", - node->GetName().c_str()); - return FAILED; - } - - AddRePassNodesWithInOut(node); - - if (GraphUtils::IsolateNode(node, io_map) != GRAPH_SUCCESS) { - REPORT_CALL_ERROR("E19999", "Isolate Node:%s failed", node->GetName().c_str()); - GELOGE(FAILED, "[Isolate][Node] %s failed.", node->GetName().c_str()); - return FAILED; - } - - if (GraphUtils::RemoveNodeWithoutRelink(graph, node) != SUCCESS) { - REPORT_CALL_ERROR("E19999", "call RemoveNodeWithoutRelink for node:%s failed.", node->GetName().c_str()); - GELOGE(FAILED, "[Call][RemoveNodeWithoutRelink] for node:%s failed.", node->GetName().c_str()); - return FAILED; - } - - AddNodeDeleted(node); - return SUCCESS; -} - -Status GEPass::Run(const NamesToPass &names_to_passes) { - if (graph_ == nullptr) { - REPORT_INNER_ERROR("E19999", "graph_ is nullptr, check invalid."); - GELOGE(INTERNAL_ERROR, "[Check][Param] The graph is nullptr"); - return INTERNAL_ERROR; - } - if (names_to_passes.empty()) { - GELOGW("No passes input, the GEPass will do nothing"); - return INTERNAL_ERROR; - } - - if (depth_ > kMaxRecursiveDepth) { - GELOGE(PARAM_INVALID, - "[Check][Param] The pass for root graph %s will be terminated because too many nesting" - " levels(%d) of subgraphs, last subgraph is %s", - root_graph_->GetName().c_str(), depth_, graph_->GetName().c_str()); - return PARAM_INVALID; - } - - return RunPassesOneGraph(names_to_passes); -} - -Status GEPass::RunPassesOneGraph(const NamesToPass &names_to_passes) { - GELOGD("Begin to run pass on graph, passes count %zu", names_to_passes.size()); - std::deque<NodePtr> nodes; - DuringPassNodeSets during_pass_node_set; - GetAllNodesNoInputEdge(graph_, nodes, during_pass_node_set.nodes_seen, during_pass_node_set.nodes_last); - GELOGD("Start points count %zu", nodes.size()); - int re_pass_times = 0; - - do { - for (auto &node : during_pass_node_set.nodes_re_pass) { - nodes.push_back(node); - during_pass_node_set.nodes_seen.insert(node.get()); - } - during_pass_node_set.nodes_re_pass.clear(); - - while (!nodes.empty()) { - NodePtr node = nodes.front(); - nodes.pop_front(); - - (void)during_pass_node_set.nodes_re_pass.erase(node); - if (!CheckNode(node, during_pass_node_set)) { - continue; - } - AddNextIterNodes(node->GetOutNodes(), nodes, during_pass_node_set); - - auto ret = RunPasses(node, names_to_passes,
during_pass_node_set); - if (ret != SUCCESS) { - GELOGE(ret, "[Process][Passes] on node %s type %s failed, error code:%u", - node->GetName().c_str(), node->GetType().c_str(), ret); - return ret; - } - - bool has_sub_graph = false; - ret = RunPassesOnSubGraph(node, names_to_passes, has_sub_graph); - if (ret != SUCCESS) { - GELOGE(ret, "[Run][Passes] on the sub graph of node %s failed", node->GetName().c_str()); - return ret; - } - - if (has_sub_graph) { - GELOGD("There are subgraphs on node %s, run passes for for the second time", node->GetName().c_str()); - SetFlagOption(kOptimizeAfterSubGraph, names_to_passes); - ret = RunPasses(node, names_to_passes, during_pass_node_set); - if (ret != SUCCESS) { - GELOGE(ret, "[Process][Passes] on node %s type %s failed, error code: %u", - node->GetName().c_str(), node->GetType().c_str(), ret); - return ret; - } - - // There is only one option scene, so set and clear options around the `RunPasses` func. - // if there are more than one scene to set options, the `ClearOption` function - // should be called each time at the begin of the iteration - ClearOption(names_to_passes); - } - - AddRepassNodes(during_pass_node_set, nodes); - AddResumeNodes(during_pass_node_set, nodes); - } - - for (auto &node : during_pass_node_set.nodes_last) { - bool all_in_nodes_seen = node->IsAllInNodesSeen(during_pass_node_set.nodes_seen); - if (all_in_nodes_seen && during_pass_node_set.nodes_seen.insert(node.get()).second) { - nodes.push_back(node); - } - } - during_pass_node_set.nodes_last.clear(); - } while ((!during_pass_node_set.nodes_re_pass.empty() || !nodes.empty()) && ++re_pass_times < kMaxRePassTimes); - - if (re_pass_times == kMaxRePassTimes) { - GELOGW("re_pass_times should not come to %d", kMaxRePassTimes); - } - GELOGD("All passes runs end"); - - return SUCCESS; -} -Status GEPass::RunPassesOnSubGraph(const NodePtr &node, const NamesToPass &names_to_passes, bool &has_sub_graph) { - auto sub_graph_names = node->GetOpDesc()->GetSubgraphInstanceNames(); - has_sub_graph = false; - for (const auto &name : sub_graph_names) { - auto graph = root_graph_->GetSubgraph(name); - if (graph == nullptr) { - GELOGW("Can not find the sub graph %s from node %s, the pass-process will skip it", - name.c_str(), node->GetName().c_str()); - continue; - } - has_sub_graph = true; - GELOGI("Begin to run passes on the sub graph %s of node %s", name.c_str(), node->GetName().c_str()); - GEPass pass(graph, root_graph_, depth_ + 1); - auto ret = pass.Run(names_to_passes); - if (ret != SUCCESS) { - GELOGE(ret, "[Run][Passes] for sub graph:%s from node:%s failed", name.c_str(), node->GetName().c_str()); - return ret; - } - } - return SUCCESS; -} -} // namespace ge +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "graph/passes/base_pass.h"
+
+#include
+#include
+
+#include "common/debug/log.h"
+#include "graph/utils/graph_utils.h"
+
+namespace ge {
+namespace {
+constexpr int kMaxRePassTimes = 10000;
+constexpr size_t kMaxOneInNodes = 1000;
+// Each iteration takes about 0.3 KB of stack memory; we should change the recursion to a loop later
+constexpr int kMaxRecursiveDepth = 20;
+
+void GetAllNodesNoInputEdge(const ComputeGraphPtr &graph,
+                            GEPass::GraphLevelState &g_state) {
+  for (auto &node : graph->GetDirectNode()) {
+    if (node == nullptr) {
+      continue;
+    }
+    size_t in_nums = node->GetInNodes().size();
+    if (in_nums == 0) {
+      g_state.AddNodeToQueueIfNotSeen(node);
+    } else if (in_nums > kMaxOneInNodes) {
+      g_state.nodes_last.insert(node);
+    }
+  }
+}
+
+bool AnyNodesIn(const Node::Vistor<NodePtr> &nodes, const std::unordered_set<NodePtr> &nodes_set) {
+  return std::any_of(nodes.begin(), nodes.end(), [&](const NodePtr &n) {
+    return nodes_set.count(n) > 0;
+  });
+}
+
+bool IsNodeReadyToQueue(const NodePtr &node, GEPass::GraphLevelState &g_state) {
+  if (node == nullptr) {
+    GELOGW("node is null");
+    return false;
+  }
+  if (g_state.nodes_deleted.count(node) > 0) {
+    GELOGD("The node %s was deleted before, skip it.", node->GetName().c_str());
+    return false;
+  }
+
+  if (g_state.nodes_last.count(node) != 0) {
+    return false;
+  }
+
+  // all in-nodes seen && all in-nodes not suspended
+  if (!node->IsAllInNodesSeen(g_state.nodes_seen)) {
+    return false;
+  }
+
+  if (g_state.nodes_suspend.count(node) > 0) {
+    GELOGD("The node %s has been added to the suspend-iteration nodes list, its iteration will be suspended.",
+           node->GetName().c_str());
+    return false;
+  }
+
+  if (AnyNodesIn(node->GetInAllNodes(), g_state.nodes_suspend)) {
+    GELOGD("The node %s has an in-node in the suspend-iteration nodes list, its iteration will be suspended.",
+           node->GetName().c_str());
+    return false;
+  }
+  return true;
+}
+
+void AddNextIterNodes(const NodePtr &cur_node,
+                      std::unordered_set<NodePtr> &out_nodes_before_pass,
+                      GEPass::GraphLevelState &g_state) {
+  for (auto &node : cur_node->GetOutNodes()) {
+    if (node == nullptr) {
+      continue;
+    }
+    if (out_nodes_before_pass.erase(node) == 0) {
+      // a new output node came up after the passes ran on this node
+      GELOGI("New output node %s came up after the passes on node %s.",
+             node->GetName().c_str(), cur_node->GetName().c_str());
+    }
+
+    // all in-nodes seen && all in-nodes not suspended
+    if (IsNodeReadyToQueue(node, g_state)) {
+      g_state.AddNodeToQueueIfNotSeen(node);
+    }
+  }
+
+  // A-->B-->C: if B was deleted by a pass, C may have lost its input edges;
+  // add such unlinked nodes to the queue if needed
+  for (const auto &node : out_nodes_before_pass) {
+    if (node->GetInAllNodes().empty() && IsNodeReadyToQueue(node, g_state)) {
+      GELOGI("Node %s may be lost from cur node %s, add it to the queue if not seen.",
+             node->GetName().c_str(), cur_node->GetName().c_str());
+      g_state.AddNodeToQueueIfNotSeen(node);
+    }
+  }
+}
+
+void AddImmediateRepassNodesToQueue(NodePtr &cur_node,
+                                    std::unordered_map<NodePtr, std::string> re_pass_imm_nodes_to_pass_names,
+                                    GEPass::GraphLevelState &g_state) {
+  for (const auto &node_2_pass_names : re_pass_imm_nodes_to_pass_names) {
+    auto imme_repass_node = node_2_pass_names.first;
+    if (imme_repass_node == nullptr) {
+      GELOGW("Found null immediately re-pass node when executing pass %s on node %s type %s",
+             node_2_pass_names.second.c_str(),
+             cur_node->GetName().c_str(), cur_node->GetType().c_str());
+      continue;
+    }
+    if (g_state.nodes_passed.count(imme_repass_node) > 0) {
+      GELOGD("The node %s specified by pass %s has been passed, it will re-pass immediately",
+             imme_repass_node->GetName().c_str(), node_2_pass_names.second.c_str());
+      g_state.AddNodeToQueueFront(imme_repass_node);
+      continue;
+    }
+    GELOGW("The node %s specified by pass %s has not been passed yet, it will not re-pass immediately",
+           node_2_pass_names.first->GetName().c_str(), node_2_pass_names.second.c_str());
+  }
+}
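For orientation, the queueing discipline these helpers rely on reduces to a deduplicated work queue: a seen-set guards normal enqueues, while immediate re-passes jump to the front. Below is a minimal standalone sketch of that idea; plain ints stand in for NodePtr, and the struct and names are illustrative only, not part of this patch.

// Standalone sketch of the GraphLevelState queueing discipline above.
// Plain ints stand in for NodePtr; everything here is illustrative only.
#include <deque>
#include <iostream>
#include <unordered_set>

struct MiniGraphState {
  std::unordered_set<int> seen;  // nodes ever queued (cf. GraphLevelState::nodes_seen)
  std::deque<int> queue;         // pending nodes (cf. GraphLevelState::nodes)

  // Mirrors AddNodeToQueueIfNotSeen: enqueue only on first sighting.
  void AddIfNotSeen(int node) {
    if (seen.insert(node).second) {
      queue.push_back(node);
    }
  }
  // Mirrors AddNodeToQueueFront: re-pass-immediately jumps the queue.
  void AddFront(int node) {
    seen.insert(node);
    queue.push_front(node);
  }
};

int main() {
  MiniGraphState s;
  s.AddIfNotSeen(1);
  s.AddIfNotSeen(2);
  s.AddIfNotSeen(1);  // duplicate, ignored
  s.AddFront(3);      // processed before 1 and 2
  while (!s.queue.empty()) {
    std::cout << s.queue.front() << '\n';  // prints 3 1 2
    s.queue.pop_front();
  }
  return 0;
}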
+
+void AddLastNodesToQueue(GEPass::GraphLevelState &g_state) {
+  for (auto &node : g_state.nodes_last) {
+    if (node->IsAllInNodesSeen(g_state.nodes_seen)) {
+      g_state.AddNodeToQueueIfNotSeen(node);
+    }
+  }
+  g_state.nodes_last.clear();
+}
+
+void AddResumeNodesToQueue(const std::unordered_map<NodePtr, std::string> resume_node_2_pass_names,
+                           GEPass::GraphLevelState &g_state) {
+  // The base pass does not record the order of suspend & resume, so we do not know which came first
+  // within a node pass. Here, if one node pass both suspends and resumes a node, we consider the node resumed.
+  // A better way would be to record the order, then suspend or resume in that order.
+  for (const auto &node_2_pass_names : resume_node_2_pass_names) {
+    auto node = node_2_pass_names.first;
+    if (g_state.nodes_suspend.erase(node) > 0) {
+      if (g_state.nodes_seen.count(node.get()) > 0 || node->IsAllInNodesSeen(g_state.nodes_seen)) {
+        g_state.nodes.push_back(node);
+        GELOGD("Node %s has been resumed by pass %s and added to the pass queue",
+               node->GetName().c_str(), node_2_pass_names.second.c_str());
+      }
+    }
+  }
+}
+
+void PushToRePassIfSeen(NodePtr &node, const std::pair<std::string, BaseNodePass *> &name_to_pass,
+                        std::unordered_set<Node *> &nodes_seen, const std::vector<NodePtr> &nodes_to_re_pass,
+                        GEPass::RepassLevelState &rp_state) {
+  for (const auto &node_to_re_pass : nodes_to_re_pass) {
+    if (node_to_re_pass == nullptr) {
+      GELOGW("Found null re-pass node when executing %s on node %s type %s", name_to_pass.first.c_str(),
+             node->GetName().c_str(), node->GetType().c_str());
+      continue;
+    }
+    if (nodes_seen.count(node_to_re_pass.get()) > 0 || node_to_re_pass->IsAllInNodesSeen(nodes_seen)) {
+      if (rp_state.AddNodeToRepass(node_to_re_pass)) {
+        GELOGD("The node %s will be re-passed.", node_to_re_pass->GetName().c_str());
+        continue;
+      }
+      GELOGD("Node %s has been added to the repass queue, no need to add it again.",
+             node_to_re_pass->GetName().c_str());
+    } else {
+      GELOGD("Not all in-nodes of node %s are seen, don't set re-pass this time",
+             node_to_re_pass->GetName().c_str());
+    }
+  }
+}
+
+void SetFlagOption(NodePassOption option, NamesToPass names_to_pass) {
+  for (auto &name_to_pass : names_to_pass) {
+    name_to_pass.second->SetOption(option, "");
+  }
+}
+
+void ClearOption(NamesToPass names_to_pass) {
+  for (auto &name_to_pass : names_to_pass) {
+    name_to_pass.second->ClearOptions();
+  }
+}
+}  // namespace
+
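The node-to-"pass1,pass2," bookkeeping used by AddImmediateRepassNodesToQueue and AddResumeNodesToQueue above is plain string accumulation keyed by node, so the log can name every pass that requested the action. A standalone sketch under those assumptions follows; std::string keys stand in for NodePtr and all names are illustrative.

// Standalone sketch of the "which passes touched this node" bookkeeping above.
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
  // One entry per (pass, node-it-wants-to-resume) pair, as would be collected
  // from BaseNodePass::GetNodesResume() across all passes.
  std::vector<std::pair<std::string, std::string>> pass_to_resume_node = {
      {"InferShapePass", "exit_1"},
      {"ConstantFoldingPass", "exit_1"},
      {"InferShapePass", "exit_2"},
  };

  // Collapse to node -> "pass1,pass2," exactly like the patch does with
  // resume_nodes_to_pass_names[resume_node].append(name + ",").
  std::unordered_map<std::string, std::string> node_to_pass_names;
  for (const auto &entry : pass_to_resume_node) {
    node_to_pass_names[entry.second].append(entry.first + ",");
  }
  for (const auto &kv : node_to_pass_names) {
    std::cout << kv.first << " resumed by: " << kv.second << '\n';
  }
  return 0;
}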
+Status BaseNodePass::IsolateAndDeleteNode(NodePtr &node, const std::vector<int> &io_map,
+                                          bool is_repass_io_immediately) {
+  if (node == nullptr) {
+    REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid.");
+    GELOGE(FAILED, "[Check][Param] parameter node is nullptr.");
+    return FAILED;
+  }
+  GELOGI("Prepare to isolate and delete node, name:%s, type:%s.", node->GetName().c_str(),
+         node->GetType().c_str());
+  ComputeGraphPtr graph = node->GetOwnerComputeGraph();
+  if (graph == nullptr) {
+    REPORT_INNER_ERROR("E19999", "The owner graph of node:%s must not be null.", node->GetName().c_str());
+    GELOGE(FAILED, "[Get][OwnerComputeGraph] failed, The owner graph of node:%s must not be null.",
+           node->GetName().c_str());
+    return FAILED;
+  }
+
+  is_repass_io_immediately ? AddImmediateRePassNodesWithInOut(node) : AddRePassNodesWithInOut(node);
+
+  if (GraphUtils::IsolateNode(node, io_map) != GRAPH_SUCCESS) {
+    REPORT_CALL_ERROR("E19999", "Isolate Node:%s failed", node->GetName().c_str());
+    GELOGE(FAILED, "[Isolate][Node] %s failed.", node->GetName().c_str());
+    return FAILED;
+  }
+
+  if (GraphUtils::RemoveNodeWithoutRelink(graph, node) != SUCCESS) {
+    REPORT_CALL_ERROR("E19999", "call RemoveNodeWithoutRelink for node:%s failed.", node->GetName().c_str());
+    GELOGE(FAILED, "[Call][RemoveNodeWithoutRelink] for node:%s failed.", node->GetName().c_str());
+    return FAILED;
+  }
+
+  AddNodeDeleted(node);
+  return SUCCESS;
+}
+
+Status GEPass::Run(const NamesToPass &names_to_passes) {
+  if (graph_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "graph_ is nullptr, check invalid.");
+    GELOGE(INTERNAL_ERROR, "[Check][Param] The graph is nullptr");
+    return INTERNAL_ERROR;
+  }
+  if (names_to_passes.empty()) {
+    GELOGW("No passes input, the GEPass will do nothing");
+    return INTERNAL_ERROR;
+  }
+  for (const auto &name_to_pass : names_to_passes) {
+    if (name_to_pass.second == nullptr) {
+      GELOGE(INTERNAL_ERROR, "[Check][Param] There is a null pointer in passes(%s)", name_to_pass.first.c_str());
+      return INTERNAL_ERROR;
+    }
+  }
+
+  if (depth_ > kMaxRecursiveDepth) {
+    GELOGE(PARAM_INVALID,
+           "[Check][Param] The pass for root graph %s will be terminated because too many nesting"
+           " levels(%d) of subgraphs, last subgraph is %s",
+           root_graph_->GetName().c_str(), depth_, graph_->GetName().c_str());
+    return PARAM_INVALID;
+  }
+
+  return RunPassesOneGraph(names_to_passes);
+  // todo: when debug mode is on, find the first node in topo order which is not passed, and give a warning
+}
+
+void NotifyPassGraphStart(const ComputeGraphPtr &graph, const NamesToPass &names_to_pass) {
+  for (auto &name_to_pass : names_to_pass) {
+    name_to_pass.second->OnStartPassGraph(graph);
+  }
+}
+
+Status GEPass::HandleLeakedSuspendNodes(const NamesToPass &names_to_passes, GraphLevelState &g_state) {
+  std::unordered_map<NodePtr, std::string> resume_nodes_to_pass_names;
+  for (auto &name_to_pass : names_to_passes) {
+    name_to_pass.second->init();
+    auto ret = name_to_pass.second->OnSuspendNodesLeaked();
+    if (ret != SUCCESS) {
+      GELOGE(ret, "Internal error with OnSuspendNodesLeaked on pass %s.", name_to_pass.first.c_str());
+      return ret;
+    }
+    for (const auto &resume_node : name_to_pass.second->GetNodesResume()) {
+      resume_nodes_to_pass_names[resume_node].append(name_to_pass.first + ",");
+    }
+  }
+  AddResumeNodesToQueue(resume_nodes_to_pass_names, g_state);
+  return SUCCESS;
+}
+
+Status GEPass::RunPassesOneGraph(const NamesToPass &names_to_passes) {
+  GELOGD("Begin to run pass on graph, passes count %zu", names_to_passes.size());
+  NotifyPassGraphStart(graph_, names_to_passes);
+  GraphLevelState g_state;
+  g_state.re_pass_times = 0;
+  GetAllNodesNoInputEdge(graph_, g_state);
+  GELOGD("Start points count %zu", g_state.nodes.size());
+
+  do {
+    if (!g_state.nodes_suspend.empty()) {
+      auto ret = HandleLeakedSuspendNodes(names_to_passes, g_state);
+      if (ret != SUCCESS) {
+        // already logged inside the callee
+        return ret;
+      }
+      if (g_state.nodes.empty()) {
+        GELOGE(INTERNAL_ERROR, "There are some suspended nodes leaked and no pass resumed them.");
+        return INTERNAL_ERROR;
+      }
+    }
+    auto ret = RunPassesGraphRepass(names_to_passes, g_state);
+    if (ret != SUCCESS) {
+      return ret;
+    }
+  } while (!g_state.nodes_suspend.empty());
+
+  return SUCCESS;
+}
+
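RunPassesOneGraph above wraps the per-round pass loop in an outer do/while that only exits once no suspended nodes remain, giving passes one chance per round (via OnSuspendNodesLeaked) to resume what they suspended. The following is a toy model of that control flow only, with stand-in types rather than GE APIs.

// Toy model of the suspend/resume outer loop in RunPassesOneGraph above.
// All types and names here are illustrative stand-ins, not GE APIs.
#include <iostream>
#include <unordered_set>

enum Status { SUCCESS = 0, INTERNAL_ERROR = 1 };

struct ToyState {
  std::unordered_set<int> suspended;  // leaked suspended nodes
  bool queued_work = false;           // whether anything went back on the queue
};

// Stand-in for HandleLeakedSuspendNodes: gives passes one chance to resume.
void ResumeLeaked(ToyState &s) {
  if (!s.suspended.empty()) {
    s.suspended.clear();   // pretend some pass resumed everything
    s.queued_work = true;  // resumed nodes go back onto the queue
  }
}

Status RunOneGraph(ToyState &s) {
  do {
    if (!s.suspended.empty()) {
      ResumeLeaked(s);
      if (!s.queued_work) {
        return INTERNAL_ERROR;  // leaked suspends and nothing queued: bail out
      }
    }
    s.queued_work = false;  // stand-in for the repass loop draining the queue
  } while (!s.suspended.empty());
  return SUCCESS;
}

int main() {
  ToyState s;
  s.suspended = {42};
  std::cout << (RunOneGraph(s) == SUCCESS ? "done" : "leaked") << '\n';
  return 0;
}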
+Status GEPass::RunPassesGraphRepass(const NamesToPass &names_to_passes, GraphLevelState &g_state) {
+  RepassLevelState rp_state;
+  do {
+    for (auto &node : rp_state.nodes_re_pass) {
+      if (rp_state.nodes_re_pass_set.count(node) > 0) {
+        GELOGD("Add node %s to queue for re-pass", node->GetName().c_str());
+        g_state.AddNodeToQueue(node);
+      }
+    }
+    rp_state.ClearRepass();
+
+    while (!g_state.nodes.empty()) {
+      auto node = g_state.PopFront();
+      if (g_state.nodes_deleted.count(node) > 0) {
+        GELOGD("The node %s was deleted before, skip it.", node->GetName().c_str());
+        continue;
+      }
+      rp_state.EraseNodeFromRepass(node);
+      g_state.nodes_seen.insert(node.get());
+
+      // collect out nodes before running the passes
+      std::unordered_set<NodePtr> out_nodes_before_pass;
+      for (const auto &out_node : node->GetOutNodes()) {
+        out_nodes_before_pass.insert(out_node);
+      }
+      auto ret = RunPassesNodeOnce(node, names_to_passes, g_state, rp_state);
+      if (ret != SUCCESS) {
+        GELOGE(ret, "[Process][Passes] on node %s type %s failed, error code:%u", node->GetName().c_str(),
+               node->GetType().c_str(), ret);
+        return ret;
+      }
+      AddNextIterNodes(node, out_nodes_before_pass, g_state);
+    }
+    AddLastNodesToQueue(g_state);
+  } while ((!rp_state.nodes_re_pass.empty() || !g_state.nodes.empty()) && ++g_state.re_pass_times < kMaxRePassTimes);
+
+  if (g_state.re_pass_times == kMaxRePassTimes) {
+    GELOGW("re_pass_times should not reach %d", kMaxRePassTimes);
+  }
+  GELOGD("All pass runs end");
+  return SUCCESS;
+}
+
+Status GEPass::RunPassesOnSubGraph(const NodePtr &node, const NamesToPass &names_to_passes, bool &has_sub_graph) {
+  auto sub_graph_names = node->GetOpDesc()->GetSubgraphInstanceNames();
+  has_sub_graph = false;
+  for (const auto &name : sub_graph_names) {
+    auto graph = root_graph_->GetSubgraph(name);
+    if (graph == nullptr) {
+      GELOGW("Can not find the sub graph %s from node %s, the pass-process will skip it",
+             name.c_str(), node->GetName().c_str());
+      continue;
+    }
+    has_sub_graph = true;
+    GELOGI("Begin to run passes on the sub graph %s of node %s", name.c_str(), node->GetName().c_str());
+    GEPass pass(graph, root_graph_, depth_ + 1);
+    auto ret = pass.Run(names_to_passes);
+    if (ret != SUCCESS) {
+      GELOGE(ret, "[Run][Passes] for sub graph:%s from node:%s failed", name.c_str(), node->GetName().c_str());
+      return ret;
+    }
+  }
+  return SUCCESS;
+}
+
+Status GEPass::RunPassesNodeOnce(NodePtr &node, const NamesToPass &names_to_passes,
+                                 GraphLevelState &g_state, RepassLevelState &rp_state) {
+  auto ret = RunPassesOnNode(node, names_to_passes, g_state, rp_state);
+  if (ret != SUCCESS) {
+    GELOGE(ret, "[Process][Passes] on node %s type %s failed, error code:%u", node->GetName().c_str(),
+           node->GetType().c_str(), ret);
+    return ret;
+  }
+
+  bool has_sub_graph = false;
+  ret = RunPassesOnSubGraph(node, names_to_passes, has_sub_graph);
+  if (ret != SUCCESS) {
+    GELOGE(ret, "[Run][Passes] on the sub graph of node %s failed", node->GetName().c_str());
+    return ret;
+  }
+
+  if (has_sub_graph) {
+    GELOGD("There are subgraphs on node %s, run passes for the second time", node->GetName().c_str());
+    SetFlagOption(kOptimizeAfterSubGraph, names_to_passes);
+    ret = RunPassesOnNode(node, names_to_passes, g_state, rp_state);
+    if (ret != SUCCESS) {
+      GELOGE(ret, "[Process][Passes] on node %s type %s failed, error code: %u", node->GetName().c_str(),
+             node->GetType().c_str(), ret);
+      return ret;
+    }
+
+    // There is only one option scene, so set and clear options around the `RunPassesOnNode` call.
+    // If there were more than one scene to set options, the `ClearOption` function
+    // should be called each time at the beginning of the iteration.
+    ClearOption(names_to_passes);
+  }
+  return SUCCESS;
+}
+
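RepassLevelState, used by the loop above, pairs a vector (insertion order) with a set (dedup), and the drain loop re-checks set membership so nodes erased mid-round are skipped. A minimal sketch of that ordered-dedup container follows; ints stand in for NodePtr and the names are illustrative only.

// Standalone sketch of the RepassLevelState ordered-dedup container above.
#include <iostream>
#include <unordered_set>
#include <vector>

struct MiniRepassState {
  std::vector<int> ordered;        // preserves the order nodes were scheduled
  std::unordered_set<int> members; // rejects duplicates, allows mid-round erase

  bool Add(int node) {  // mirrors AddNodeToRepass
    if (!members.insert(node).second) {
      return false;  // already scheduled for re-pass
    }
    ordered.push_back(node);
    return true;
  }
  void Erase(int node) { members.erase(node); }  // mirrors EraseNodeFromRepass
};

int main() {
  MiniRepassState rp;
  rp.Add(7);
  rp.Add(3);
  rp.Add(7);  // rejected: already queued
  for (int n : rp.ordered) {
    // The drain loop checks membership, like nodes_re_pass_set.count(node):
    if (rp.members.count(n) > 0) {
      std::cout << "re-pass " << n << '\n';  // prints 7 then 3
    }
  }
  return 0;
}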
+Status GEPass::RunPassesOnNode(NodePtr &node, const NamesToPass &names_to_passes, GraphLevelState &g_state,
+                               RepassLevelState &rp_state) {
+  if (node == nullptr) {
+    REPORT_INNER_ERROR("E19999", "Param node is nullptr, check invalid.");
+    GELOGE(FAILED, "[Check][Param] parameter node is nullptr.");
+    return FAILED;
+  }
+  GELOGD("Begin to run pass for node %s", node->GetName().c_str());
+  for (const auto &name_to_pass : names_to_passes) {
+    GELOGD("Begin to run pass %s for node %s", name_to_pass.first.c_str(), node->GetName().c_str());
+    name_to_pass.second->init();
+    auto result = name_to_pass.second->Run(node);
+    if (result != SUCCESS) {
+      REPORT_CALL_ERROR("E19999", "process pass %s on node:%s failed, ret:%u",
+                        name_to_pass.first.c_str(), node->GetName().c_str(), result);
+      GELOGE(INTERNAL_ERROR, "[Process][Pass] %s on node %s failed, result "
+             "%u, the passes will be terminated immediately.",
+             name_to_pass.first.c_str(), node->GetName().c_str(), result);
+      return result;
+    }
+    if (name_to_pass.second->GetNodesDeleted().count(node) > 0) {
+      GELOGD("The node %s was deleted by pass %s, stop the remaining passes", node->GetName().c_str(),
+             name_to_pass.first.c_str());
+      break;
+    }
+  }
+
+  g_state.nodes_passed.insert(node);
+
+  std::unordered_map<NodePtr, std::string> re_pass_imm_nodes_to_pass_names;
+  std::unordered_map<NodePtr, std::string> resume_nodes_to_pass_names;
+  // if multiple passes re-pass the same node, it would be added to the queue many times, so collect and deduplicate
+  for (const auto &name_to_pass : names_to_passes) {
+    PushToRePassIfSeen(node, name_to_pass, g_state.nodes_seen,
+                       name_to_pass.second->GetNodesNeedRePass(),
+                       rp_state);
+    // collect imm_node && resume_node among these passes
+    for (const auto &imm_node : name_to_pass.second->GetNodesNeedRePassImmediately()) {
+      re_pass_imm_nodes_to_pass_names[imm_node].append(name_to_pass.first + ",");
+    }
+    for (const auto &resume_node : name_to_pass.second->GetNodesResume()) {
+      resume_nodes_to_pass_names[resume_node].append(name_to_pass.first + ",");
+    }
+
+    for (const auto &suspend_node : name_to_pass.second->GetNodesSuspend()) {
+      GELOGD("The iteration suspend of node %s has been set by pass %s", suspend_node->GetName().c_str(),
+             name_to_pass.first.c_str());
+      g_state.nodes_suspend.insert(suspend_node);
+    }
+    const auto &nodes_deleted_by_pass = name_to_pass.second->GetNodesDeleted();
+    g_state.nodes_deleted.insert(nodes_deleted_by_pass.begin(), nodes_deleted_by_pass.end());
+  }
+
+  AddImmediateRepassNodesToQueue(node, re_pass_imm_nodes_to_pass_names, g_state);
+  AddResumeNodesToQueue(resume_nodes_to_pass_names, g_state);
+
+  return SUCCESS;
+}
+}  // namespace ge
diff --git a/ge/graph/passes/base_pass.h b/ge/graph/passes/base_pass.h
index d0f125b2..093e2dce 100644
--- a/ge/graph/passes/base_pass.h
+++ b/ge/graph/passes/base_pass.h
@@ -22,7 +22,6 @@
 #include
 #include
 #include
-
 #include "framework/common/ge_inner_error_codes.h"
 #include "framework/common/types.h"
 #include "graph/compute_graph.h"
@@ -40,6 +39,7 @@ enum NodePassOption {
 };
 
 class BaseNodePass {
+  // todo comments
  public:
   ///
   /// Optimize on one node.
the function can add nodes to the graph, change @@ -51,7 +51,7 @@ class BaseNodePass { virtual ~BaseNodePass() = default; - const std::unordered_set &GetNodesNeedRePass() { return nodes_need_re_pass_; } + const std::vector &GetNodesNeedRePass() { return nodes_need_re_pass_; } const std::unordered_set &GetNodesNeedRePassImmediately() { return nodes_need_re_pass_immediately_; } @@ -61,23 +61,32 @@ class BaseNodePass { const std::unordered_set &GetNodesResume() { return nodes_resume_; } + virtual Status OnSuspendNodesLeaked() { return SUCCESS; } + void SetOption(NodePassOption option, const std::string &value) { options_[option] = value; } void ClearOptions() { options_.clear(); } void init() { nodes_need_re_pass_.clear(); - nodes_deleted_.clear(); nodes_need_re_pass_immediately_.clear(); + nodes_deleted_.clear(); nodes_suspend_.clear(); nodes_resume_.clear(); } + virtual void OnStartPassGraph(const ComputeGraphPtr &graph) { + current_graph_name_ = graph->GetName(); + } + protected: - Status IsolateAndDeleteNode(NodePtr &node, const std::vector &io_map); + const string &GetCurrentGraphName() const { + return current_graph_name_; + } + Status IsolateAndDeleteNode(NodePtr &node, const std::vector &io_map, bool is_repass_io_immediately = false); - Status IsolateAndDeleteNode(NodePtr &node, const std::initializer_list &io_map) { - return IsolateAndDeleteNode(node, std::vector(io_map)); + Status IsolateAndDeleteNode(NodePtr &node, const std::initializer_list &io_map, bool is_repass_io_immediately = false) { + return IsolateAndDeleteNode(node, std::vector(io_map), is_repass_io_immediately); } /// @@ -86,7 +95,7 @@ class BaseNodePass { /// optimized by other passes, call this function. /// @param node /// - void AddRePassNode(const NodePtr &node) { nodes_need_re_pass_.insert(node); } + void AddRePassNode(const NodePtr &node) { nodes_need_re_pass_.emplace_back(node); } /// /// Add a node to be optimized immediately again. If you add a new node to the graph, or @@ -101,14 +110,30 @@ class BaseNodePass { /// @param node /// void AddRePassNodesWithInOut(const NodePtr &node) { + auto in_nodes = node->GetInNodes(); + for (auto &in_node : in_nodes) { + AddRePassNode(in_node); + } AddRePassNode(node); auto out_nodes = node->GetOutNodes(); for (auto &out_node : out_nodes) { AddRePassNode(out_node); } + } + + /// + /// Add a node and it's input/output data nodes to be optimized immediately again. + /// @param node + /// + void AddImmediateRePassNodesWithInOut(const NodePtr &node) { auto in_nodes = node->GetInNodes(); for (auto &in_node : in_nodes) { - AddRePassNode(in_node); + AddImmediateRePassNode(in_node); + } + AddImmediateRePassNode(node); + auto out_nodes = node->GetOutNodes(); + for (auto &out_node : out_nodes) { + AddImmediateRePassNode(out_node); } } @@ -123,34 +148,27 @@ class BaseNodePass { void AddNodeDeleted(const NodePtr &node) { nodes_deleted_.insert(node); } /// - /// If you suspend a node from the graph, especially following node. The remain - /// iterate passes will stop process on the suspend node(if it can be + /// If you postpone a node from the graph, especially following node. The remain + /// iterate passes will stop process on the postpone node(if it can be /// reached by edge connections) till the last one. Obviously it is a waste of - /// time. You can add the suspend nodes by calling this function, to stop the + /// time. You can add the postpone nodes by calling this function, to stop the /// next iterations. 
/// @param node /// void AddNodeSuspend(const NodePtr &node) { nodes_suspend_.insert(node); } - /// - /// If you resume a node from the graph, especially following node. The remain - /// iterate passes will continue process on the resume node(if it can be - /// reached by edge connections) till the last one. - /// You can add the resume nodes by calling this function, to resume the - /// next iterations. - /// @param node - /// void AddNodeResume(const NodePtr &node) { nodes_resume_.insert(node); } bool OptionExists(NodePassOption option) { return options_.count(option) > 0; } private: - std::unordered_set nodes_need_re_pass_; + std::vector nodes_need_re_pass_; std::unordered_set nodes_need_re_pass_immediately_; std::unordered_set nodes_deleted_; std::unordered_set nodes_suspend_; std::unordered_set nodes_resume_; std::map options_; + std::string current_graph_name_; }; using NamesToPass = std::vector>; @@ -160,12 +178,75 @@ class GEPass { explicit GEPass(ComputeGraphPtr &graph) : graph_(graph), root_graph_(graph), depth_(1) {} virtual ~GEPass() = default; Status Run(const NamesToPass &names_to_passes); + /* +* todo +* OneGraph: nodes_deleted, nodes_seen, nodes_passed, nodes_suspended +* RePass: nodes_re_pass +* GraphOneTime: nodes_last +* NodeOneTime: nodes_re_pass_immediately, nodes_resume +*/ + struct GraphLevelState { + std::unordered_set nodes_deleted; + std::unordered_set nodes_seen; + std::unordered_set nodes_passed; + std::unordered_set nodes_suspend; + std::unordered_set nodes_last; + std::deque nodes; + int re_pass_times; + + void AddNodeToQueueFront(NodePtr node) { + nodes_seen.insert(node.get()); + nodes.emplace_front(std::move(node)); + } + + void AddNodeToQueue(NodePtr node) { + nodes_seen.insert(node.get()); + nodes.emplace_back(std::move(node)); + } + void AddNodeToQueueIfNotSeen(NodePtr node) { + if (nodes_seen.insert(node.get()).second) { + nodes.emplace_back(std::move(node)); + } + } + NodePtr PopFront() { + NodePtr node = nodes.front(); + nodes.pop_front(); + return node; + } + }; + struct RepassLevelState { + std::vector nodes_re_pass; + std::unordered_set nodes_re_pass_set; + bool AddNodeToRepass(NodePtr node) { + if (!nodes_re_pass_set.insert(node).second) { + return false; + } + nodes_re_pass.emplace_back(node); + return true; + } + void EraseNodeFromRepass(NodePtr node) { + nodes_re_pass_set.erase(node); + } + void ClearRepass() { + nodes_re_pass_set.clear(); + nodes_re_pass.clear(); + } + }; + struct GraphOneTimeLevelState { + std::unordered_set nodes_last; + }; private: GEPass(ComputeGraphPtr &graph, ComputeGraphPtr &root_graph, int depth) : graph_(graph), root_graph_(root_graph), depth_(depth) {} + Status RunPassesNodeOnce(NodePtr &node, const NamesToPass &names_to_passes, + GraphLevelState &g_state, RepassLevelState &rp_state); + Status RunPassesGraphRepass(const NamesToPass &names_to_passes, GraphLevelState &g_state); Status RunPassesOneGraph(const NamesToPass &names_to_passes); Status RunPassesOnSubGraph(const NodePtr &node, const NamesToPass &names_to_passes, bool &has_sub_graph); + Status RunPassesOnNode(NodePtr &node, const NamesToPass &names_to_passes, GraphLevelState &g_state, + RepassLevelState &rp_state); + Status HandleLeakedSuspendNodes(const NamesToPass &names_to_passes, GraphLevelState &g_state); ComputeGraphPtr graph_; ComputeGraphPtr root_graph_; int depth_; diff --git a/ge/graph/passes/infer_base_pass.cc b/ge/graph/passes/infer_base_pass.cc index 25c45677..636cf2ab 100644 --- a/ge/graph/passes/infer_base_pass.cc +++ 
b/ge/graph/passes/infer_base_pass.cc @@ -86,6 +86,9 @@ bool InferBasePass::NeedInfer(const NodePtr &node) const { return true; } void InferBasePass::AddChangedNodesImmediateRepass(const std::set &changed_nodes) { // need passed_nodes set to solve the problem that multi-input operators do repass in advance. // when there is passed_nodes set, wo should call AddImmediateRePassNode for all nodes in changed_nodes. + for (const auto &node_ele : changed_nodes) { + AddImmediateRePassNode(node_ele); + } } graphStatus InferBasePass::InferAndUpdate(NodePtr &node, bool before_subgraph, std::set &changed_nodes) { diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc index a5e64519..deaebf4f 100755 --- a/ge/graph/passes/infershape_pass.cc +++ b/ge/graph/passes/infershape_pass.cc @@ -1,175 +1,370 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "graph/passes/infershape_pass.h" -#include "common/util/error_manager/error_manager.h" -#include "framework/common/debug/ge_log.h" -#include "analyzer/analyzer.h" -#include "framework/common/util.h" -#include "graph/shape_refiner.h" -#include "graph/utils/graph_utils.h" -#include "graph/utils/node_utils.h" -#include "common/omg_util.h" -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/tensor_utils.h" -#include "graph/utils/type_utils.h" - -namespace ge { - -void SerialShapeRange(const GeTensorDescPtr &desc, std::string &desc_str) { - desc_str += "["; - std::vector> shape_range; - (void)desc->GetShapeRange(shape_range); - for (const auto &pair : shape_range) { - desc_str += "{"; - desc_str += std::to_string(pair.first) + "," + std::to_string(pair.second); - desc_str += "},"; - } - desc_str += "]"; - shape_range.clear(); - (void)desc->GetOriginShapeRange(shape_range); - for (const auto &pair : shape_range) { - desc_str += ",{"; - desc_str += std::to_string(pair.first) + "," + std::to_string(pair.second); - desc_str += "},"; - } -} - -std::string GetInTensorInfoWithString(const ge::NodePtr &node) { - ge::OpDescPtr op_desc = node->GetOpDesc(); - std::stringstream ss; - ss << "{"; - int32_t in_idx = 0; - for (const auto &input_desc : op_desc->GetAllInputsDescPtr()) { - if (input_desc == nullptr) { - in_idx++; - continue; - } - if (in_idx > 0) { - ss << " "; - } - ss << "input_" << in_idx << " " << "tensor: ["; - ss << "(shape:[" << input_desc->MutableShape().ToString() << "]),"; - ss << "(format:" << TypeUtils::FormatToSerialString(input_desc->GetFormat()) << "),"; - ss << "(dtype:" << TypeUtils::DataTypeToSerialString(input_desc->GetDataType()) << "),"; - ss << "(origin_shape:" << input_desc->GetOriginShape().ToString() << "),"; - ss << "(origin_format:" << TypeUtils::FormatToSerialString(input_desc->GetOriginFormat()) << "),"; - ss << "(origin_dtype:" << TypeUtils::DataTypeToSerialString(input_desc->GetOriginDataType()) << "),"; - string range_str; - SerialShapeRange(input_desc, range_str); - ss << "(shape_range:" << range_str << ")]"; - 
in_idx++; - } - return ss.str(); -} - -Status InferShapePass::Run(NodePtr &node) { - // kOptimizeAfterSubGraph exist means after subgraph - auto ret = ShapeRefiner::InferShapeAndType(node, !OptionExists(kOptimizeAfterSubGraph)); - if (ret != GRAPH_SUCCESS) { - // select INFERSHAPE failed info - auto graph = node->GetOwnerComputeGraph(); - GE_CHECK_NOTNULL(graph); - auto root_graph = ge::GraphUtils::FindRootGraph(graph); - GE_CHECK_NOTNULL(root_graph); - analyzer::DataInfo analyze_info{root_graph->GetSessionID(), root_graph->GetGraphID(), - analyzer::INFER_SHAPE, node, "InferShapeFailed!"}; - (void)Analyzer::GetInstance()->DoAnalyze(analyze_info); - (void)Analyzer::GetInstance()->SaveAnalyzerDataToFile(root_graph->GetSessionID(), - root_graph->GetGraphID()); - - REPORT_CALL_ERROR("E19999", "Call InferShapeAndType for node:%s(%s) failed, input_tensor:%s", - node->GetName().c_str(), node->GetType().c_str(), GetInTensorInfoWithString(node).c_str()); - GELOGE(GE_GRAPH_INFERSHAPE_FAILED, "[Call][InferShapeAndType] for node:%s(%s) failed, input_tensor:%s", - node->GetName().c_str(), node->GetType().c_str(), GetInTensorInfoWithString(node).c_str()); - return GE_GRAPH_INFERSHAPE_FAILED; - } - - GE_CHK_STATUS_RET_NOLOG(RePassLoopNode(node)); - bool need_repass = false; - auto has_attr = AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_NEED_INFER_AGAIN, need_repass); - if (has_attr) { - if (!OptionExists(kOptimizeAfterSubGraph)) { - return SUCCESS; - } - if (need_repass) { - AddImmediateRePassNode(node); - GELOGD("Node %s need repass immediately.", node->GetName().c_str()); - } else { - // clear attr on while - node->GetOpDesc()->DelAttr(ATTR_NAME_NEED_INFER_AGAIN); - } - } - return SUCCESS; -} - -Status InferShapePass::RePassLoopNode(const NodePtr &node) { - const auto RePassNode = [&](const std::set &re_pass_types) { - for (auto &n : node->GetOutDataNodes()) { - GE_CHECK_NOTNULL(n); - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(n, node_type), "[Get][OriginalType] of node:%s failed.", n->GetName().c_str()); - if (re_pass_types.count(node_type) > 0) { - AddImmediateRePassNode(n); - (void)AttrUtils::SetBool(n->GetOpDesc(), ATTR_NAME_NEED_INFER_AGAIN, false); - GELOGD("Node %s need repass immediately after %s.", n->GetName().c_str(), node->GetName().c_str()); - } - } - return SUCCESS; - }; - - const auto ExProcNode = [&](const std::set &proc_types, - const std::function &proc_func, - const std::string &info) { - for (auto &n : node->GetOutDataNodes()) { - GE_CHECK_NOTNULL(n); - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(n, node_type), "[Get][OriginalType] of node:%s failed.", n->GetName().c_str()); - if (proc_types.count(node_type) > 0) { - proc_func(this, n); - GELOGD("Node %s %s after %s.", n->GetName().c_str(), info.c_str(), node->GetName().c_str()); - } - } - return SUCCESS; - }; - - std::string node_type; - GE_CHK_STATUS_RET(GetOriginalType(node, node_type), - "[Get][OriginalType] of node:%s failed.", node->GetName().c_str()); - if (kNextIterationOpTypes.count(node_type) > 0) { - return RePassNode(kMergeOpTypes); // Re-Pass Merge - } - - if (kMergeOpTypes.count(node_type) > 0) { - if (node->GetOpDesc()->HasAttr(ATTR_NAME_NEED_INFER_AGAIN)) { - node->GetOpDesc()->DelAttr(ATTR_NAME_NEED_INFER_AGAIN); - return RePassNode(kSwitchOpTypes); // Re-Pass Switch - } - return SUCCESS; - } - - if (kSwitchOpTypes.count(node_type) > 0) { - if (node->GetOpDesc()->HasAttr(ATTR_NAME_NEED_INFER_AGAIN)) { - node->GetOpDesc()->DelAttr(ATTR_NAME_NEED_INFER_AGAIN); - return 
ExProcNode(kExitOpTypes, &InferShapePass::AddNodeResume, "need resume");  // Resume Exit
-    } else {
-      return ExProcNode(kExitOpTypes, &InferShapePass::AddNodeSuspend, "need suspend");  // Suspend Exit
-    }
-  }
-
-  return SUCCESS;
-}
-}  // namespace ge
+/**
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph/passes/infershape_pass.h"
+#include "common/util/error_manager/error_manager.h"
+#include "framework/common/debug/ge_log.h"
+#include "analyzer/analyzer.h"
+#include "framework/common/util.h"
+#include "graph/shape_refiner.h"
+#include "graph/utils/graph_utils.h"
+#include "graph/utils/node_utils.h"
+#include "common/omg_util.h"
+#include "graph/debug/ge_attr_define.h"
+#include "graph/utils/tensor_utils.h"
+#include "graph/utils/type_utils.h"
+
+#include "external/graph/operator_factory.h"
+
+namespace ge {
+namespace {
+constexpr int kSwitchExitAnchorIndex = 0;
+constexpr int kSwitchPredAnchorIndex = 1;
+void SerialShapeRange(const GeTensorDescPtr &desc, std::string &desc_str) {
+  desc_str += "[";
+  std::vector<std::pair<int64_t, int64_t>> shape_range;
+  (void)desc->GetShapeRange(shape_range);
+  for (const auto &pair : shape_range) {
+    desc_str += "{";
+    desc_str += std::to_string(pair.first) + "," + std::to_string(pair.second);
+    desc_str += "},";
+  }
+  desc_str += "]";
+  shape_range.clear();
+  (void)desc->GetOriginShapeRange(shape_range);
+  for (const auto &pair : shape_range) {
+    desc_str += ",{";
+    desc_str += std::to_string(pair.first) + "," + std::to_string(pair.second);
+    desc_str += "},";
+  }
+}
+void UpdateShapeAndDType(const GeTensorDescPtr &src, GeTensorDescPtr &dst) {
+  dst->SetOriginShape(src->GetOriginShape());
+  dst->SetShape(src->GetShape());
+  dst->SetDataType(src->GetDataType());
+  dst->SetOriginDataType(src->GetOriginDataType());
+  vector<std::pair<int64_t, int64_t>> src_shape_range;
+  src->GetShapeRange(src_shape_range);
+  dst->SetShapeRange(src_shape_range);
+  dst->SetOriginShapeRange(src_shape_range);
+  ge::TensorUtils::SetRealDimCnt(*dst, static_cast<uint32_t>(src->GetShape().GetDims().size()));
+}
+}  // namespace
+
+std::string InferShapePass::SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const {
+  std::stringstream ss;
+  ss << "(shape:[" << tensor_desc->MutableShape().ToString() << "]),";
+  ss << "(format:" << TypeUtils::FormatToSerialString(tensor_desc->GetFormat()) << "),";
+  ss << "(dtype:" << TypeUtils::DataTypeToSerialString(tensor_desc->GetDataType()) << "),";
+  ss << "(origin_shape:" << tensor_desc->GetOriginShape().ToString() << "),";
+  ss << "(origin_format:" << TypeUtils::FormatToSerialString(tensor_desc->GetOriginFormat()) << "),";
+  ss << "(origin_dtype:" << TypeUtils::DataTypeToSerialString(tensor_desc->GetOriginDataType()) << "),";
+  string range_str;
+  SerialShapeRange(tensor_desc, range_str);
+  ss << "(shape_range:" << range_str << ")";
+  return ss.str();
+}
+Status InferShapePass::SuspendV1LoopExitNodes(const NodePtr &node) {
+  if (node->GetType() != SWITCH) {
+    return SUCCESS;
+  }
+  auto pred_node =
NodeUtils::GetInDataNodeByIndex(*node, kSwitchPredAnchorIndex); + GE_CHECK_NOTNULL(pred_node); + if (pred_node->GetType() != LOOPCOND) { + return SUCCESS; + } + + for (const auto &anchor_2_node : NodeUtils::GetOutDataNodesWithAnchorByIndex(*node, kSwitchExitAnchorIndex)) { + GELOGI("Found v1 loop when infershape, suspend Exit node %s, type %s.", anchor_2_node.second->GetName().c_str(), + anchor_2_node.second->GetType().c_str()); + auto &suspend_nodes = graphs_2_suspend_nodes_[GetCurrentGraphName()]; + if (suspend_nodes.nodes_set.insert(anchor_2_node.second).second) { + suspend_nodes.nodes.push(anchor_2_node.second); + AddNodeSuspend(anchor_2_node.second); + } + } + return SUCCESS; +} + +Status InferShapePass::Infer(NodePtr &node) { + auto ret = InferShapeAndType(node); + if (ret != GRAPH_SUCCESS) { + auto graph = node->GetOwnerComputeGraph(); + GE_CHECK_NOTNULL(graph); + auto root_graph = ge::GraphUtils::FindRootGraph(graph); + GE_CHECK_NOTNULL(root_graph); + analyzer::DataInfo analyze_info{root_graph->GetSessionID(), root_graph->GetGraphID(), + analyzer::INFER_SHAPE, node, "InferShapeFailed!"}; + (void)Analyzer::GetInstance()->DoAnalyze(analyze_info); + (void)Analyzer::GetInstance()->SaveAnalyzerDataToFile(root_graph->GetSessionID(), + root_graph->GetGraphID()); + REPORT_CALL_ERROR("E19999", "Call InferShapeAndType for node:%s(%s) failed", node->GetName().c_str(), + node->GetType().c_str()); + GELOGE(GE_GRAPH_INFERSHAPE_FAILED, "[Call][InferShapeAndType] for node:%s(%s) failed", node->GetName().c_str(), + node->GetType().c_str()); + return GE_GRAPH_INFERSHAPE_FAILED; + } + return SUCCESS; +} + +graphStatus InferShapePass::InferShapeAndType(NodePtr &node) { + auto ret = SuspendV1LoopExitNodes(node); + if (ret != SUCCESS) { + GELOGE(ret, "Suspend V1 loop exit nodes failed."); + return ret; + } + bool is_unknown_graph = node->GetOwnerComputeGraph()->GetGraphUnknownFlag(); + auto opdesc = node->GetOpDesc(); + if (node->Verify() != GRAPH_SUCCESS) { + REPORT_CALL_ERROR("E19999", "Verifying %s failed.", node->GetName().c_str()); + GELOGE(GRAPH_FAILED, "[Call][Verify] Verifying %s failed.", node->GetName().c_str()); + return GRAPH_FAILED; + } + Operator op = OpDescUtils::CreateOperatorFromNode(node); + + if (!is_unknown_graph) { + auto inference_context = ShapeRefiner::CreateInferenceContext(node); + GE_CHECK_NOTNULL(inference_context); + GELOGD("create context for node:%s, marks %zu", node->GetName().c_str(), inference_context->GetMarks().size()); + op.SetInferenceContext(inference_context); + } + + graphStatus status = CallInferShapeFunc(node, op); + if (status != GRAPH_NODE_NEED_REPASS && status != GRAPH_PARAM_INVALID && status != GRAPH_SUCCESS) { + // node like netoutput return param_invalid, but valid ? + return GE_GRAPH_INFERSHAPE_FAILED; + } + UpdateCurNodeOutputDesc(node); + if (!is_unknown_graph) { + auto ctx_after_infer = op.GetInferenceContext(); + if (ctx_after_infer != nullptr) { + GELOGD("[%s] after infershape. mark:%zu", node->GetName().c_str(), ctx_after_infer->GetMarks().size()); + if (!ctx_after_infer->GetOutputHandleShapesAndTypes().empty() || !ctx_after_infer->GetMarks().empty()) { + GELOGD("[%s] set inference context after. mark:%zu", node->GetName().c_str(), + ctx_after_infer->GetMarks().size()); + ShapeRefiner::PushToContextMap(node, ctx_after_infer); + } + } + } + + return (status == GRAPH_NODE_NEED_REPASS) ? 
GRAPH_NODE_NEED_REPASS : GRAPH_SUCCESS; +} + +void InferShapePass::UpdateCurNodeOutputDesc(NodePtr &node) { + auto op_desc = node->GetOpDesc(); + for (const auto &out_anchor : node->GetAllOutDataAnchors()) { + auto output_tensor = op_desc->MutableOutputDesc(out_anchor->GetIdx()); + GE_IF_BOOL_EXEC(output_tensor == nullptr, continue); + GE_IF_BOOL_EXEC(output_tensor->MutableShape().GetDims().empty(), + output_tensor->SetOriginShape(output_tensor->GetShape())); + + ge::TensorUtils::SetRealDimCnt(*output_tensor, static_cast(output_tensor->GetOriginShape().GetDims() + .size())); + output_tensor->SetOriginDataType(output_tensor->GetDataType()); + // set output origin shape range + std::vector> range; + (void)output_tensor->GetShapeRange(range); + output_tensor->SetOriginShapeRange(range); + GELOGD("node name is %s, origin shape is %ld, origin format is %s, origin data type is %s", + node->GetName().c_str(), output_tensor->GetOriginShape().GetShapeSize(), + TypeUtils::FormatToSerialString(output_tensor->GetOriginFormat()).c_str(), + TypeUtils::DataTypeToSerialString(output_tensor->GetOriginDataType()).c_str()); + } +} + +bool InferShapePass::SameTensorDesc(const GeTensorDescPtr &src, const GeTensorDescPtr &dst) { + // check shape range + vector> src_shape_range; + vector> dst_shape_range; + src->GetShapeRange(src_shape_range); + dst->GetShapeRange(dst_shape_range); + if (src_shape_range.size() != dst_shape_range.size()) { + GELOGI("Src shape range size is %zu, dst shape range size is %zu, not same.", src_shape_range.size(), + dst_shape_range.size()); + return false; + } + for (size_t i = 0; i < src_shape_range.size(); ++i) { + if (src_shape_range[i].first != dst_shape_range[i].first || + src_shape_range[i].second != dst_shape_range[i].second) { + GELOGI("Current dim %zu. Src shape range is [%lu-%lu], dst shape range is [%lu-%lu], not same.", + i, src_shape_range[i].first, src_shape_range[i].second, dst_shape_range[i].first, dst_shape_range[i].second); + return false; + } + } + + // check shape + auto src_shape = src->GetShape(); + auto dst_shape = dst->GetShape(); + if (src_shape.GetDims() != dst_shape.GetDims() || src->GetOriginShape().GetDims() != dst->GetOriginShape().GetDims() || + src->GetDataType() != dst->GetDataType() || src->GetOriginDataType() != dst->GetOriginDataType()) { + GELOGD( + "Src shape is %s, origin_shape is %s, data_type is %s, origin data_type is %s; " + "Dst shape is %s, origin_shape is %s, data_type is %s, original data_type is %s, not same.", + src_shape.ToString().c_str(), src->GetOriginShape().ToString().c_str(), + TypeUtils::DataTypeToSerialString(src->GetDataType()).c_str(), + TypeUtils::DataTypeToSerialString(src->GetOriginDataType()).c_str(), dst_shape.ToString().c_str(), + dst->GetOriginShape().ToString().c_str(), TypeUtils::DataTypeToSerialString(dst->GetDataType()).c_str(), + TypeUtils::DataTypeToSerialString(dst->GetOriginDataType()).c_str()); + return false; + } + return true; +} + +graphStatus InferShapePass::UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) { + changed = !SameTensorDesc(src, dst); + // refresh src itself + src->SetOriginShape(src->GetShape()); + src->SetOriginDataType(src->GetDataType()); + TensorUtils::SetRealDimCnt(*src, static_cast(src->GetOriginShape().GetDims().size())); + vector> src_shape_range; + src->GetShapeRange(src_shape_range); + src->SetOriginShapeRange(src_shape_range); + + if (!changed) { + GELOGD("Peer dst tensor_desc is same as src tensor_desc. 
No need update."); + return SUCCESS; + } + UpdateShapeAndDType(src, dst); + GELOGD( + "UpdatePeerInputDesc from src Node: shape: [%s], datatype: %s, original datatype is %s." + "To dst Node: shape: [%s], datatype: %s, original datatype is %s.", + src->GetShape().ToString().c_str(), TypeUtils::DataTypeToSerialString(src->GetDataType()).c_str(), + TypeUtils::DataTypeToSerialString(src->GetOriginDataType()).c_str(), dst->GetShape().ToString().c_str(), + TypeUtils::DataTypeToSerialString(dst->GetDataType()).c_str(), + TypeUtils::DataTypeToSerialString(dst->GetOriginDataType()).c_str()); + return SUCCESS; +} + +graphStatus InferShapePass::CallInferShapeFunc(NodePtr &node, Operator &op) { + auto op_desc = node->GetOpDesc(); + const auto &op_type = op_desc->GetType(); + auto ret = op_desc->CallInferFunc(op); + if (ret == GRAPH_PARAM_INVALID) { + // Op ir no infer func, try to get infer func from operator factory + auto node_op = ge::OperatorFactory::CreateOperator("node_op", op_desc->GetType()); + if (node_op.IsEmpty()) { + GELOGW("get op from OperatorFactory fail. opType: %s", op_type.c_str()); + return ret; + } + + GELOGD("get op from OperatorFactory success. opType: %s", op_type.c_str()); + auto temp_op_desc = ge::OpDescUtils::GetOpDescFromOperator(node_op); + node_op.BreakConnect(); + if (temp_op_desc == nullptr) { + REPORT_CALL_ERROR("E19999", "GetOpDescFromOperator failed, return nullptr."); + GELOGE(GRAPH_FAILED, "[Get][OpDesc] temp op desc is null"); + return GRAPH_FAILED; + } + if (!op_desc->UpdateInputName(temp_op_desc->GetAllInputName())) { + GELOGW("InferShapeAndType UpdateInputName failed"); + for (const auto &out_desc : op_desc->GetAllOutputsDescPtr()) { + if (out_desc != nullptr && out_desc->GetShape().GetDims().empty()) { + break; + } + return GRAPH_SUCCESS; + } + } + if (!op_desc->UpdateOutputName(temp_op_desc->GetAllOutputName())) { + GELOGW("InferShapeAndType UpdateOutputName failed"); + } + op_desc->AddInferFunc(temp_op_desc->GetInferFunc()); + ret = op_desc->CallInferFunc(op); + GELOGI("op CallInferFunc second. 
ret: %u", ret); + } + return ret; +} + +graphStatus InferShapePass::UpdateOutputFromSubgraphs(const std::vector &src, GeTensorDescPtr &dst) { + GELOGD("Enter update parent node shape for class branch op process"); + // check sub_graph shape.If not same ,do unknown shape process + auto ref_out_tensor = src.at(0); + ge::GeShape &ref_out_tensor_shape = ref_out_tensor->MutableShape(); + for (auto &tensor : src) { + if (ref_out_tensor->GetDataType() != tensor->GetDataType()) { + REPORT_INNER_ERROR("E19999", "Does not support diff dtype among all ref output, shape:%s", + ref_out_tensor_shape.ToString().c_str()); + GELOGE(GRAPH_FAILED, "[Check][Param] node does not support diff dtype output"); + return GRAPH_FAILED; + } + auto shape = tensor->MutableShape(); + if (shape.GetDims().size() != ref_out_tensor_shape.GetDims().size()) { + GELOGD("Shape from subgraph size: %lu, ref_out_tensor_shape size: %lu", shape.GetShapeSize(), + ref_out_tensor_shape.GetShapeSize()); + ref_out_tensor_shape = GeShape(UNKNOWN_RANK); + break; + } + for (size_t j = 0; j < ref_out_tensor_shape.GetDims().size(); j++) { + if (ref_out_tensor_shape.GetDim(j) == shape.GetDim(j)) { + continue; + } + GELOGD("j: %zu ,shape from subgraph size: %lu, ref_out_tensor_shape size: %lu", j, shape.GetShapeSize(), + ref_out_tensor_shape.GetShapeSize()); + (void)ref_out_tensor_shape.SetDim(j, UNKNOWN_DIM); + } + } + UpdateShapeAndDType(ref_out_tensor, dst); + return GRAPH_SUCCESS; +} +graphStatus InferShapePass::UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) { + // check sub_graph shape. Get max for update. + if (src.empty()) { + GELOGI("Src subgraph shape is empty."); + return SUCCESS; + } + + int64_t max_size = 0; + size_t max_shape_index = 0; + auto &ref_out_tensor = src.at(0); + for (size_t j = 0; j < src.size(); ++j) { + auto &tensor = src.at(j); + if (ref_out_tensor->GetDataType() != tensor->GetDataType()) { + REPORT_INNER_ERROR("E19999", "node does not support diff dtype among all ref output"); + GELOGE(GRAPH_FAILED, "[Check][Param] node does not support diff dtype among all ref output"); + return GRAPH_FAILED; + } + + auto shape = tensor->MutableShape(); + int64_t size = 1; + for (auto dim : shape.GetDims()) { + if (dim != 0 && INT64_MAX / dim < size) { + REPORT_INNER_ERROR("E19999", "The shape:%s size overflow", shape.ToString().c_str()); + GELOGE(PARAM_INVALID, "[Check][Overflow] The shape size overflow"); + return PARAM_INVALID; + } + size *= dim; + } + + if (size > max_size) { + max_size = size; + max_shape_index = j; + } + } + UpdateShapeAndDType(src.at(max_shape_index), dst); + return GRAPH_SUCCESS; +} +Status InferShapePass::OnSuspendNodesLeaked() { + auto iter = graphs_2_suspend_nodes_.find(GetCurrentGraphName()); + if (iter == graphs_2_suspend_nodes_.end()) { + GELOGI("Current graph %s no suspend node.", GetCurrentGraphName().c_str()); + return SUCCESS; + } + if (!iter->second.nodes.empty()) { + AddNodeResume(iter->second.PopSuspendedNode()); + } + return SUCCESS; +} +} // namespace ge diff --git a/ge/graph/passes/infershape_pass.h b/ge/graph/passes/infershape_pass.h index 9c5d432d..00d90775 100644 --- a/ge/graph/passes/infershape_pass.h +++ b/ge/graph/passes/infershape_pass.h @@ -1,38 +1,56 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ -#define GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ - -#include "graph/passes/base_pass.h" - -namespace ge { -class InferShapePass : public BaseNodePass { - public: - /// - /// Entry of the InferShapePass optimizer - /// @param [in] graph: Input ComputeGraph - /// @return SUCCESS: Execution succeed - /// @return OTHERS: Execution failed - /// @author - /// - Status Run(ge::NodePtr &node) override; - - private: - Status RePassLoopNode(const NodePtr &node); -}; -} // namespace ge -#endif // GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ +#define GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ + +#include "graph/passes/infer_base_pass.h" +#include + +namespace ge { +class InferShapePass : public InferBasePass { + public: + std::string SerialTensorInfo(const GeTensorDescPtr &tensor_desc) const override; + graphStatus Infer(NodePtr &node) override; + + graphStatus UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) override; + graphStatus UpdateOutputFromSubgraphs(const std::vector &src, GeTensorDescPtr &dst) override; + graphStatus UpdateOutputFromSubgraphsForMultiDims(const std::vector &src, + GeTensorDescPtr &dst) override; + + Status OnSuspendNodesLeaked() override; + + private: + graphStatus InferShapeAndType(NodePtr &node); + graphStatus CallInferShapeFunc(NodePtr &node, Operator &op); + bool SameTensorDesc(const GeTensorDescPtr &src, const GeTensorDescPtr &dst); + void UpdateCurNodeOutputDesc(NodePtr &node); + Status SuspendV1LoopExitNodes(const NodePtr &node); + struct SuspendNodes { + std::stack nodes; + std::unordered_set nodes_set; + + NodePtr PopSuspendedNode() { + auto top_node = nodes.top(); + nodes.pop(); + nodes_set.erase(top_node); + return top_node; + } + }; + std::map graphs_2_suspend_nodes_; +}; +} // namespace ge +#endif // GE_GRAPH_PASSES_INFERSHAPE_PASS_H_ diff --git a/ge/graph/preprocess/graph_preprocess.cc b/ge/graph/preprocess/graph_preprocess.cc index 2efe623e..446af9bf 100644 --- a/ge/graph/preprocess/graph_preprocess.cc +++ b/ge/graph/preprocess/graph_preprocess.cc @@ -1999,6 +1999,22 @@ Status GraphPrepare::CheckUserInput(const std::vector &user_input) { Status GraphPrepare::InferShapeForPreprocess() { GELOGI("Start infershape for preprocess."); + // Prepare dummy_shape for v1 control_flow op before infershape + for (const auto &node : compute_graph_->GetAllNodes()) { + string type; + GetOriginalType(node, type); + if (type == MERGE || 
+      for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); ++i) {
+        GELOGD("Prepare for infershape: update %s input_shape as dummy.", node->GetName().c_str());
+        NodeUtils::UpdateInputShape(*node, i, GeShape(DUMMY_SHAPE));
+      }
+    } else if (type == WHILE) {
+      for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); ++i) {
+        GELOGD("Prepare for infershape: update %s output_shape as dummy.", node->GetName().c_str());
+        NodeUtils::UpdateOutputShape(*node, i, GeShape(DUMMY_SHAPE));
+      }
+    }
+  }
   GEPass ge_passes(compute_graph_);
   NamesToPass names_to_passes;
   AssertPass assert_pass;
diff --git a/tests/ut/ge/graph/passes/addn_pass_unittest.cc b/tests/ut/ge/graph/passes/addn_pass_unittest.cc
index 6107a7d8..39029e8c 100644
--- a/tests/ut/ge/graph/passes/addn_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/addn_pass_unittest.cc
@@ -72,7 +72,7 @@ TEST(UtestGraphPassesAddnPass, null_pass) {
   AddNPass *addn_pass = nullptr;
   NamesToPass names_to_pass;
   names_to_pass.emplace_back("Test", addn_pass);
-  EXPECT_EQ(pass.Run(names_to_pass), SUCCESS);
+  EXPECT_EQ(pass.Run(names_to_pass), INTERNAL_ERROR);
 }
 
 TEST(UtestGraphPassesAddnPass, null_graph) {
diff --git a/tests/ut/ge/graph/passes/base_pass_unittest.cc b/tests/ut/ge/graph/passes/base_pass_unittest.cc
index c687e07f..3b0235f5 100644
--- a/tests/ut/ge/graph/passes/base_pass_unittest.cc
+++ b/tests/ut/ge/graph/passes/base_pass_unittest.cc
@@ -1,523 +1,903 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#define protected public -#include "graph/passes/base_pass.h" -#undef protected - -#include "external/graph/ge_error_codes.h" -#include "framework/common/ge_inner_error_codes.h" -#include "framework/common/types.h" -#include "graph/node.h" -#include "graph/utils/graph_utils.h" -#include "graph_builder_utils.h" - -template class std::unordered_set; - -namespace ge { -class UtestTestPass : public BaseNodePass { - public: - UtestTestPass() = default; - UtestTestPass(bool dead_loop) : dead_loop_(dead_loop), run_times_(0) {} - - Status Run(NodePtr &node) override { - ++run_times_; - iter_nodes_.push_back(node); - auto iter = names_to_add_del_.find(node->GetName()); - if (iter != names_to_add_del_.end()) { - for (const auto &node_name : iter->second) { - auto del_node = node->GetOwnerComputeGraph()->FindNode(node_name); - GraphUtils::IsolateNode(del_node, {0}); - AddNodeDeleted(del_node); - } - } - iter = names_to_add_repass_.find(node->GetName()); - if (iter != names_to_add_repass_.end()) { - auto all_nodes = node->GetOwnerComputeGraph()->GetAllNodes(); - for (const auto &node_name : iter->second) { - for (auto &node_re_pass : all_nodes) { - if (node_re_pass->GetName() == node_name) { - AddRePassNode(node_re_pass); - break; - } - } - } - if (!dead_loop_) { - names_to_add_repass_.erase(iter); - } - } - // simulate infershape pass - if(node->GetType() == WHILE){ - bool need_repass = false; - AttrUtils::GetBool(node->GetOpDesc(),"_need_infer_again", need_repass); - if(!OptionExists(kOptimizeAfterSubGraph)){ - return SUCCESS; - } - if(need_repass){ - AttrUtils::SetBool(node->GetOpDesc(),"_need_infer_again", false); - AddImmediateRePassNode(node); - } - else{ - // clear attr on while - node->GetOpDesc()->DelAttr("_need_infer_again"); - } - } - return SUCCESS; - } - void clear() { iter_nodes_.clear(); } - std::vector GetIterNodes() { return iter_nodes_; } - - void AddRePassNodeName(const std::string &iter_node, const std::string &re_pass_node) { - names_to_add_repass_[iter_node].insert(re_pass_node); - } - void AddDelNodeName(const std::string &iter_node, const std::string &del_node) { - names_to_add_del_[iter_node].insert(del_node); - } - unsigned int GetRunTimes() { return run_times_; } - - private: - std::vector iter_nodes_; - std::map> names_to_add_del_; - std::map> names_to_add_repass_; - bool dead_loop_; - unsigned int run_times_; -}; - -class TestDelPass : public BaseNodePass { - public: - Status Run(NodePtr &node) override { return SUCCESS; } -}; - -class UTESTGraphPassesBasePass : public testing::Test { - protected: - UTESTGraphPassesBasePass() { - auto p1 = new UtestTestPass; - names_to_pass_.push_back(std::make_pair("test1", p1)); - } - void SetUp() override { - for (auto &name_to_pass : names_to_pass_) { - dynamic_cast(name_to_pass.second)->clear(); - } - } - ~UTESTGraphPassesBasePass() override { - for (auto &name_to_pass : names_to_pass_) { - delete name_to_pass.second; - } - } - NamesToPass names_to_pass_; -}; -/// reshape1 -/// | -/// add1 -/// / \. 
-/// | | -/// data1 const1 -ComputeGraphPtr BuildGraph1() { - auto builder = ut::GraphBuilder("g1"); - auto data = builder.AddNode("data1", DATA, 0, 1); - auto a1 = builder.AddNode("add1", ADD, 2, 1); - auto c1 = builder.AddNode("const1", CONSTANT, 0, 1); - auto r1 = builder.AddNode("reshape1", RESHAPE, 1, 1); - - builder.AddDataEdge(data, 0, a1, 0); - builder.AddDataEdge(c1, 0, a1, 1); - builder.AddDataEdge(a1, 0, r1, 0); - - return builder.GetGraph(); -} - -/// sum1 -/// / \. -/// / \. -/// / \. -/// reshape1 addn1 -/// | c | -/// add1 <--- shape1 -/// / \ | -/// | | | -/// data1 const1 const2 -ComputeGraphPtr BuildGraph2() { - auto builder = ut::GraphBuilder("g1"); - auto data1 = builder.AddNode("data1", DATA, 0, 1); - auto const1 = builder.AddNode("const1", CONSTANT, 0, 1); - auto const2 = builder.AddNode("const2", CONSTANT, 0, 1); - auto add1 = builder.AddNode("add1", ADD, 2, 1); - auto shape1 = builder.AddNode("shape1", SHAPE, 1, 1); - auto reshape1 = builder.AddNode("reshape1", RESHAPE, 1, 1); - auto addn1 = builder.AddNode("addn1", ADDN, 1, 1); - auto sum1 = builder.AddNode("sum1", SUM, 2, 1); - - builder.AddDataEdge(data1, 0, add1, 0); - builder.AddDataEdge(const1, 0, add1, 1); - builder.AddDataEdge(const2, 0, shape1, 0); - builder.AddControlEdge(shape1, add1); - builder.AddDataEdge(add1, 0, reshape1, 0); - builder.AddDataEdge(shape1, 0, addn1, 0); - builder.AddDataEdge(reshape1, 0, sum1, 0); - builder.AddDataEdge(addn1, 0, sum1, 1); - - return builder.GetGraph(); -} - -/// rnextiteration -/// | | -/// merge -/// | -/// data1 -ComputeGraphPtr BuildGraph3() { - auto builder = ut::GraphBuilder("g1"); - auto data1 = builder.AddNode("data1", DATA, 0, 1); - auto merge1 = builder.AddNode("merge1", MERGE, 2, 1); - auto next1 = builder.AddNode("next1", NEXTITERATION, 1, 1); - - builder.AddDataEdge(data1, 0, merge1, 0); - builder.AddDataEdge(merge1, 0, next1, 0); - builder.AddDataEdge(next1, 0, merge1, 1); - builder.AddControlEdge(merge1, next1); - builder.AddControlEdge(next1, merge1); - - return builder.GetGraph(); -} - -void CheckIterOrder(UtestTestPass *pass, std::vector> &nodes_layers) { - std::unordered_set layer_nodes; - size_t layer_index = 0; - for (const auto &node : pass->GetIterNodes()) { - layer_nodes.insert(node->GetName()); - EXPECT_LT(layer_index, nodes_layers.size()); - if (layer_nodes == nodes_layers[layer_index]) { - layer_index++; - layer_nodes.clear(); - } - } - EXPECT_EQ(layer_index, nodes_layers.size()); -} - -/// Op1 -/// | -/// Merge -/// / \. -/// Op2 Op3 -TEST_F(UTESTGraphPassesBasePass, del_isolate_fail) { - auto builder = ut::GraphBuilder("g1"); - auto merge_node = builder.AddNode("Merge", MERGE, 1, 1); - auto node1 = builder.AddNode("Op1", RELU, 1, 1); - auto node2 = builder.AddNode("Op2", CONVOLUTION, 1, 1); - auto node3 = builder.AddNode("Op3", CONVOLUTION, 1, 1); - - GraphUtils::AddEdge(node1->GetOutDataAnchor(0), merge_node->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node3->GetInDataAnchor(0)); - - EXPECT_EQ(node1->GetOutDataNodes().size(), 1); - - TestDelPass del_pass; - auto ret = del_pass.IsolateAndDeleteNode(merge_node, {0, -1}); - EXPECT_EQ(ret, FAILED); - - OpDescPtr op_desc = std::make_shared("merge", MERGE); - NodePtr node = shared_ptr(new (std::nothrow) Node(op_desc, nullptr)); - ret = del_pass.IsolateAndDeleteNode(node, {0, -1}); - EXPECT_EQ(ret, FAILED); -} - -/// Op1 -/// | -/// Merge -/// / \. 
-/// Op2 Op3 -TEST_F(UTESTGraphPassesBasePass, del_isolate_success) { - auto builder = ut::GraphBuilder("g1"); - auto merge_node = builder.AddNode("Merge", MERGE, 1, 2); - auto node1 = builder.AddNode("Op1", RELU, 1, 1); - auto node2 = builder.AddNode("Op2", CONVOLUTION, 1, 1); - auto node3 = builder.AddNode("Op3", CONVOLUTION, 1, 1); - - GraphUtils::AddEdge(node1->GetOutDataAnchor(0), merge_node->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node3->GetInDataAnchor(0)); - - EXPECT_EQ(node1->GetOutDataNodes().size(), 1); - - TestDelPass del_pass; - auto ret = del_pass.IsolateAndDeleteNode(merge_node, {0, -1}); - EXPECT_EQ(ret, SUCCESS); -} - -TEST_F(UTESTGraphPassesBasePass, data_graph) { - auto graph = BuildGraph1(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); - auto *pass = dynamic_cast(names_to_pass_[0].second); - - EXPECT_EQ(pass->GetIterNodes().size(), 4); - std::vector> layers; - layers.push_back({"data1", "const1"}); - layers.push_back({"add1"}); - layers.push_back({"reshape1"}); - CheckIterOrder(pass, layers); -} - -TEST_F(UTESTGraphPassesBasePass, graph_with_control_link) { - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); - auto *pass = dynamic_cast(names_to_pass_[0].second); - - EXPECT_EQ(pass->GetIterNodes().size(), 8); - EXPECT_EQ(pass->GetIterNodes().at(3)->GetName(), "shape1"); - - std::vector> layers; - layers.push_back({"data1", "const1", "const2"}); - layers.push_back({"shape1"}); - layers.push_back({"add1", "addn1", "reshape1"}); - layers.push_back({"sum1"}); - CheckIterOrder(pass, layers); -} - -TEST_F(UTESTGraphPassesBasePass, re_pass_after) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddRePassNodeName("add1", "sum1"); - test_pass.AddRePassNodeName("shape1", "sum1"); - test_pass.AddRePassNodeName("shape1", "add1"); - test_pass.AddRePassNodeName("data1", "add1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 8); -} - -TEST_F(UTESTGraphPassesBasePass, re_pass_before) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddRePassNodeName("add1", "data1"); - - auto graph = BuildGraph1(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 5); - EXPECT_EQ(test_pass.GetIterNodes().at(2)->GetName(), "add1"); - EXPECT_EQ(test_pass.GetIterNodes().at(3)->GetName(), "reshape1"); - EXPECT_EQ(test_pass.GetIterNodes().at(4)->GetName(), "data1"); -} - -TEST_F(UTESTGraphPassesBasePass, re_pass_before_multi_times) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddRePassNodeName("add1", "data1"); - test_pass.AddRePassNodeName("add1", "const1"); - test_pass.AddRePassNodeName("reshape1", "data1"); - - auto graph = BuildGraph1(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 6); - EXPECT_EQ(test_pass.GetIterNodes().at(2)->GetName(), "add1"); - EXPECT_EQ(test_pass.GetIterNodes().at(3)->GetName(), "reshape1"); -} - 
-TEST_F(UTESTGraphPassesBasePass, del_after) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddDelNodeName("add1", "sum1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 7); -} - -TEST_F(UTESTGraphPassesBasePass, del_after_multiple) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddDelNodeName("add1", "sum1"); - test_pass.AddDelNodeName("add1", "reshape1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 6); -} - -TEST_F(UTESTGraphPassesBasePass, del_after_break_link) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddDelNodeName("shape1", "add1"); - test_pass.AddDelNodeName("shape1", "addn1"); - test_pass.AddRePassNodeName("shape1", "shape1"); - test_pass.AddRePassNodeName("shape1", "reshape1"); - test_pass.AddRePassNodeName("shape1", "sum1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 7); -} - -TEST_F(UTESTGraphPassesBasePass, del_self_and_after) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddDelNodeName("shape1", "add1"); - test_pass.AddDelNodeName("shape1", "addn1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 4); -} - -TEST_F(UTESTGraphPassesBasePass, del_before) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddDelNodeName("reshape1", "add1"); - test_pass.AddDelNodeName("sum1", "addn1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 8); -} - -TEST_F(UTESTGraphPassesBasePass, re_pass_and_del) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddRePassNodeName("add1", "sum1"); - test_pass.AddDelNodeName("reshape1", "sum1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetIterNodes().size(), 7); -} -/* -TEST_F(UTESTGraphPassesBasePass, dead_loop) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(true); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - test_pass.AddRePassNodeName("add1", "sum1"); - test_pass.AddRePassNodeName("sum1", "add1"); - - auto graph = BuildGraph2(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); - EXPECT_EQ(test_pass.GetRunTimes(), 1007); -} -*/ - -TEST_F(UTESTGraphPassesBasePass, while_loop) { - NamesToPass names_to_pass; - auto test_pass = UtestTestPass(true); - names_to_pass.push_back(std::make_pair("test", &test_pass)); - - auto graph = BuildGraph3(); - auto ge_pass = GEPass(graph); - EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); -} - -/// data1 const -/// \ / -/// while -/// 
/ \. -/// | | -/// cast1 cast2 -ComputeGraphPtr BuildWhileGraph1() { - // build sub graph - auto builder_sub = ut::GraphBuilder("sub"); - auto data_1 = builder_sub.AddNode("data_1", DATA, 0, 1); - auto data_2 = builder_sub.AddNode("data_2", DATA, 0, 1); - auto add = builder_sub.AddNode("add", ADD, 2, 1); - - builder_sub.AddDataEdge(data_1, 0, add, 0); - builder_sub.AddDataEdge(data_2, 0, add, 1); - auto sub_graph = builder_sub.GetGraph(); - sub_graph->SetName("while_sub"); - // build root graph - auto builder = ut::GraphBuilder("g1"); - auto data = builder.AddNode("data1", DATA, 0, 1); - auto const_op = builder.AddNode("const_op", CONSTANT, 0, 1); - auto c1 = builder.AddNode("cast1", CAST, 1, 1); - auto c2 = builder.AddNode("cast2", CAST, 1, 1); - // add while op - auto tensor_desc = std::make_shared(); - tensor_desc->SetShape(GeShape({1,1,1,1})); - tensor_desc->SetFormat(FORMAT_ND); - tensor_desc->SetDataType(DT_INT32); - - auto op_desc = std::make_shared("while", WHILE); - for (int i = 0; i < 2; ++i) { - op_desc->AddInputDesc(tensor_desc->Clone()); - } - for (int i = 0; i < 2; ++i) { - op_desc->AddOutputDesc(tensor_desc->Clone()); - } - AttrUtils::SetBool(op_desc,"_need_infer_again", true); - op_desc->AddSubgraphName(sub_graph->GetName()); - op_desc->SetSubgraphInstanceName(0,sub_graph->GetName()); - auto root_graph = builder.GetGraph(); - auto while_op = root_graph->AddNode(op_desc); - - builder.AddDataEdge(data, 0, while_op, 0); - builder.AddDataEdge(const_op, 0, while_op, 1); - builder.AddDataEdge(while_op, 0, c1, 0); - builder.AddDataEdge(while_op, 1, c2, 0); - sub_graph->SetParentGraph(root_graph); - sub_graph->SetParentNode(while_op); - root_graph->AddSubgraph(sub_graph); - return root_graph; -} - -TEST_F(UTESTGraphPassesBasePass, while_infershape) { -NamesToPass names_to_pass; -auto test_pass = UtestTestPass(); -names_to_pass.push_back(std::make_pair("test", &test_pass)); - -auto graph = BuildWhileGraph1(); -auto ge_pass = GEPass(graph); -auto while_node = graph->FindNode("while"); -EXPECT_EQ(while_node->GetOpDesc()->GetSubgraphInstanceNames().size(),1); -EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); -} - -} // namespace ge +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#define protected public +#include "graph/passes/base_pass.h" +#undef protected + +#include "framework/common/types.h" +#include "graph/node.h" +#include "graph/utils/graph_utils.h" +#include "graph_builder_utils.h" + +template class std::unordered_set; + +namespace ge { +class UtestTestPass : public BaseNodePass { + public: + UtestTestPass() = default; + UtestTestPass(bool dead_loop) : dead_loop_(dead_loop), run_times_(0) {} + + Status Run(NodePtr &node) override { + ++run_times_; + iter_nodes_.push_back(node); + auto iter = names_to_add_del_.find(node->GetName()); + if (iter != names_to_add_del_.end()) { + for (const auto &node_name : iter->second) { + auto del_node = node->GetOwnerComputeGraph()->FindNode(node_name); + GraphUtils::IsolateNode(del_node, {0}); + AddNodeDeleted(del_node); + } + } + iter = names_to_add_repass_.find(node->GetName()); + if (iter != names_to_add_repass_.end()) { + auto all_nodes = node->GetOwnerComputeGraph()->GetAllNodes(); + for (const auto &node_name : iter->second) { + for (auto &node_re_pass : all_nodes) { + if (node_re_pass->GetName() == node_name) { + AddRePassNode(node_re_pass); + break; + } + } + } + if (!dead_loop_) { + names_to_add_repass_.erase(iter); + } + } + + iter = names_to_add_repass_immediate_.find(node->GetName()); + if (iter != names_to_add_repass_immediate_.end()) { + auto all_nodes = node->GetOwnerComputeGraph()->GetAllNodes(); + for (const auto &node_name : iter->second) { + for (auto &node_re_pass : all_nodes) { + if (node_re_pass->GetName() == node_name) { + AddImmediateRePassNode(node_re_pass); + break; + } + } + } + if (!dead_loop_) { + names_to_add_repass_immediate_.erase(iter); + } + } + + iter = names_to_add_suspend_.find(node->GetName()); + if (iter != names_to_add_suspend_.end()) { + auto all_nodes = node->GetOwnerComputeGraph()->GetAllNodes(); + for (const auto &node_name : iter->second) { + for (auto &node_re_pass : all_nodes) { + if (node_re_pass->GetName() == node_name) { + AddNodeSuspend(node_re_pass); + break; + } + } + } + if (!dead_loop_) { + names_to_add_suspend_.erase(iter); + } + } + + iter = names_to_add_resume_.find(node->GetName()); + if (iter != names_to_add_resume_.end()) { + auto all_nodes = node->GetOwnerComputeGraph()->GetAllNodes(); + for (const auto &node_name : iter->second) { + for (auto &node_re_pass : all_nodes) { + if (node_re_pass->GetName() == node_name) { + AddNodeResume(node_re_pass); + break; + } + } + } + if (!dead_loop_) { + names_to_add_resume_.erase(iter); + } + } + // simulate infershape pass + if(node->GetType() == WHILE){ + bool need_repass = false; + AttrUtils::GetBool(node->GetOpDesc(),"_need_infer_again", need_repass); + if(!OptionExists(kOptimizeAfterSubGraph)){ + return SUCCESS; + } + if(need_repass){ + AttrUtils::SetBool(node->GetOpDesc(),"_need_infer_again", false); + AddImmediateRePassNode(node); + } + else{ + // clear attr on while + node->GetOpDesc()->DelAttr("_need_infer_again"); + } + } + return SUCCESS; + } + + Status OnSuspendNodesLeaked() override { + // resume all node remain in suspend_nodes when leaked + auto compute_graph = (iter_nodes_.size() > 0) ? 
iter_nodes_[0]->GetOwnerComputeGraph() : nullptr; + if (compute_graph == nullptr) { + return SUCCESS; + } + + for (const auto &node_name : names_to_add_resume_onleaked_) { + auto node_to_resume = compute_graph->FindNode(node_name); + AddNodeResume(node_to_resume); + } + return SUCCESS; + } + void clear() { iter_nodes_.clear(); } + std::vector GetIterNodes() { return iter_nodes_; } + + void AddRePassNodeName(const std::string &iter_node, const std::string &re_pass_node) { + names_to_add_repass_[iter_node].insert(re_pass_node); + } + void AddDelNodeName(const std::string &iter_node, const std::string &del_node) { + names_to_add_del_[iter_node].insert(del_node); + } + void AddRePassImmediateNodeName(const std::string &iter_node, const std::string &re_pass_node) { + names_to_add_repass_immediate_[iter_node].insert(re_pass_node); + } + + void AddSuspendNodeName(const std::string &iter_node, const std::string &suspend_node) { + names_to_add_suspend_[iter_node].insert(suspend_node); + } + void AddResumeNodeName(const std::string &iter_node, const std::string &resume_node) { + names_to_add_resume_[iter_node].insert(resume_node); + } + void AddResumeNodeNameOnLeaked(const std::string &resume_node) { + names_to_add_resume_onleaked_.insert(resume_node); + } + + unsigned int GetRunTimes() { return run_times_; } + + private: + std::vector iter_nodes_; + std::map> names_to_add_del_; + std::map> names_to_add_repass_; + std::map> names_to_add_repass_immediate_; + std::map> names_to_add_suspend_; + std::map> names_to_add_resume_; + std::unordered_set names_to_add_resume_onleaked_; + + bool dead_loop_; + unsigned int run_times_; +}; + +class TestDelPass : public BaseNodePass { + public: + Status Run(NodePtr &node) override { return SUCCESS; } +}; + +class UTESTGraphPassesBasePass : public testing::Test { + protected: + UTESTGraphPassesBasePass() { + auto p1 = new UtestTestPass; + names_to_pass_.push_back(std::make_pair("test1", p1)); + } + void SetUp() override { + for (auto &name_to_pass : names_to_pass_) { + dynamic_cast(name_to_pass.second)->clear(); + } + } + ~UTESTGraphPassesBasePass() override { + for (auto &name_to_pass : names_to_pass_) { + delete name_to_pass.second; + } + } + NamesToPass names_to_pass_; +}; +/// reshape1 +/// | +/// add1 +/// / \. +/// | | +/// data1 const1 +ComputeGraphPtr BuildGraph1() { + auto builder = ut::GraphBuilder("g1"); + auto data = builder.AddNode("data1", DATA, 0, 1); + auto a1 = builder.AddNode("add1", ADD, 2, 1); + auto c1 = builder.AddNode("const1", CONSTANT, 0, 1); + auto r1 = builder.AddNode("reshape1", RESHAPE, 1, 1); + + builder.AddDataEdge(data, 0, a1, 0); + builder.AddDataEdge(c1, 0, a1, 1); + builder.AddDataEdge(a1, 0, r1, 0); + + return builder.GetGraph(); +} + +/// sum1 +/// / \. +/// / \. +/// / \. 
+/// reshape1 addn1 +/// | c | +/// add1 <--- shape1 +/// / \ | +/// | | | +/// data1 const1 const2 +ComputeGraphPtr BuildGraph2() { + auto builder = ut::GraphBuilder("g1"); + auto data1 = builder.AddNode("data1", DATA, 0, 1); + auto const1 = builder.AddNode("const1", CONSTANT, 0, 1); + auto const2 = builder.AddNode("const2", CONSTANT, 0, 1); + auto add1 = builder.AddNode("add1", ADD, 2, 1); + auto shape1 = builder.AddNode("shape1", SHAPE, 1, 1); + auto reshape1 = builder.AddNode("reshape1", RESHAPE, 1, 1); + auto addn1 = builder.AddNode("addn1", ADDN, 1, 1); + auto sum1 = builder.AddNode("sum1", SUM, 2, 1); + + builder.AddDataEdge(data1, 0, add1, 0); + builder.AddDataEdge(const1, 0, add1, 1); + builder.AddDataEdge(const2, 0, shape1, 0); + builder.AddControlEdge(shape1, add1); + builder.AddDataEdge(add1, 0, reshape1, 0); + builder.AddDataEdge(shape1, 0, addn1, 0); + builder.AddDataEdge(reshape1, 0, sum1, 0); + builder.AddDataEdge(addn1, 0, sum1, 1); + + return builder.GetGraph(); +} + +/// rnextiteration +/// | | +/// merge +/// | +/// data1 +ComputeGraphPtr BuildGraph3() { + auto builder = ut::GraphBuilder("g1"); + auto data1 = builder.AddNode("data1", DATA, 0, 1); + auto merge1 = builder.AddNode("merge1", MERGE, 2, 1); + auto next1 = builder.AddNode("next1", NEXTITERATION, 1, 1); + + builder.AddDataEdge(data1, 0, merge1, 0); + builder.AddDataEdge(merge1, 0, next1, 0); + builder.AddDataEdge(next1, 0, merge1, 1); + builder.AddControlEdge(merge1, next1); + builder.AddControlEdge(next1, merge1); + + return builder.GetGraph(); +} + +/// cast1--shape1 +/// / +/// data1 +/// \ +/// transdata1--shape2 +ComputeGraphPtr BuildGraph4() { + auto builder = ut::GraphBuilder("g1"); + auto data1 = builder.AddNode("data1", DATA, 0, 1); + auto cast1 = builder.AddNode("cast1", CAST, 1, 1); + auto shape1 = builder.AddNode("shape1", SHAPE, 1, 1); + auto transdata1 = builder.AddNode("transdata1", TRANSDATA, 1, 1); + auto shape2 = builder.AddNode("shape2", SHAPE, 1, 1); + + builder.AddDataEdge(data1, 0, cast1, 0); + builder.AddDataEdge(data1, 0, transdata1, 0); + builder.AddDataEdge(cast1, 0, shape1, 0); + builder.AddDataEdge(transdata1, 0, shape2, 0); + return builder.GetGraph(); +} + +void CheckIterOrder(UtestTestPass *pass, std::vector> &nodes_layers) { + std::unordered_set layer_nodes; + size_t layer_index = 0; + for (const auto &node : pass->GetIterNodes()) { + layer_nodes.insert(node->GetName()); + EXPECT_LT(layer_index, nodes_layers.size()); + if (layer_nodes == nodes_layers[layer_index]) { + layer_index++; + layer_nodes.clear(); + } + } + EXPECT_EQ(layer_index, nodes_layers.size()); +} + +/// Op1 +/// | +/// Merge +/// / \. 
+/// Op2 Op3 +TEST_F(UTESTGraphPassesBasePass, del_isolate_fail) { + auto builder = ut::GraphBuilder("g1"); + auto merge_node = builder.AddNode("Merge", MERGE, 1, 1); + auto node1 = builder.AddNode("Op1", RELU, 1, 1); + auto node2 = builder.AddNode("Op2", CONVOLUTION, 1, 1); + auto node3 = builder.AddNode("Op3", CONVOLUTION, 1, 1); + + GraphUtils::AddEdge(node1->GetOutDataAnchor(0), merge_node->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node3->GetInDataAnchor(0)); + + EXPECT_EQ(node1->GetOutDataNodes().size(), 1); + + TestDelPass del_pass; + auto ret = del_pass.IsolateAndDeleteNode(merge_node, {0, -1}); + EXPECT_EQ(ret, FAILED); + + OpDescPtr op_desc = std::make_shared("merge", MERGE); + NodePtr node = shared_ptr(new (std::nothrow) Node(op_desc, nullptr)); + ret = del_pass.IsolateAndDeleteNode(node, {0, -1}); + EXPECT_EQ(ret, FAILED); +} + +/// Op1 +/// | +/// Merge +/// / \. +/// Op2 Op3 +TEST_F(UTESTGraphPassesBasePass, del_isolate_success) { + auto builder = ut::GraphBuilder("g1"); + auto merge_node = builder.AddNode("Merge", MERGE, 1, 2); + auto node1 = builder.AddNode("Op1", RELU, 1, 1); + auto node2 = builder.AddNode("Op2", CONVOLUTION, 1, 1); + auto node3 = builder.AddNode("Op3", CONVOLUTION, 1, 1); + + GraphUtils::AddEdge(node1->GetOutDataAnchor(0), merge_node->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge_node->GetOutDataAnchor(0), node3->GetInDataAnchor(0)); + + EXPECT_EQ(node1->GetOutDataNodes().size(), 1); + + TestDelPass del_pass; + auto ret = del_pass.IsolateAndDeleteNode(merge_node, {0, -1}); + EXPECT_EQ(ret, SUCCESS); +} + +TEST_F(UTESTGraphPassesBasePass, data_graph) { + auto graph = BuildGraph1(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + auto *pass = dynamic_cast(names_to_pass_[0].second); + + EXPECT_EQ(pass->GetIterNodes().size(), 4); + std::vector> layers; + layers.push_back({"data1", "const1"}); + layers.push_back({"add1"}); + layers.push_back({"reshape1"}); + CheckIterOrder(pass, layers); +} + +TEST_F(UTESTGraphPassesBasePass, graph_with_control_link) { + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + auto *pass = dynamic_cast(names_to_pass_[0].second); + + EXPECT_EQ(pass->GetIterNodes().size(), 8); + EXPECT_EQ(pass->GetIterNodes().at(3)->GetName(), "shape1"); + + std::vector> layers; + layers.push_back({"data1", "const1", "const2"}); + layers.push_back({"shape1"}); + layers.push_back({"add1", "addn1", "reshape1"}); + layers.push_back({"sum1"}); + CheckIterOrder(pass, layers); +} + +TEST_F(UTESTGraphPassesBasePass, re_pass_after) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddRePassNodeName("add1", "sum1"); + test_pass.AddRePassNodeName("shape1", "sum1"); + test_pass.AddRePassNodeName("shape1", "add1"); + test_pass.AddRePassNodeName("data1", "add1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 8); +} + +TEST_F(UTESTGraphPassesBasePass, re_pass_before) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddRePassNodeName("add1", "data1"); + + auto graph = 
BuildGraph1(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 5); + EXPECT_EQ(test_pass.GetIterNodes().at(2)->GetName(), "add1"); + EXPECT_EQ(test_pass.GetIterNodes().at(3)->GetName(), "reshape1"); + EXPECT_EQ(test_pass.GetIterNodes().at(4)->GetName(), "data1"); +} + +TEST_F(UTESTGraphPassesBasePass, re_pass_before_multi_times) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddRePassNodeName("add1", "data1"); + test_pass.AddRePassNodeName("add1", "const1"); + test_pass.AddRePassNodeName("reshape1", "data1"); + + auto graph = BuildGraph1(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 6); + EXPECT_EQ(test_pass.GetIterNodes().at(2)->GetName(), "add1"); + EXPECT_EQ(test_pass.GetIterNodes().at(3)->GetName(), "reshape1"); +} + +TEST_F(UTESTGraphPassesBasePass, del_after) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddDelNodeName("add1", "sum1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 7); +} + +TEST_F(UTESTGraphPassesBasePass, del_after_multiple) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddDelNodeName("add1", "sum1"); + test_pass.AddDelNodeName("add1", "reshape1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 6); +} + +TEST_F(UTESTGraphPassesBasePass, del_after_break_link) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddDelNodeName("shape1", "add1"); + test_pass.AddDelNodeName("shape1", "addn1"); + test_pass.AddRePassNodeName("shape1", "shape1"); + test_pass.AddRePassNodeName("shape1", "reshape1"); + test_pass.AddRePassNodeName("shape1", "sum1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 7); +} + +TEST_F(UTESTGraphPassesBasePass, del_self_and_after) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddDelNodeName("shape1", "add1"); + test_pass.AddDelNodeName("shape1", "addn1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 6); +} + +TEST_F(UTESTGraphPassesBasePass, del_before) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddDelNodeName("reshape1", "add1"); + test_pass.AddDelNodeName("sum1", "addn1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 8); +} + +TEST_F(UTESTGraphPassesBasePass, re_pass_and_del) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddRePassNodeName("add1", "sum1"); + 
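+  // sum1 is queued for re-pass when add1 runs, but reshape1 then deletes it;
+  // the pending re-pass of a deleted node is expected to be dropped, so only
+  // the remaining 7 of the 8 nodes are visited below.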
test_pass.AddDelNodeName("reshape1", "sum1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 7); +} +/* +TEST_F(UTESTGraphPassesBasePass, dead_loop) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(true); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + test_pass.AddRePassNodeName("add1", "sum1"); + test_pass.AddRePassNodeName("sum1", "add1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetRunTimes(), 1007); +} +*/ + +TEST_F(UTESTGraphPassesBasePass, while_loop) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(true); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + auto graph = BuildGraph3(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); +} + +/// data1 const +/// \ / +/// while +/// / \. +/// | | +/// cast1 cast2 +ComputeGraphPtr BuildWhileGraph1() { + // build sub graph + auto builder_sub = ut::GraphBuilder("sub"); + auto data_1 = builder_sub.AddNode("data_1", DATA, 0, 1); + auto data_2 = builder_sub.AddNode("data_2", DATA, 0, 1); + auto add = builder_sub.AddNode("add", ADD, 2, 1); + + builder_sub.AddDataEdge(data_1, 0, add, 0); + builder_sub.AddDataEdge(data_2, 0, add, 1); + auto sub_graph = builder_sub.GetGraph(); + sub_graph->SetName("while_sub"); + // build root graph + auto builder = ut::GraphBuilder("g1"); + auto data = builder.AddNode("data1", DATA, 0, 1); + auto const_op = builder.AddNode("const_op", CONSTANT, 0, 1); + auto c1 = builder.AddNode("cast1", CAST, 1, 1); + auto c2 = builder.AddNode("cast2", CAST, 1, 1); + // add while op + auto tensor_desc = std::make_shared(); + tensor_desc->SetShape(GeShape({1,1,1,1})); + tensor_desc->SetFormat(FORMAT_ND); + tensor_desc->SetDataType(DT_INT32); + + auto op_desc = std::make_shared("while", WHILE); + for (int i = 0; i < 2; ++i) { + op_desc->AddInputDesc(tensor_desc->Clone()); + } + for (int i = 0; i < 2; ++i) { + op_desc->AddOutputDesc(tensor_desc->Clone()); + } + AttrUtils::SetBool(op_desc,"_need_infer_again", true); + op_desc->AddSubgraphName(sub_graph->GetName()); + op_desc->SetSubgraphInstanceName(0,sub_graph->GetName()); + auto root_graph = builder.GetGraph(); + auto while_op = root_graph->AddNode(op_desc); + + builder.AddDataEdge(data, 0, while_op, 0); + builder.AddDataEdge(const_op, 0, while_op, 1); + builder.AddDataEdge(while_op, 0, c1, 0); + builder.AddDataEdge(while_op, 1, c2, 0); + sub_graph->SetParentGraph(root_graph); + sub_graph->SetParentNode(while_op); + root_graph->AddSubgraph(sub_graph); + return root_graph; +} + +TEST_F(UTESTGraphPassesBasePass, while_infershape) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + auto graph = BuildWhileGraph1(); + auto ge_pass = GEPass(graph); + auto while_node = graph->FindNode("while"); + EXPECT_EQ(while_node->GetOpDesc()->GetSubgraphInstanceNames().size(),1); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); +} + +TEST_F(UTESTGraphPassesBasePass, re_pass_pre_node_immediately) { + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // repass pre_node immediately + test_pass->AddRePassImmediateNodeName("reshape1", "add1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + + EXPECT_EQ(test_pass->GetIterNodes().size(), 9);// todo + 
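+  // 8 graph nodes plus one extra visit: the immediate re-pass requested by
+  // reshape1 is expected to run add1 again before the queue drains, as the
+  // layer ordering below shows.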
+  std::vector<std::unordered_set<std::string>> layers;
+  layers.push_back({"data1", "const1", "const2"});
+  layers.push_back({"shape1"});
+  layers.push_back({"add1", "addn1"});
+  layers.push_back({"reshape1", "add1", "sum1"});
+  CheckIterOrder(test_pass, layers);
+}
+
+TEST_F(UTESTGraphPassesBasePass, re_pass_cur_node_immediately) {
+  auto graph = BuildGraph2();
+  auto ge_pass = GEPass(graph);
+  auto *test_pass = dynamic_cast<UtestTestPass *>(names_to_pass_[0].second);
+  // re-pass cur_node immediately
+  test_pass->AddRePassImmediateNodeName("reshape1", "reshape1");
+  EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS);
+
+  EXPECT_EQ(test_pass->GetIterNodes().size(), 9);
+  std::vector<std::unordered_set<std::string>> layers;
+  layers.push_back({"data1", "const1", "const2"});
+  layers.push_back({"shape1"});
+  layers.push_back({"add1", "addn1"});
+  layers.push_back({"reshape1"});
+  layers.push_back({"reshape1", "sum1"});
+  CheckIterOrder(test_pass, layers);
+}
+
+TEST_F(UTESTGraphPassesBasePass, re_pass_next_node_immediately) {
+  auto graph = BuildGraph2();
+  auto ge_pass = GEPass(graph);
+  auto *test_pass = dynamic_cast<UtestTestPass *>(names_to_pass_[0].second);
+  // re-pass next_node immediately
+  test_pass->AddRePassImmediateNodeName("reshape1", "sum1");
+  // re-pass a node after next_node immediately
+  test_pass->AddRePassImmediateNodeName("add1", "sum1");
+  EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS);
+
+  EXPECT_EQ(test_pass->GetIterNodes().size(), 8);
+  std::vector<std::unordered_set<std::string>> layers;
+  layers.push_back({"data1", "const1", "const2"});
+  layers.push_back({"shape1"});
+  layers.push_back({"add1", "addn1"});
+  layers.push_back({"reshape1", "sum1"});
+  CheckIterOrder(test_pass, layers);
+}
+/**
+ * A->B->C
+ * If node B suspends its pre-node A and node C later resumes A, the suspend is
+ * effectively useless, so the iteration order follows the normal order;
+ * once C resumes A, A is passed again.
+ */
+TEST_F(UTESTGraphPassesBasePass, B_suspend_pre_node_A_then_C_resume_A) {
+  auto graph = BuildGraph2();
+  auto ge_pass = GEPass(graph);
+  auto *test_pass = dynamic_cast<UtestTestPass *>(names_to_pass_[0].second);
+  // add1->reshape1->sum1
+  test_pass->AddSuspendNodeName("reshape1", "add1");
+  test_pass->AddResumeNodeName("sum1", "add1");
+  EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS);
+  EXPECT_EQ(test_pass->GetIterNodes().size(), 9);
+  std::vector<std::unordered_set<std::string>> layers;
+  layers.push_back({"data1", "const1", "const2"});
+  layers.push_back({"shape1"});
+  layers.push_back({"add1", "addn1"});
+  layers.push_back({"reshape1", "sum1"});
+  layers.push_back({"add1"});
+  CheckIterOrder(test_pass, layers);
+}
+
+/**
+ * A->B->C
+ * If node B suspends its pre-node A and B itself resumes A, the suspend is
+ * effectively useless, so the iteration order follows the normal order;
+ * once B resumes A, A is passed again.
+ */
+TEST_F(UTESTGraphPassesBasePass, B_suspend_pre_node_A_then_B_resume_A) {
+  auto graph = BuildGraph2();
+  auto ge_pass = GEPass(graph);
+  auto *test_pass = dynamic_cast<UtestTestPass *>(names_to_pass_[0].second);
+  // add1->reshape1->sum1
+  test_pass->AddSuspendNodeName("reshape1", "add1");
+  test_pass->AddResumeNodeName("reshape1", "add1");
+  EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS);
+  EXPECT_EQ(test_pass->GetIterNodes().size(), 9);
+  std::vector<std::unordered_set<std::string>> layers;
+  layers.push_back({"data1", "const1", "const2"});
+  layers.push_back({"shape1"});
+  layers.push_back({"add1", "addn1"});
+  layers.push_back({"reshape1", "sum1", "add1"});
+  CheckIterOrder(test_pass, layers);
+}
+
+/**
+ * A->B->C
+ * If node B resumes C (which is not suspended), the resume is a useless
+ * operation and C will not pass again.
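+ * Resume requests are matched against the current suspend set, so a resume for
+ * a node that was never suspended is expected to leave the iteration order
+ * untouched.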
+ */ +TEST_F(UTESTGraphPassesBasePass, B_resume_node_not_suspended) { + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // add1->reshape1->sum1 + test_pass->AddResumeNodeName("reshape1", "sum1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + EXPECT_EQ(test_pass->GetIterNodes().size(), 8); + std::vector> layers; + layers.push_back({"data1", "const1", "const2"}); + layers.push_back({"shape1"}); + layers.push_back({"add1", "addn1"}); + layers.push_back({"reshape1", "sum1"}); + CheckIterOrder(test_pass, layers); +} + +/** + * A->B->C + * if node B suspend its pre_node A, it is a useless operation, so iter_order should follow normal order + * because nobody resume it ,which means A is a leaked node, so return fail + */ +TEST_F(UTESTGraphPassesBasePass, suspend_pre_node_nobody_resume_it_return_failed) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + // suspend pre_node immediately + test_pass.AddSuspendNodeName("reshape1", "add1"); + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), INTERNAL_ERROR); +} + +/** + * A->B->C + * if node B suspend its pre_node A, it is a useless operation, + * so iter_order should follow normal order + * resume A on leaked, which means A will pass again + */ +TEST_F(UTESTGraphPassesBasePass, suspend_pre_node_resume_it_onleaked) { + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("reshape1", "add1"); + test_pass->AddResumeNodeNameOnLeaked("add1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + std::vector> layers; + layers.push_back({"data1", "const1", "const2"}); + layers.push_back({"shape1"}); + layers.push_back({"add1", "addn1"}); + layers.push_back({"reshape1", "sum1"}); + layers.push_back({"add1"}); + CheckIterOrder(test_pass, layers); +} + + +/// cast1--shape1 +/// / +/// data1 +/// \ +/// transdata1--shape2 +/** + * suspend cur node + * cast1 suspend itself, shape2 resume cast1 + * iter order follows : data1; cast1,transdata1; shape2; cast1 ; shape1 + */ +TEST_F(UTESTGraphPassesBasePass, cast1_suspend_cur_node_shape2_resume_cast1) { + auto graph = BuildGraph4(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("cast1", "cast1"); + test_pass->AddResumeNodeName("shape2", "cast1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + EXPECT_EQ(test_pass->GetIterNodes().size(), 6); + std::vector> layers; + layers.push_back({"data1"}); + layers.push_back({"cast1","transdata1"}); + layers.push_back({"shape2"}); + layers.push_back({"cast1", "shape1"}); + CheckIterOrder(test_pass, layers); +} +/** + * suspend cur node + * cast1 suspend itself, then resume cast1 + * iter order follows : data1; cast1,cast1,transdata1; shape2; shape1. 
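+ * The resume is issued while cast1's own Run() is still being processed, so the
+ * suspend is expected to cancel out and cast1 is simply queued for one more
+ * pass in the same wave.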
+ */ +TEST_F(UTESTGraphPassesBasePass, cast1_suspend_itslef_then_resume_itself) { + auto graph = BuildGraph4(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("cast1", "cast1"); + test_pass->AddResumeNodeName("cast1", "cast1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + EXPECT_EQ(test_pass->GetIterNodes().size(), 6); + std::vector> layers; + layers.push_back({"data1"}); + layers.push_back({"cast1","transdata1","cast1","shape1", "shape2"}); + CheckIterOrder(test_pass, layers); +} +/** + * suspend cur node + * cast1 suspend itself, then resume cast1 on leaked + * iter order follows : data1; cast1,cast1,transdata1; shape2; shape1. + */ +TEST_F(UTESTGraphPassesBasePass, cast1_suspend_itslef_then_resume_onleaked) { + auto graph = BuildGraph4(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("cast1", "cast1"); + test_pass->AddResumeNodeNameOnLeaked("cast1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + EXPECT_EQ(test_pass->GetIterNodes().size(), 6); + std::vector> layers; + layers.push_back({"data1"}); + layers.push_back({"cast1","transdata1", "shape2"}); + layers.push_back({"cast1","shape1"}); + CheckIterOrder(test_pass, layers); +} +/** + * suspend next node + * data1 suspend cast1, then resume cast1 on leaked + * iter order follows : data1; transdata1, shape2; cast1, shape1. + */ +TEST_F(UTESTGraphPassesBasePass, data1_suspend_cast1_resume_cast1_onleaked) { + auto graph = BuildGraph4(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("data1", "cast1"); + test_pass->AddResumeNodeNameOnLeaked("cast1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), SUCCESS); + EXPECT_EQ(test_pass->GetIterNodes().size(), 5); + std::vector> layers; + layers.push_back({"data1"}); + layers.push_back({"transdata1", "shape2"}); + layers.push_back({"cast1","shape1"}); + CheckIterOrder(test_pass, layers); +} + +/** + * suspend next node + * data1 suspend cast1, nobody resume it + * iter order follows : data1; transdata1, shape2; + * run ret is failed ,because node leaked + */ +TEST_F(UTESTGraphPassesBasePass, data1_suspend_cast1_nobody_resume) { + auto graph = BuildGraph4(); + auto ge_pass = GEPass(graph); + auto *test_pass = dynamic_cast(names_to_pass_[0].second); + // suspend pre_node immediately + test_pass->AddSuspendNodeName("data1", "cast1"); + EXPECT_EQ(ge_pass.Run(names_to_pass_), INTERNAL_ERROR); + EXPECT_EQ(test_pass->GetIterNodes().size(), 3); +} + +/* +TEST_F(UTESTGraphPassesBasePass, suspend_pre_node) { + NamesToPass names_to_pass; + auto test_pass = UtestTestPass(); + names_to_pass.push_back(std::make_pair("test", &test_pass)); + + // repass next_node immediately + test_pass.AddRePassNodeName("reshape1", "sum1"); + // repass node after next_node immediately + test_pass.AddRePassNodeName("add1", "sum1"); + + auto graph = BuildGraph2(); + auto ge_pass = GEPass(graph); + EXPECT_EQ(ge_pass.Run(names_to_pass), SUCCESS); + EXPECT_EQ(test_pass.GetIterNodes().size(), 8);// todo + std::vector> layers; + layers.push_back({"data1", "const1", "const2"}); + layers.push_back({"shape1"}); + layers.push_back({"add1", "addn1"}); + layers.push_back({"reshape1", "sum1"}); + CheckIterOrder(&test_pass, layers); +}*/ +} // namespace ge diff --git 
a/tests/ut/ge/graph/passes/infershape_pass_unittest.cc b/tests/ut/ge/graph/passes/infershape_pass_unittest.cc index 13e66c50..d84aff50 100644 --- a/tests/ut/ge/graph/passes/infershape_pass_unittest.cc +++ b/tests/ut/ge/graph/passes/infershape_pass_unittest.cc @@ -1,161 +1,262 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#define protected public -#define private public -#include "graph/passes/infershape_pass.h" - -#include "graph/utils/tensor_utils.h" -#include "graph/utils/graph_utils.h" -#include "graph/operator_factory.h" -#include "graph/operator_reg.h" -#include "graph_builder_utils.h" - -using namespace std; -using namespace testing; -namespace ge { -class UtestGraphInfershapePass : public testing::Test { - protected: - void SetUp() {} - void TearDown() {} -}; - -static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) { - OpDescPtr op_desc = std::make_shared(name, type); - op_desc->SetStreamId(0); - static int32_t index = 0; - op_desc->SetId(index++); - - GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); - TensorUtils::SetSize(tensor, 512); - vector input_offset; - for (int i = 0; i < in_num; i++) { - op_desc->AddInputDesc(tensor); - input_offset.emplace_back(1024); - } - op_desc->SetInputOffset(input_offset); - - vector output_offset; - for (int i = 0; i < out_num; i++) { - op_desc->AddOutputDesc(tensor); - output_offset.emplace_back(1024); - } - op_desc->SetOutputOffset(output_offset); - - op_desc->SetWorkspace({}); - op_desc->SetWorkspaceBytes({}); - op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE"); - - const auto stub_func = [](Operator &op) { return GRAPH_SUCCESS; }; - op_desc->AddInferFunc(stub_func); - op_desc->AddInferFormatFunc(stub_func); - op_desc->AddVerifierFunc(stub_func); - - return graph.AddNode(op_desc); -} - -TEST_F(UtestGraphInfershapePass, infershape_pass_failed) { - GeTensorDesc ge_tensor_desc(GeShape({-2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16); - string type = "AddN"; - auto addn_op_desc = std::make_shared("AddN", type); - addn_op_desc->AddInputDesc(ge_tensor_desc); - addn_op_desc->AddOutputDesc(ge_tensor_desc); - auto graph = std::make_shared("test"); - auto addn_node = std::make_shared(addn_op_desc, graph); - addn_node->Init(); - - InferShapePass infershape_pass; - EXPECT_EQ(infershape_pass.Run(addn_node), GE_GRAPH_INFERSHAPE_FAILED); -} - -TEST_F(UtestGraphInfershapePass, delete_need_infer_again) { - auto graph = std::make_shared("test"); - - auto no_op_desc = std::make_shared("No", "NoOp"); - auto no_op_node = graph->AddNode(no_op_desc); - AttrUtils::SetBool(no_op_desc, "_need_infer_again", false); - - InferShapePass infershape_pass; - infershape_pass.options_[kOptimizeAfterSubGraph] = "yes"; - EXPECT_EQ(infershape_pass.Run(no_op_node), SUCCESS); -} - -TEST_F(UtestGraphInfershapePass, stop_node_for_while_loop) { -/******************************************************************************* - * Exit 
Identify - * \ / \. - * \ / \. - * Switch Add - * / | | - * / | | - * / | | - * LoopCond | | - * \ | | - * \ | | - * \ | | - * Less | | - * \ | NextIteration - * \ | | - * \ | | - * Merge <---------| - * | - * | - * Enter - ******************************************************************************/ - auto graph = std::make_shared("test_infer_shape"); - auto data1 = CreateNode(*graph, "data", DATA, 1, 1); - auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1); - auto merge1 = CreateNode(*graph, "merge", MERGE, 2, 2); - auto less1 = CreateNode(*graph, "less", LESS, 2, 1); - auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1); - auto switch1 = CreateNode(*graph, "switch", SWITCH, 2, 2); - auto ident1 = CreateNode(*graph, "identity", IDENTITY, 1, 1); - auto add1 = CreateNode(*graph, "add", ADD, 2, 1); - auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1); - auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1); - auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1); - auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1); - auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1); - - GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0)); - GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0)); - GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1)); - GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0)); - - GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0)); - GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1)); - - GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0)); - GraphUtils::AddEdge(switch1->GetOutDataAnchor(1), ident1->GetInDataAnchor(0)); - - GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0)); - GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1)); - GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0)); - - GraphUtils::AddEdge(next1->GetOutDataAnchor(0), merge1->GetInDataAnchor(1)); - GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0)); - - GEPass ge_passes(graph); - NamesToPass names_to_passes; - InferShapePass infer_shape_pass; - names_to_passes.emplace_back("InferShapePass", &infer_shape_pass); - - EXPECT_EQ(ge_passes.Run(names_to_passes), SUCCESS); -} -} // namespace ge +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#define protected public +#define private public +#include "graph/passes/infershape_pass.h" + +#include "graph/utils/tensor_utils.h" +#include "graph/utils/graph_utils.h" +#include "graph/operator_factory.h" +#include "graph/operator_reg.h" +#include "graph_builder_utils.h" + +using namespace std; +using namespace testing; +namespace ge { +class UtestGraphInfershapePass : public testing::Test { + protected: + void SetUp() {} + void TearDown() {} +}; + +static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) { + OpDescPtr op_desc = std::make_shared(name, type); + op_desc->SetStreamId(0); + static int32_t index = 0; + op_desc->SetId(index++); + + GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT); + TensorUtils::SetSize(tensor, 512); + vector input_offset; + for (int i = 0; i < in_num; i++) { + op_desc->AddInputDesc(tensor); + input_offset.emplace_back(1024); + } + op_desc->SetInputOffset(input_offset); + + vector output_offset; + for (int i = 0; i < out_num; i++) { + op_desc->AddOutputDesc(tensor); + output_offset.emplace_back(1024); + } + op_desc->SetOutputOffset(output_offset); + + op_desc->SetWorkspace({}); + op_desc->SetWorkspaceBytes({}); + op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE"); + + const auto stub_func = [](Operator &op) { return GRAPH_SUCCESS; }; + op_desc->AddInferFunc(stub_func); + op_desc->AddInferFormatFunc(stub_func); + op_desc->AddVerifierFunc(stub_func); + + return graph.AddNode(op_desc); +} + +TEST_F(UtestGraphInfershapePass, infershape_pass_failed) { + GeTensorDesc ge_tensor_desc(GeShape({-2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16); + string type = "AddN"; + auto addn_op_desc = std::make_shared("AddN", type); + addn_op_desc->AddInputDesc(ge_tensor_desc); + addn_op_desc->AddOutputDesc(ge_tensor_desc); + auto graph = std::make_shared("test"); + auto addn_node = std::make_shared(addn_op_desc, graph); + addn_node->Init(); + + InferShapePass infershape_pass; + EXPECT_EQ(infershape_pass.Run(addn_node), GRAPH_FAILED); +} + +TEST_F(UtestGraphInfershapePass, stop_node_for_while_loop) { +/******************************************************************************* + * Exit Identify + * \ / \. + * \ / \. 
+ * Switch Add + * / | | + * / | | + * / | | + * LoopCond | | + * \ | | + * \ | | + * \ | | + * Less | | + * \ | NextIteration + * \ | | + * \ | | + * Merge <---------| + * | + * | + * Enter + ******************************************************************************/ + auto graph = std::make_shared("test_infer_shape"); + auto data1 = CreateNode(*graph, "data", DATA, 1, 1); + auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1); + auto merge1 = CreateNode(*graph, "merge", MERGE, 2, 2); + auto less1 = CreateNode(*graph, "less", LESS, 2, 1); + auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1); + auto switch1 = CreateNode(*graph, "switch", SWITCH, 2, 2); + auto ident1 = CreateNode(*graph, "identity", IDENTITY, 1, 1); + auto add1 = CreateNode(*graph, "add", ADD, 2, 1); + auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1); + auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1); + auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1); + auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1); + auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1); + + GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0)); + GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0)); + GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0)); + GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1)); + GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0)); + + GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1)); + GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0)); + + GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0)); + GraphUtils::AddEdge(switch1->GetOutDataAnchor(1), ident1->GetInDataAnchor(0)); + + GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0)); + GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1)); + GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0)); + + GraphUtils::AddEdge(next1->GetOutDataAnchor(0), merge1->GetInDataAnchor(1)); + GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0)); + + GEPass ge_passes(graph); + NamesToPass names_to_passes; + InferShapePass infer_shape_pass; + names_to_passes.emplace_back("InferShapePass", &infer_shape_pass); + + EXPECT_EQ(infer_shape_pass.Run(switch1), SUCCESS); + auto suspend_nodes = infer_shape_pass.GetNodesSuspend(); + auto exit_node = graph->FindNode("exit"); + EXPECT_EQ(suspend_nodes.count(exit_node), 1); + infer_shape_pass.OnSuspendNodesLeaked(); + auto resume_nodes = infer_shape_pass.GetNodesResume(); + EXPECT_EQ(resume_nodes.count(exit_node), 1); +} +TEST_F(UtestGraphInfershapePass, update_tensordesc_when_changed) { + GeTensorDesc src_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16); + GeTensorDesc dst_ge_tensor_desc(GeShape({2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16); + GeTensorDescPtr src_tensor_desc_ptr = std::make_shared(src_ge_tensor_desc); + GeTensorDescPtr dst_tensor_desc_ptr = std::make_shared(dst_ge_tensor_desc); + InferShapePass infershape_pass; + bool changed = false; + infershape_pass.UpdateTensorDesc(src_tensor_desc_ptr, dst_tensor_desc_ptr, changed); + EXPECT_EQ(changed, true); + EXPECT_EQ(dst_tensor_desc_ptr->GetShape().GetDims(), std::vector({1, 2, 3, 4})); +} + +TEST_F(UtestGraphInfershapePass, update_tensordesc_when_not_changed) { + GeTensorDesc src_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, 
+TEST_F(UtestGraphInfershapePass, update_tensordesc_when_not_changed) {
+  GeTensorDesc src_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
+  GeTensorDescPtr src_tensor_desc_ptr = std::make_shared<GeTensorDesc>(src_ge_tensor_desc);
+  GeTensorDescPtr dst_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  bool changed = false;
+  infershape_pass.UpdateTensorDesc(src_tensor_desc_ptr, dst_tensor_desc_ptr, changed);
+  EXPECT_EQ(changed, false);
+}
+
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_failed) {
+  // ref outputs have different dtypes
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
+  GeTensorDesc ge_tensor_desc2(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphs({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr}, dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, GRAPH_FAILED);
+}
+
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_get_unknown_rank) {
+  // ref outputs have different ranks
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc ge_tensor_desc2(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphs({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr}, dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, SUCCESS);
+  EXPECT_EQ(dst_ge_tensor_desc_ptr->GetShape().GetDims(), UNKNOWN_RANK);
+}
+
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_get_unknown_shape) {
+  // ref outputs have different dims
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc ge_tensor_desc2(GeShape({2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphs({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr}, dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, SUCCESS);
+  EXPECT_EQ(dst_ge_tensor_desc_ptr->GetShape().GetDims(), std::vector<int64_t>({-1, 2, 3, 4}));
+  // TODO: also check the merged shape range?
+}
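+
+// The multiDims variants below exercise UpdateOutputFromSubgraphsForMultiDims,
+// which, unlike the plain merge above (mismatched dims degrade to -1), keeps
+// concrete dims and fails fast on dtype mismatch or int64 shape-size overflow.
+// The expected {2, 2, 3, 4} in the success case suggests the larger dim wins;
+// that is an assumption read off the assertions, not a documented guarantee.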
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_for_multiDims_failed) {
+  // ref outputs have different dtypes
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
+  GeTensorDesc ge_tensor_desc2(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphsForMultiDims({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr},
+                                                                   dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, GRAPH_FAILED);
+}
+
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_for_multiDims_failed_shape_size_overflow) {
+  // shape size of one ref output overflows int64
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc ge_tensor_desc2(GeShape({INT64_MAX, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphsForMultiDims({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr},
+                                                                   dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, PARAM_INVALID);
+}
+
+TEST_F(UtestGraphInfershapePass, update_output_from_subgraphs_for_multiDims_success) {
+  // ref outputs have different dims
+  GeTensorDesc ge_tensor_desc1(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc ge_tensor_desc2(GeShape({2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDesc dst_ge_tensor_desc(GeShape({1, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT);
+  GeTensorDescPtr ge_tensor_desc1_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc1);
+  GeTensorDescPtr ge_tensor_desc2_ptr = std::make_shared<GeTensorDesc>(ge_tensor_desc2);
+  GeTensorDescPtr dst_ge_tensor_desc_ptr = std::make_shared<GeTensorDesc>(dst_ge_tensor_desc);
+  InferShapePass infershape_pass;
+  auto ret = infershape_pass.UpdateOutputFromSubgraphsForMultiDims({ge_tensor_desc1_ptr, ge_tensor_desc2_ptr},
+                                                                   dst_ge_tensor_desc_ptr);
+  EXPECT_EQ(ret, SUCCESS);
+  EXPECT_EQ(dst_ge_tensor_desc_ptr->GetShape().GetDims(), std::vector<int64_t>({2, 2, 3, 4}));
+}
+}  // namespace ge

From eee6bc92d1b2bcd150b18621274222ba511ee5bd Mon Sep 17 00:00:00 2001
From: lichun
Date: Fri, 16 Jul 2021 16:13:45 +0800
Subject: [PATCH 212/226] fix error code and add complex128 support

---
 tests/ut/ge/graph_ir/ge_ir_build_unittest.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
index 60f33ed3..500dbc2a 100644
--- a/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
+++ b/tests/ut/ge/graph_ir/ge_ir_build_unittest.cc
@@ -367,7 +367,7 @@ TEST(UtestIrBuild, check_data_op_attr_index_valid) {
   };
   ModelBufferData model;
   graphStatus ret = aclgrphBuildModel(graph, build_options, model);
-  EXPECT_EQ(ret, GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED);
+  EXPECT_EQ(ret, ge::FAILED);
 }
 
 // set attr index invalid, when not set input shape range
@@ -377,7 +377,7 @@ TEST(UtestIrBuild, check_data_attr_index_succ_no_input_range) {
   const map<string, string> build_options;
   ModelBufferData model;
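+  // The two hunks in this patch relax the expected failure code: Data-op
+  // index validation errors now surface through the common error path as
+  // ge::FAILED instead of GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED.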
   graphStatus ret = aclgrphBuildModel(graph, build_options, model);
-  EXPECT_EQ(ret, GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED);
+  EXPECT_EQ(ret, ge::FAILED);
 }
 
 TEST(UtestIrBuild, check_modify_mixlist_param) {

From 4cb9aff399e1a8564cc12827bffda9a3a540749d Mon Sep 17 00:00:00 2001
From: lichun
Date: Fri, 16 Jul 2021 17:21:22 +0800
Subject: [PATCH 213/226] fix error code and add complex128 support

---
 .../build/graph_mem_assigner_unittest.cc      | 85 +++++++++++++++++++
 1 file changed, 85 insertions(+)
 create mode 100644 tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc

diff --git a/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc
new file mode 100644
index 00000000..983f1763
--- /dev/null
+++ b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc
@@ -0,0 +1,85 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <memory>
+
+#include "graph/anchor.h"
+#include "graph/attr_value.h"
+#include "graph/debug/ge_attr_define.h"
+#include "graph/utils/graph_utils.h"
+#include "graph/utils/node_utils.h"
+#include "graph/utils/op_desc_utils.h"
+#include "graph/utils/tensor_utils.h"
+#include "omg/omg_inner_types.h"
+#include "../passes/graph_builder_utils.h"
+
+#define protected public
+#define private public
+#include "graph/build/memory/binary_block_mem_assigner.h"
+#include "graph/build/memory/graph_mem_assigner_unittest.h"
+#include "graph/build/memory/hybrid_mem_assigner.h"
+#include "graph/build/memory/max_block_mem_assigner.h"
+#include "graph/manager/graph_var_manager.h"
+#undef protected
+#undef private
+
+using namespace std;
+using namespace testing;
+using namespace ge;
+using domi::GetContext;
+
+class UtestTaskGeneratorTest : public testing::Test {
+ public:
+  ge::ComputeGraphPtr BuildGraphWithVar(int64_t session_id) {
+    // init
+    MemManager::Instance().Initialize(std::vector<rtMemType_t>({RT_MEMORY_HBM}));
+    VarManager::Instance(session_id)->Init(0, 0, 0, 0);
+    ge::ut::GraphBuilder builder("graph");
+    auto var_input = builder.AddNode("var", "Variable", 1, 1);
+    auto const_input = builder.AddNode("const", "Const", 1, 1);
+    auto assign = builder.AddNode("assign", "Assign", 2, 1);
+    // add link
+    builder.AddDataEdge(var_input, 0, assign, 0);
+    builder.AddDataEdge(const_input, 0, assign, 1);
+    // set offset
+    var_input->GetOpDesc()->SetOutputOffset({10000});
+    const_input->GetOpDesc()->SetOutputOffset({1000});
+    assign->GetOpDesc()->SetInputOffset({10100, 1000});
+    assign->GetOpDesc()->SetOutputOffset({10100});
+    // set inner offset
+    int64_t inner_offset = 100;
+    ge::AttrUtils::SetInt(assign->GetOpDesc()->MutableInputDesc(0), ATTR_NAME_INNER_OFFSET, inner_offset);
+    ge::AttrUtils::SetInt(assign->GetOpDesc()->MutableOutputDesc(0), ATTR_NAME_INNER_OFFSET, inner_offset);
+    // add var addr
+    VarManager::Instance(session_id)->var_resource_->var_offset_map_.emplace(10000, RT_MEMORY_HBM);
+
+    return builder.GetGraph();
+  }
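+
+  // BuildGraphWithVar wires a Variable and a Const into an Assign node and
+  // registers the variable's offset with VarManager so memory assignment has
+  // something to resolve; the offsets (10000/1000/10100) and the inner offset
+  // of 100 are arbitrary test values, not constraints of the assigner.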
+
+protected:
+  void SetUp() {}
+  void TearDown() {}
+};
+
+TEST_F(UtestMemoryAssignerTest, graph_memory_assign_continuous_input) {
+  ge::ComputeGraphPtr compute_graph = make_shared<ge::ComputeGraph>("");
+  GraphMemoryAssigner graph_mem_assigner(compute_graph);
+  map<uint64_t, size_t> mem_type_to_offset = {};
+  Status ret = ReAssignMemory(false, mem_type_to_offset);
+  EXPECT_EQ(ret, ge::FAILED);
+}
+

From 7e6461f7f17fcc824f1b91c98ceb54a01cc2df15 Mon Sep 17 00:00:00 2001
From: lichun
Date: Fri, 16 Jul 2021 17:32:17 +0800
Subject: [PATCH 214/226] fix error code and add complex128 support

---
 tests/ut/ge/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 773a2686..a5b3942d 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -677,6 +677,7 @@ set(MULTI_PARTS_TEST_FILES
     "graph/build/stream_allocator_unittest.cc"
     "graph/build/model_builder_unittest.cc"
     "graph/build/mem_assigner_unittest.cc"
+    "graph/build/graph_mem_assigner_unittest.cc"
     "graph/build/task_generator_unittest.cc"
    "graph/build/buffer_pool_mem_assigner_unittest.cc"
     "graph/execute/graph_execute_unittest.cc"

From 8b7ae630863eac1d2aef3b20cf0496139554c420 Mon Sep 17 00:00:00 2001
From: lichun
Date: Fri, 16 Jul 2021 17:35:08 +0800
Subject: [PATCH 215/226] fix error code and add complex128 support

---
 tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc
index 983f1763..4bff9b38 100644
--- a/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc
+++ b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc
@@ -30,7 +30,7 @@
 #define protected public
 #define private public
 #include "graph/build/memory/binary_block_mem_assigner.h"
-#include "graph/build/memory/graph_mem_assigner_unittest.h"
+#include "graph/build/memory/graph_mem_assigner.h"
 #include "graph/build/memory/hybrid_mem_assigner.h"
 #include "graph/build/memory/max_block_mem_assigner.h"
 #include "graph/manager/graph_var_manager.h"

From a928df8eaaaaf0c8bea944d674e5eb58700854bd Mon Sep 17 00:00:00 2001
From: zhaoxinxin
Date: Fri, 16 Jul 2021 18:17:25 +0800
Subject: [PATCH 216/226] fix sc problem

---
 ge/graph/passes/infershape_pass.cc | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc
index deaebf4f..05b1b5fc 100755
--- a/ge/graph/passes/infershape_pass.cc
+++ b/ge/graph/passes/infershape_pass.cc
@@ -138,7 +138,9 @@ graphStatus InferShapePass::InferShapeAndType(NodePtr &node) {
   if (!is_unknown_graph) {
     auto inference_context = ShapeRefiner::CreateInferenceContext(node);
     GE_CHECK_NOTNULL(inference_context);
-    GELOGD("create context for node:%s, marks %zu", node->GetName().c_str(), inference_context->GetMarks().size());
+    std::vector<std::string> marks;
+    inference_context->GetMarks(marks);
+    GELOGD("create context for node:%s, marks %zu", node->GetName().c_str(), marks.size());
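+    // Assumption read off this diff alone: GetMarks() now copies the marks
+    // into a caller-owned vector rather than returning a reference, so the
+    // count is snapshotted here; the same pattern repeats after infershape.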
mark:%zu", node->GetName().c_str(), ctx_after_infer->GetMarks().size()); - if (!ctx_after_infer->GetOutputHandleShapesAndTypes().empty() || !ctx_after_infer->GetMarks().empty()) { + std::vector marks; + ctx_after_infer->GetMarks(marks); + GELOGD("[%s] after infershape. mark:%zu", node->GetName().c_str(), marks.size()); + if (!ctx_after_infer->GetOutputHandleShapesAndTypes().empty() || !marks.empty()) { GELOGD("[%s] set inference context after. mark:%zu", node->GetName().c_str(), - ctx_after_infer->GetMarks().size()); + marks.size()); ShapeRefiner::PushToContextMap(node, ctx_after_infer); } } @@ -254,7 +258,8 @@ graphStatus InferShapePass::CallInferShapeFunc(NodePtr &node, Operator &op) { auto ret = op_desc->CallInferFunc(op); if (ret == GRAPH_PARAM_INVALID) { // Op ir no infer func, try to get infer func from operator factory - auto node_op = ge::OperatorFactory::CreateOperator("node_op", op_desc->GetType()); + + auto node_op = ge::OperatorFactory::CreateOperator("node_op", op_desc->GetType().c_str()); if (node_op.IsEmpty()) { GELOGW("get op from OperatorFactory fail. opType: %s", op_type.c_str()); return ret; From f98a41081af2d7f23434cc1c63be84d0bef9d49c Mon Sep 17 00:00:00 2001 From: lichun Date: Fri, 16 Jul 2021 18:25:05 +0800 Subject: [PATCH 217/226] fix error code and add complex128 support --- .../ge/graph/build/graph_mem_assigner_unittest.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc index 4bff9b38..703ac3b4 100644 --- a/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc +++ b/tests/ut/ge/graph/build/graph_mem_assigner_unittest.cc @@ -34,6 +34,7 @@ #include "graph/build/memory/hybrid_mem_assigner.h" #include "graph/build/memory/max_block_mem_assigner.h" #include "graph/manager/graph_var_manager.h" +#include "graph/manager/graph_mem_manager.h" #undef protected #undef private @@ -42,7 +43,7 @@ using namespace testing; using namespace ge; using domi::GetContext; -class UtestTaskGeneratorTest : public testing::Test { +class UtestGraphMemAssigner : public testing::Test { public: ge::ComputeGraphPtr BuildGraphWithVar(int64_t session_id) { // init @@ -75,11 +76,15 @@ protected: void TearDown() {} }; -TEST_F(UtestMemoryAssignerTest, graph_memory_assign_continuous_input) { +TEST_F(UtestGraphMemAssigner, graph_memory_assign_fail_case) { ge::ComputeGraphPtr compute_graph = make_shared(""); GraphMemoryAssigner graph_mem_assigner(compute_graph); + MemoryOffset mem_offset(2, 10000); + graph_mem_assigner.memory_offset_.insert({2, mem_offset}); + VarManager::Instance(0)->graph_mem_max_size_ = 0; + map mem_type_to_offset = {}; - Status ret = ReAssignMemory(false, mem_type_to_offset); - EXPECT_EQ(ret, ge::FAILED); + Status ret = graph_mem_assigner.ReAssignMemory(false, mem_type_to_offset); + EXPECT_EQ(ret, ACL_ERROR_GE_MEMORY_ALLOCATION); } From 02c3500ceff3bcc491d536b944a0821635de0770 Mon Sep 17 00:00:00 2001 From: zhangxiaokun Date: Fri, 16 Jul 2021 19:28:02 +0800 Subject: [PATCH 218/226] delete model_cache_helper.cc --- ge/CMakeLists.txt | 2 - ge/common/helper/model_cache_helper.cc | 1721 ----------------- ge/common/helper/model_cache_helper.h | 123 -- ge/executor/ge_executor.cc | 1 + ge/generator/ge_generator.cc | 1 + ge/graph/manager/graph_manager.cc | 209 +- ge/graph/manager/graph_manager.h | 10 - ge/graph/manager/graph_manager_utils.cc | 38 +- ge/graph/manager/graph_manager_utils.h | 2 - ge/graph/manager/graph_var_manager.cc | 44 - 
ge/graph/manager/graph_var_manager.h | 6 - .../manager/model_manager/event_manager.cc | 83 - .../manager/model_manager/event_manager.h | 98 - ge/graph/manager/trans_var_data_utils.h | 1 - ge/graph/passes/global_step_insert_pass.cc | 1 - ge/init/gelib.h | 11 +- tests/ut/ge/CMakeLists.txt | 7 - .../graph/execute/model_executor_unittest.cc | 1 + tests/ut/ge/graph/graph_load_unittest.cc | 93 - ...new_model_manager_data_inputer_unittest.cc | 64 - ...ew_model_manager_davinci_model_unittest.cc | 1433 -------------- ...ew_model_manager_event_manager_unittest.cc | 117 -- .../new_model_manager_task_build_unittest.cc | 115 -- .../graph/load/output_net_output_unittest.cc | 300 --- .../graph/manager/graph_manager_unittest.cc | 3 - 25 files changed, 23 insertions(+), 4461 deletions(-) delete mode 100755 ge/common/helper/model_cache_helper.cc delete mode 100755 ge/common/helper/model_cache_helper.h delete mode 100644 ge/graph/manager/model_manager/event_manager.cc delete mode 100644 ge/graph/manager/model_manager/event_manager.h delete mode 100644 tests/ut/ge/graph/graph_load_unittest.cc delete mode 100644 tests/ut/ge/graph/load/new_model_manager_data_inputer_unittest.cc delete mode 100644 tests/ut/ge/graph/load/new_model_manager_davinci_model_unittest.cc delete mode 100644 tests/ut/ge/graph/load/new_model_manager_event_manager_unittest.cc delete mode 100644 tests/ut/ge/graph/load/new_model_manager_task_build_unittest.cc delete mode 100644 tests/ut/ge/graph/load/output_net_output_unittest.cc diff --git a/ge/CMakeLists.txt b/ge/CMakeLists.txt index 0236e8bd..f98297d8 100755 --- a/ge/CMakeLists.txt +++ b/ge/CMakeLists.txt @@ -262,7 +262,6 @@ set(COMPILER_SRC_LIST "common/dump/dump_op.cc" "common/ge/op_tiling_manager.cc" "common/ge/plugin_manager.cc" - "common/helper/model_cache_helper.cc" "common/profiling/profiling_manager.cc" "engine_manager/dnnengine_manager.cc" "ge_local_engine/engine/host_cpu_engine.cc" @@ -300,7 +299,6 @@ set(COMPILER_SRC_LIST "graph/manager/graph_var_manager.cc" "graph/manager/host_mem_allocator.cc" "graph/manager/host_mem_manager.cc" - "graph/manager/model_manager/event_manager.cc" "graph/manager/rdma_pool_allocator.cc" "graph/manager/session_scope_mem_allocator.cc" "graph/manager/trans_var_data_utils.cc" diff --git a/ge/common/helper/model_cache_helper.cc b/ge/common/helper/model_cache_helper.cc deleted file mode 100755 index 0e6c6329..00000000 --- a/ge/common/helper/model_cache_helper.cc +++ /dev/null @@ -1,1721 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "common/helper/model_cache_helper.h"
-
-#include <fstream>
-#include <string>
-#include <utility>
-
-#include "common/model_parser/model_parser.h"
-#include "framework/common/helper/model_helper.h"
-#include "graph/detail/model_serialize_imp.h"
-#include "graph/utils/graph_utils.h"
-#include "graph/utils/tensor_utils.h"
-#include "init/gelib.h"
-#include "proto/ge_ir.pb.h"
-
-using namespace std;
-
-namespace {
-const char *const kTbeKernelInfoStoreName = "AIcoreEngine";
-const char *const kGraphName = "temp_name";
-// Keys of json
-const char *const kNodeNum = "nodeNum";
-const char *const kEdgeNum = "edgeNum";
-const char *const kGraphHash = "graphHash";
-const char *const kNodeHash = "nodeHash";
-const char *const kHash = "hash";
-const char *const kSessionId = "sessionId";
-const char *const kDeviceId = "deviceId";
-const char *const kJobId = "jobId";
-const char *const kGraphMemMaxSize = "graphMemMaxSize";
-const char *const kVarMemMaxSize = "varMemMaxSize";
-const char *const kVarMemLogicBase = "varMemLogicBase";
-const char *const kUseMaxMemSize = "useMaxMemSize";
-const char *const kMemResourceMap = "memResourceMap";
-const char *const kMemType = "memType";
-const char *const kTotalSize = "totalSize";
-const char *const kVarMemSize = "varMemSize";
-const char *const kVarResource = "varResource";
-const char *const kVarAddrMgrMap = "varAddrMgrMap";
-const char *const kName = "name";
-const char *const kAddress = "address";
-const char *const kOffset = "offset";
-const char *const kMemoryType = "memoryType";
-const char *const kTensorDesc = "tensorDesc";
-const char *const kDataType = "dataType";
-const char *const kShape = "shape";
-const char *const kLayout = "layout";
-const char *const kOriginDataType = "originDataType";
-const char *const kOriginShape = "originShape";
-const char *const kOriginLayout = "originLayout";
-const char *const kRealDimCnt = "realDimCnt";
-const char *const kCurVarTensorDescMap = "curVarTensorDescMap";
-const char *const kTransRoads = "transRoads";
-const char *const kTransRoad = "transRoad";
-const char *const kNodeType = "nodeType";
-const char *const kInputTensorDesc = "inputTensorDesc";
-const char *const kOutputTensorDesc = "outputTensorDesc";
-const char *const kChangedGraphId = "changedGraphId";
-const char *const kAllocatedGraphId = "allocatedGraphId";
-const char *const kGraphId = "graphId";
-const char *const kVarBroadcastInfo = "varBroadcastInfo";
-const char *const kBroadcastName = "broadcastName";
-const char *const kIdx = "idx";
-const char *const kInputOffset = "inputOffset";
-const char *const kInputSize = "inputSize";
-const char *const kOutputOffset = "outputOffset";
-const char *const kOutputSize = "outputSize";
-// Suffix of cache files
-const char *const kBeforeVarManagerSuffix = "_before_build_var_manager.json";
-const char *const kAfterVarManagerSuffix = "_after_build_var_manager.json";
-const char *const kManifestSuffix = ".manifest";
-const char *const kOmSuffix = ".om";
-}  // namespace
-
-namespace ge {
-map<uint32_t, uint32_t> ModelCacheHelper::graph_id_run_times_;
-ModelCacheHelper::ModelCacheHelper(uint64_t session_id, uint32_t graph_id, ComputeGraphPtr &compute_graph)
-    : session_id_(session_id),
-      graph_id_(graph_id),
-      compute_graph_(compute_graph),
-      is_cache_path_valid_for_output(false) {
-  if (graph_id_run_times_.count(graph_id) == 0) {
-    graph_id_run_times_[graph_id] = 1;
-  } else {
-    graph_id_run_times_[graph_id] = graph_id_run_times_[graph_id] + 1;
-  }
-  for (const auto &node : compute_graph_->GetDirectNode()) {
-    bool is_variable = (node->GetType() == VARIABLE) || (node->GetType() == VARIABLEV2) ||
-                       (node->GetType() == VARHANDLEOP) || (node->GetType() == CONSTANTOP);
-    if (!is_variable) {
-      continue;
-    }
-    var_names_.insert(node->GetName());
-  }
-  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
-  if (instance_ptr != nullptr && instance_ptr->IsIncreBuild()) {
-    std::string cache_path = instance_ptr->GetIncreBuildCachePath();
-    GELOGD("Incre build path conf: %s", cache_path.c_str());
-    string fake_file_path = cache_path + to_string(graph_id_) + kManifestSuffix;
-    if (CheckOutputPathValid(fake_file_path)) {
-      is_cache_path_valid_for_output = true;
-    } else {
-      GELOGW("Invalid cache path for output.");
-    }
-    std::string real_cache_path = RealPath(cache_path.c_str());
-    if (real_cache_path.empty()) {
-      GELOGW("Invalid incre build cache path conf: %s", cache_path.c_str());
-      return;
-    }
-    cache_path_ = real_cache_path + '/';
-    GELOGD("Try to use incre build cache path: %s", cache_path_.c_str());
-  }
-}
-
-ModelCacheHelper::~ModelCacheHelper() { var_names_.clear(); }
-
-bool ModelCacheHelper::IsModelCacheHit() const {
-  CacheInfo cache_info;
-  if (GetCacheInfo(cache_info) != SUCCESS) {
-    GELOGI("Get cache info of graph id[%u] failed.", graph_id_);
-    return false;
-  }
-  // Check number of nodes and edges first.
-  if (cache_info.node_num != compute_graph_->GetDirectNodesSize()) {
-    GELOGI("Graph id[%u] cache miss: the node number of the graph does not match the cache info.", graph_id_);
-    return false;
-  }
-  size_t edge_num = 0;
-  for (const auto &node : compute_graph_->GetDirectNode()) {
-    for (const auto &anchor : node->GetAllInAnchors()) {
-      edge_num += anchor->GetPeerAnchors().size();
-    }
-  }
-  if (cache_info.edge_num != edge_num) {
-    GELOGI("Graph id[%u] cache miss: the edge number of the graph does not match the cache info.", graph_id_);
-    return false;
-  }
-  size_t compute_graph_hash;
-  auto ret = GetComputeGraphHash(compute_graph_hash);
-  if (ret != SUCCESS || cache_info.graph_hash != compute_graph_hash) {
-    GELOGI("Graph id[%u] cache miss: the hash code of the graph does not match the cache info.", graph_id_);
-    return false;
-  }
-  if (!IsNodeHashSameAsCache(cache_info.nodes_hash)) {
-    GELOGI("Graph id[%u] cache miss: the hash code of node does not match the cache info.", graph_id_);
-    return false;
-  }
-
-  string var_manager_cache =
-      to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kBeforeVarManagerSuffix;
-  Json var_manager_json;
-  if (LoadJsonFromFile(var_manager_cache, var_manager_json) != SUCCESS) {
-    GELOGW("Fail to load json from cache file: %s", var_manager_cache.c_str());
-    return false;
-  }
-  if (!IsVarManagerSameAsCache(var_manager_json)) {
-    GELOGI("Graph id[%u] cache miss: the VarManager does not match the cache info.", graph_id_);
-    return false;
-  }
-  GELOGI("Graph id[%u] cache hit.", graph_id_);
-  return true;
-}
-
-Status ModelCacheHelper::RefreshComputeGraph(const ComputeGraphPtr &compute_graph) {
-  if (compute_graph->IsValid()) {
-    compute_graph_ = compute_graph;
-    var_names_.clear();
-    for (const auto &node : compute_graph_->GetDirectNode()) {
-      bool is_variable = (node->GetType() == VARIABLE) || (node->GetType() == VARIABLEV2) ||
-                         (node->GetType() == VARHANDLEOP) || (node->GetType() == CONSTANTOP);
-      if (!is_variable) {
-        continue;
-      }
-      var_names_.insert(node->GetName());
-    }
-    return SUCCESS;
-  } else {
-    GELOGW("Invalid compute graph.");
-    return FAILED;
-  }
-}
-
-Status ModelCacheHelper::ClearCache(uint32_t graph_id) const {
-  if (!is_cache_path_valid_for_output) {
-    GELOGW("Invalid cache path.");
-    return SUCCESS;
-  }
-  string manifest_file = cache_path_ + to_string(graph_id) + kManifestSuffix;
-  string manifest_file_path = RealPath(manifest_file.c_str());
-  int ret;
-  if (!manifest_file_path.empty()) {
-    ret = remove(manifest_file_path.c_str());
-    // If remove file failed, print the warning log
-    if (ret != 0) {
-      GELOGW("Clear cache [%s] failed.", manifest_file_path.c_str());
-    }
-  }
-  string before_var_manager_file = cache_path_ + to_string(graph_id) + kManifestSuffix;
-  string before_var_manager_file_path = RealPath(before_var_manager_file.c_str());
-  if (!before_var_manager_file_path.empty()) {
-    ret = remove(before_var_manager_file_path.c_str());
-    if (ret != 0) {
-      GELOGW("Clear cache [%s] failed.", before_var_manager_file_path.c_str());
-    }
-  }
-  string after_var_manager_file = cache_path_ + to_string(graph_id) + kManifestSuffix;
-  string after_var_manager_file_path = RealPath(after_var_manager_file.c_str());
-  if (!after_var_manager_file_path.empty()) {
-    ret = remove(after_var_manager_file_path.c_str());
-    if (ret != 0) {
-      GELOGW("Clear cache [%s] failed.", after_var_manager_file_path.c_str());
-    }
-  }
-  string om_file = cache_path_ + to_string(graph_id) + kManifestSuffix;
-  string om_file_path = RealPath(om_file.c_str());
-  if (!om_file_path.empty()) {
-    ret = remove(om_file_path.c_str());
-    if (ret != 0) {
-      GELOGW("Clear cache [%s] failed.", om_file_path.c_str());
-    }
-  }
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::RecoverVarManagerFromCache() const {
-  string var_manager_cache =
-      to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kAfterVarManagerSuffix;
-  Json var_manager_json;
-  if (LoadJsonFromFile(var_manager_cache, var_manager_json) != SUCCESS) {
-    GELOGW("Fail to load json from cache file: %s", var_manager_cache.c_str());
-    return FAILED;
-  }
-
-  Json mem_resource_json = move(var_manager_json[kMemResourceMap]);
-  auto ret = RecoverMemResource(mem_resource_json);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[MemResource]");
-    return FAILED;
-  }
-  Json var_resource_json = move(var_manager_json[kVarResource]);
-  ret = RecoverAllocatedGraphId(var_resource_json[kAllocatedGraphId]);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[AllocatedGraphId]");
-    return FAILED;
-  }
-  ret = RecoverChangedGraphId(var_resource_json[kChangedGraphId]);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[ChangedGraphId]");
-    return FAILED;
-  }
-  ret = RecoverBroadcastInfo(var_resource_json[kVarBroadcastInfo]);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[VarBroadcastInfo]");
-    return FAILED;
-  }
-  ret = RecoverVarAddrAndTensorDesc(var_resource_json[kVarAddrMgrMap]);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[VarAddrMgrMap & CurVarTensorDesc]");
-    return FAILED;
-  }
-  ret = RecoverTransRoads(var_resource_json[kTransRoads]);
-  if (ret != SUCCESS) {
-    GELOGW("Recover VarManager from cache failed.[TransRoads]");
-    return FAILED;
-  }
-  GELOGI("Recover VarManager from cache[%s] success.", cache_path_.c_str());
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::GetNodesNeedRecompile(ComputeGraphPtr &graph, vector<NodePtr> &nodes) {
-  std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
-  if (instance == nullptr || !instance->InitFlag()) {
-    GELOGW("RecompileNodes failed.");
-    return ge::GE_CLI_GE_NOT_INITIALIZED;
-  }
-  // Collect aicore ops for recompile
-  for (auto &node : graph->GetDirectNode()) {
-    if (node == nullptr) {
-      continue;
-    }
-    auto op_desc = node->GetOpDesc();
-    if (op_desc == nullptr) {
-      continue;
-    }
-    // Get op kernel lib name
-    string kernel_lib_name = op_desc->GetOpKernelLibName();
-    if (kernel_lib_name.empty()) {
-      // reset op kernel lib
-      (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
-      kernel_lib_name = op_desc->GetOpKernelLibName();
-      if (kernel_lib_name.empty()) {
-        GELOGW("Get node:%s, type:%s kernel lib failed.", node->GetName().c_str(), op_desc->GetType().c_str());
-        continue;
-      }
-    }
-  }
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::RecompileNodes(GeModelPtr &ge_model) {
-  std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
-  if (instance == nullptr || !instance->InitFlag()) {
-    GELOGW("RecompileNodes failed.");
-    return ge::GE_CLI_GE_NOT_INITIALIZED;
-  }
-  // Get aicore ops kernel info store.
-  OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kTbeKernelInfoStoreName);
-  if (kernel_info == nullptr) {
-    GELOGW("Get %s ops kernel info store failed", kTbeKernelInfoStoreName);
-    return INTERNAL_ERROR;
-  }
-
-  auto compute_graph = GraphUtils::GetComputeGraph(ge_model->GetGraph());
-  vector<NodePtr> node_vec;
-  auto ret = GetNodesNeedRecompile(compute_graph, node_vec);
-  GE_CHK_BOOL_EXEC_WARN(ret == ge::SUCCESS, return ret, "Get nodes need recompiling failed");
-  // Recompile aicore ops
-  ret = kernel_info->CompileOp(node_vec);
-  GE_CHK_BOOL_EXEC_WARN(ret == ge::SUCCESS, return ret, "Recompile op failed");
-  const TBEKernelStore &tbekernel_store = ge_model->GetTBEKernelStore();
-  TBEKernelStore tbe_kernel_store;
-  for (const ge::NodePtr &n : compute_graph->GetDirectNode()) {
-    auto node_op_desc = n->GetOpDesc();
-    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
-    TBEKernelPtr tbe_kernel = node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
-    if (tbe_kernel == nullptr) {
-      // Load tbe kernel from tbe_kernel_store to op if op was not recompiled
-      auto op_desc = n->GetOpDesc();
-      tbekernel_store.LoadTBEKernelBinToOpDesc(op_desc);
-      GELOGD("LoadOmModelFromCache: Load tbe kernel bin to op desc[%s].", op_desc->GetName().c_str());
-    }
-    tbe_kernel = node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
-    GE_IF_BOOL_EXEC(tbe_kernel == nullptr, continue);
-    // Refresh tbe kernel in tbe_kernel_store
-    tbe_kernel_store.AddTBEKernel(tbe_kernel);
-    GELOGD("Add tbe kernel bin %s", tbe_kernel->GetName().c_str());
-  }
-  GE_CHK_BOOL_EXEC_WARN(tbe_kernel_store.Build(), return FAILED, "TBE Kernels store build failed!");
-  ge_model->SetTBEKernelStore(tbe_kernel_store);
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::GetNodesHash(map<std::string, size_t> &hash_map) const {
-  vector<NodePtr> nodes;
-  GraphUtils::TopologicalSortingByName(compute_graph_, nodes);
-  ModelSerializeImp model_serialize_imp;
-  std::hash<std::string> node_hash;
-  for (const auto &node : nodes) {
-    if (node == nullptr) {
-      continue;
-    }
-    proto::OpDef op_def;
-    bool is_framework_op = (node->GetType() == FRAMEWORKOP);
-    int32_t framework_type = 0;
-    if (is_framework_op) {
-      AttrUtils::GetInt(node->GetOpDesc(), ge::ATTR_NAME_FRAMEWORK_FWK_TYPE, framework_type);
-      AttrUtils::SetInt(node->GetOpDesc(), ge::ATTR_NAME_FRAMEWORK_FWK_TYPE, 0);
-    }
-    bool ret = model_serialize_imp.SerializeNode(node, &op_def, is_framework_op);
-    op_def.set_id(0);  // Id of op is not stable because of parallel parsing
-    // Clear weights attr in constant.
-    auto attr = op_def.mutable_attr();
-    if (op_def.type() == CONSTANT || op_def.type() == CONSTANTOP) {
-      attr->erase(ATTR_NAME_WEIGHTS);
-    }
-    if (is_framework_op) {
-      AttrUtils::SetInt(node->GetOpDesc(), ge::ATTR_NAME_FRAMEWORK_FWK_TYPE, framework_type);
-    }
-    if (!ret) {
-      GELOGW("Fail to serialize node[%s].", node->GetName().c_str());
-      return INTERNAL_ERROR;
-    }
-    string prototxt;
-    ret = google::protobuf::TextFormat::PrintToString(op_def, &prototxt);
-    if (!ret) {
-      GELOGW("Print OpDef to string failed.");
-      hash_map.clear();
-      return INTERNAL_ERROR;
-    }
-    size_t hash_code = node_hash(prototxt);
-    hash_map[node->GetName()] = hash_code;
-  }
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::GetComputeGraphHash(size_t &hash) const {
-  proto::GraphDef graph_proto;
-  ModelSerializeImp model_serialize_imp;
-  // The name of compute graph may be generated randomly, so replace it temporarily.
-  const string origin_name = compute_graph_->GetName();
-  compute_graph_->SetName(kGraphName);
-  bool serialize_ret = model_serialize_imp.SerializeGraph(compute_graph_, &graph_proto);
-  graph_proto.clear_op();
-  if (!serialize_ret) {
-    GELOGW("Serialize graph failed.");
-    hash = 0;
-    return INTERNAL_ERROR;
-  }
-  compute_graph_->SetName(origin_name);
-  // Generate proto text of GraphDef
-  string prototxt;
-  bool print_ret = google::protobuf::TextFormat::PrintToString(graph_proto, &prototxt);
-  if (!print_ret) {
-    GELOGW("Print GraphDef to string failed.");
-    hash = 0;
-    return INTERNAL_ERROR;
-  }
-  // Get the hash code of proto text
-  std::hash<std::string> graph_hash;
-  hash = graph_hash(prototxt);
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::SaveJsonToFile(const string &file_name, const Json &json) const {
-  if (!is_cache_path_valid_for_output) {
-    GELOGW("Invalid cache path.");
-    return PARAM_INVALID;
-  }
-  // Check whether the manifest exists, if not, create it.
-  string real_path = RealPath(cache_path_.c_str());
-  if (real_path.empty()) {
-    GELOGW("File path is invalid. please check cache path: %s", cache_path_.c_str());
-    return FAILED;
-  }
-  const string path = cache_path_ + file_name;
-  const int FILE_AUTHORITY = 0600;
-  int fd = mmOpen2(path.c_str(), M_WRONLY | M_CREAT | O_TRUNC, FILE_AUTHORITY);
-  if (fd < 0) {
-    GELOGW("Fail to open the file:%s. errmsg:%s", path.c_str(), strerror(errno));
-    return INTERNAL_ERROR;
-  }
-  if (mmClose(fd) != 0) {
-    GELOGW("Fail to close the file:%s. errmsg:%s", path.c_str(), strerror(errno));
-    return INTERNAL_ERROR;
-  }
-
-  // Write json into cache file
-  ofstream ofs;
-  ofs.open(path);
-  if (!ofs.is_open()) {
-    GELOGW("Fail to open the file: %s.", path.c_str());
-    return INTERNAL_ERROR;
-  }
-  ofs << json << std::endl;
-  ofs.close();
-  return SUCCESS;
-}
-
-Status ModelCacheHelper::LoadJsonFromFile(const string &file_name, Json &json) const {
-  if (!json.is_null()) {
-    GELOGW("Input param json type should be null.");
-    return PARAM_INVALID;
-  }
-  string real_path = RealPath(cache_path_.c_str());
-  if (real_path.empty()) {
-    GELOGW("File path is invalid. 
please check cache path: %s", cache_path_.c_str()); - return FAILED; - } - const string path = cache_path_ + file_name; - if (!CheckInputPathValid(path)) { - GELOGW("Invalid cache path for input:%s.", path.c_str()); - return FAILED; - } - string cache_real_path = RealPath(path.c_str()); - if (cache_real_path.empty()) { - GELOGI("File[%s] is not found.", path.c_str()); - return FAILED; - } - // Read json from cache file - ifstream ifs; - ifs.open(path); - if (!ifs.is_open()) { - GELOGW("Fail to open the file: %s.", path.c_str()); - return INTERNAL_ERROR; - } - try { - ifs >> json; - } catch (nlohmann::detail::parse_error e) { - GELOGW("Fail to load json from file, json throw an error:%s.", e.what()); - return INTERNAL_ERROR; - } catch (nlohmann::detail::invalid_iterator e) { - GELOGW("Fail to load json from file, json throw an error:%s.", e.what()); - return INTERNAL_ERROR; - } catch (nlohmann::detail::type_error e) { - GELOGW("Fail to load json from file, json throw an error:%s.", e.what()); - return INTERNAL_ERROR; - } catch (nlohmann::detail::out_of_range e) { - GELOGW("Fail to load json from file, json throw an error:%s.", e.what()); - return INTERNAL_ERROR; - } catch (nlohmann::detail::other_error e) { - GELOGW("Fail to load json from file, json throw an error:%s.", e.what()); - return INTERNAL_ERROR; - } - - if (!json.is_object()) { - GELOGW("Fail to load the json file: %s.", path.c_str()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::SaveCacheInfoToCache() const { - // Generate cache json - // example: {"edgeNum":6,"nodeNum":7,"graphCache":134714827475991356} - Json cache_json; - try { - cache_json[kNodeNum] = compute_graph_->GetDirectNodesSize(); - size_t edge_num = 0; - for (const auto &node : compute_graph_->GetDirectNode()) { - for (const auto &anchor : node->GetAllInAnchors()) { - edge_num += anchor->GetPeerAnchors().size(); - } - } - cache_json[kEdgeNum] = edge_num; - size_t hash = 0; - auto ret = GetComputeGraphHash(hash); - if (ret != SUCCESS) { - GELOGW("Error occur when generate graph hash code."); - return ret; - } - cache_json[kGraphHash] = hash; - Json nodes_hash_json; - ret = GetNodesHashMapJson(nodes_hash_json); - if (ret != SUCCESS) { - GELOGW("Error occur when generate nodes hash code."); - return ret; - } - cache_json[kNodeHash] = nodes_hash_json; - } catch (const std::exception &e) { - GELOGW("Fail to generate cache info json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - string cache_manifest = to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kManifestSuffix; - - auto ret = SaveJsonToFile(cache_manifest, cache_json); - if (ret != SUCCESS) { - GELOGW("Fail to save cache info to json file, path: %s.", cache_path_.c_str()); - return ret; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetCacheInfo(CacheInfo &cache_info) const { - string cache_manifest = to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kManifestSuffix; - Json cache_json; - if (LoadJsonFromFile(cache_manifest, cache_json) != SUCCESS) { - GELOGW("Fail to load json from cache file: %s", cache_manifest.c_str()); - return INTERNAL_ERROR; - } - if (!cache_json.is_object()) { - GELOGW("Manifest should be a json object"); - return INTERNAL_ERROR; - } - try { - cache_info.node_num = cache_json[kNodeNum]; - cache_info.edge_num = cache_json[kEdgeNum]; - cache_info.graph_hash = cache_json[kGraphHash]; - Json nodes_hash_json = cache_json[kNodeHash]; - if (!(nodes_hash_json.is_null() || nodes_hash_json.is_array())) { - GELOGW("Nodes hash in cache should be null or array."); - return FAILED; - } - for (const auto &iter : nodes_hash_json) { - cache_info.nodes_hash[iter[kName].get()] = iter[kHash].get(); - } - } catch (const std::exception &e) { - GELOGW("Fail to get info from json file. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -bool ModelCacheHelper::IsAllocatedGraphIdSameAsCache(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return false; - } - // Compare allocated graph id info between json and VarManager - std::map allocated_graph_id; - auto ret = ParseAllocatedGraphIdFromJson(json, allocated_graph_id); - if (ret != SUCCESS) { - GELOGW("Fail to parse AllocatedGraphId from Json."); - return false; - } - for (const auto &iter : allocated_graph_id) { - uint32_t graph_id = 0; - ret = VarManager::Instance(session_id_)->GetAllocatedGraphId(iter.first, graph_id); - if (ret != SUCCESS) { - GELOGW("Fail to find allocated graph id of var[%s].", iter.first.c_str()); - return false; - } - if (graph_id != iter.second) { - GELOGW("The allocated graph id of variable[%s] in cache is different from VarManager.", iter.first.c_str()); - return false; - } - } - return true; -} - -bool ModelCacheHelper::IsNodeHashSameAsCache(const map &hash_map) const { - map cur_hash_map; - GetNodesHash(cur_hash_map); - if (hash_map.size() != cur_hash_map.size()) { - GELOGI("The number of hash code is different from cache info."); - return false; - } - for (const auto &iter : cur_hash_map) { - if (hash_map.count(iter.first) == 0) { - GELOGI("Node[%s] is not found in cache info.", iter.first.c_str()); - return false; - } - if (hash_map.at(iter.first) != iter.second) { - GELOGI("The hash code of node[%s] is different from cache info.", iter.first.c_str()); - return false; - } - } - return true; -} - -bool ModelCacheHelper::IsMemResourceSameAsCache(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return false; - } - // Compare var mem size info between json and VarManager - std::map var_mem_size; - auto ret = ParseMemResourceFromJson(json, var_mem_size); - if (ret != SUCCESS) { - GELOGW("Fail to parse MemResource from Json."); - return false; - } - for (const auto &iter : var_mem_size) { - int64_t mem_size = 
-    if (mem_size != iter.second) {
-      GELOGW("The var mem size of memory_type[%u] in cache is different from VarManager.", iter.first);
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsChangedGraphIdSameAsCache(Json &json) const {
-  if (!(json.is_null() || json.is_array())) {
-    GELOGW("Input param json type should be null or array.");
-    return false;
-  }
-  // Compare variable changed graph id info between json and VarManager
-  std::map<std::string, uint32_t> changed_graph_id;
-  auto ret = ParseChangedGraphIdFromJson(json, changed_graph_id);
-  if (ret != SUCCESS) {
-    GELOGW("Fail to parse ChangedGraphId from Json.");
-    return false;
-  }
-  for (const auto &iter : changed_graph_id) {
-    uint32_t graph_id = 0;
-    ret = VarManager::Instance(session_id_)->GetChangedGraphId(iter.first, graph_id);
-    if (ret != SUCCESS) {
-      GELOGW("Fail to find changed graph id of var[%s].", iter.first.c_str());
-      return false;
-    }
-    if (graph_id != iter.second) {
-      GELOGW("The changed graph id of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsCurVarTensorDescSameAsCache(Json &json) const {
-  if (!(json.is_null() || json.is_array())) {
-    GELOGW("Input param json type should be null or array.");
-    return false;
-  }
-  // Compare variable tensor desc info between json and VarManager
-  std::unordered_map<std::string, ge::GeTensorDesc> cur_var_tensor_desc;
-  auto ret = ParseCurVarTensorDescMapFromJson(json, cur_var_tensor_desc);
-  if (ret != SUCCESS) {
-    GELOGW("Fail to parse CurVarTensorDesc from Json.");
-    return false;
-  }
-  for (const auto &iter : cur_var_tensor_desc) {
-    GeTensorDesc tensor_desc;
-    ret = VarManager::Instance(session_id_)->GetCurVarDesc(iter.first, tensor_desc);
-    if (ret != SUCCESS) {
-      GELOGW("Fail to find tensor desc of var[%s].", iter.first.c_str());
-      return false;
-    }
-    uint32_t l_real_dim_cnt = 0;
-    uint32_t r_real_dim_cnt = 0;
-    TensorUtils::GetRealDimCnt(tensor_desc, l_real_dim_cnt);
-    TensorUtils::GetRealDimCnt(iter.second, r_real_dim_cnt);
-    if ((tensor_desc.GetDataType() != iter.second.GetDataType()) ||
-        (tensor_desc.GetOriginDataType() != iter.second.GetOriginDataType()) ||
-        (tensor_desc.GetFormat() != iter.second.GetFormat()) ||
-        (tensor_desc.GetOriginFormat() != iter.second.GetOriginFormat()) ||
-        (tensor_desc.GetShape().ToString() != iter.second.GetShape().ToString()) ||
-        (tensor_desc.GetOriginShape().ToString() != iter.second.GetOriginShape().ToString()) ||
-        (l_real_dim_cnt != r_real_dim_cnt)) {
-      GELOGW("The var tensor desc of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsVarAddrMgrMapSameAsCache(Json &json) const {
-  if (!(json.is_null() || json.is_array())) {
-    GELOGW("Input param json type should be null or array.");
-    return false;
-  }
-  // Compare variable address info between json and VarManager
-  std::vector<std::pair<std::string, VarAddrMgr>> var_addr_mgr_vector;
-  std::set<uint64_t> var_offset_set;
-  auto ret = ParseVarAddrMgrMapFromJson(json, var_addr_mgr_vector, var_offset_set);
-  if (ret != SUCCESS) {
-    GELOGW("Fail to parse VarAddrMgrMap from Json.");
-    return false;
-  }
-  for (const auto &iter : var_addr_mgr_vector) {
-    uint8_t *dev_ptr = nullptr;
-    rtMemType_t memory_type;
-    ret = VarManager::Instance(session_id_)->GetVarAddr(iter.first, iter.second.tensor_desc, &dev_ptr, memory_type);
-    if (ret != SUCCESS) {
-      GELOGW("Fail to find tensor desc of var[%s].", iter.first.c_str());
-      return false;
-    }
-    // Compare memory type and logic address
-    if (iter.second.memory_type != memory_type || iter.second.address != dev_ptr) {
-      GELOGW("The VarAddrMgr of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsBroadcastInfoSameAsCache(Json &json) const {
-  if (!(json.is_null() || json.is_array())) {
-    GELOGW("Input param json type should be null or array.");
-    return false;
-  }
-  // Compare broadcast info between json and VarManager
-  std::unordered_map<std::string, VarBroadCastInfo> var_broadcast_info;
-  auto ret = ParseBroadcastInfoFromJson(json, var_broadcast_info);
-  if (ret != SUCCESS) {
-    GELOGW("Fail to parse BroadcastInfo from Json.");
-    return false;
-  }
-  for (const auto &iter : var_broadcast_info) {
-    VarBroadCastInfo broadcast_info;
-    if (VarManager::Instance(session_id_)->GetBroadCastInfo(graph_id_, iter.first, broadcast_info) != SUCCESS) {
-      GELOGW("Fail to find broadcast info of var[%s].", iter.first.c_str());
-      return false;
-    }
-    if (iter.second.var_name != broadcast_info.var_name || iter.second.idx != broadcast_info.idx ||
-        iter.second.input_size != broadcast_info.input_size ||
-        iter.second.input_offset != broadcast_info.input_offset ||
-        iter.second.output_size != broadcast_info.output_size ||
-        iter.second.output_offset != broadcast_info.output_offset) {
-      GELOGW("The BroadcastInfo of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsTransRoadsSameAsCache(Json &json) const {
-  if (!(json.is_null() || json.is_array())) {
-    GELOGW("Input param json type should be null or array.");
-    return false;
-  }
-  // Compare trans road between json and VarManager
-  std::unordered_map<std::string, std::vector<TransNodeInfo>> trans_roads;
-  auto ret = ParseTransRoadsFromJson(json, trans_roads);
-  if (ret != SUCCESS) {
-    GELOGW("Fail to parse TransRoads from Json.");
-    return false;
-  }
-  for (const auto &iter : trans_roads) {
-    VarTransRoad *trans_road;
-    trans_road = VarManager::Instance(session_id_)->GetTransRoad(iter.first);
-    if (trans_road == nullptr) {
-      GELOGW("Fail to find trans road of var[%s].", iter.first.c_str());
-      return false;
-    }
-    if (trans_road->size() != iter.second.size()) {
-      GELOGW("The TransRoad of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-      return false;
-    }
-    // Compare every trans node in trans road.
-    for (size_t idx = 0; idx < trans_road->size(); idx += 1) {
-      if (!(trans_road->at(idx).node_type == iter.second.at(idx).node_type &&
-            trans_road->at(idx).input == iter.second.at(idx).input &&
-            trans_road->at(idx).output == iter.second.at(idx).output)) {
-        GELOGW("The TransRoad of variable[%s] in cache is different from VarManager.", iter.first.c_str());
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsVarManagerParamSameAsCache(Json &json) const {
-  if (!json.is_object()) {
-    GELOGW("Input param json type should be object.");
-    return false;
-  }
-  try {
-    if (json[kSessionId].get<uint64_t>() != session_id_) {
-      GELOGW("Check VarManager cache failed.[sessionId]");
-      return false;
-    }
-    if (json[kDeviceId].get<uint32_t>() != VarManager::Instance(session_id_)->DeviceId()) {
-      GELOGW("Check VarManager cache failed.[deviceId]");
-      return false;
-    }
-    if (json[kJobId].get<uint64_t>() != VarManager::Instance(session_id_)->JobId()) {
-      GELOGW("Check VarManager cache failed.[jobId]");
-      return false;
-    }
-    if (json[kGraphMemMaxSize].get<size_t>() != VarManager::Instance(session_id_)->GetGraphMemoryMaxSize()) {
-      GELOGW("Check VarManager cache failed.[graphMemMaxSize]");
-      return false;
-    }
-    if (json[kVarMemMaxSize].get<size_t>() != VarManager::Instance(session_id_)->GetVarMemMaxSize()) {
-      GELOGW("Check VarManager cache failed.[varMemMaxSize]");
-      return false;
-    }
-    if (json[kVarMemLogicBase].get<size_t>() != VarManager::Instance(session_id_)->GetVarMemLogicBase()) {
-      GELOGW("Check VarManager cache failed.[varMemLogicBase]");
-      return false;
-    }
-    if (json[kUseMaxMemSize].get<size_t>() != VarManager::Instance(session_id_)->GetUseMaxMemorySize()) {
-      GELOGW("Check VarManager cache failed.[useMaxMemSize]");
-      return false;
-    }
-  } catch (const std::exception &e) {
-    GELOGW("Fail to check VarManager json. Error message: %s", e.what());
-    return false;
-  }
-  return true;
-}
-
-bool ModelCacheHelper::IsVarManagerSameAsCache(Json &json) const {
-  if (!json.is_object()) {
-    GELOGW("Input param json type should be object.");
-    return false;
-  }
-  try {
-    if (!IsVarManagerParamSameAsCache(json)) {
-      GELOGW("Check VarManager cache failed.[Param]");
-      return false;
-    }
-    Json mem_resource_json = move(json[kMemResourceMap]);
-    auto ret = IsMemResourceSameAsCache(mem_resource_json);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[MemResource]");
-      return false;
-    }
-    Json var_resource_json = move(json[kVarResource]);
-    ret = IsAllocatedGraphIdSameAsCache(var_resource_json[kAllocatedGraphId]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[AllocatedGraphId]");
-      return false;
-    }
-    ret = IsChangedGraphIdSameAsCache(var_resource_json[kChangedGraphId]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[ChangedGraphId]");
-      return false;
-    }
-    ret = IsBroadcastInfoSameAsCache(var_resource_json[kVarBroadcastInfo]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[VarBroadcastInfo]");
-      return false;
-    }
-    ret = IsCurVarTensorDescSameAsCache(var_resource_json[kCurVarTensorDescMap]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[CurVarTensorDesc]");
-      return false;
-    }
-    ret = IsVarAddrMgrMapSameAsCache(var_resource_json[kVarAddrMgrMap]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[VarAddrMgrMap]");
-      return false;
-    }
-    ret = IsTransRoadsSameAsCache(var_resource_json[kTransRoads]);
-    if (!ret) {
-      GELOGW("Check VarManager cache failed.[TransRoads]");
-      return false;
-    }
-  } catch (const std::exception &e) {
-    GELOGW("Fail to check VarManager json. Error message: %s", e.what());
Error message: %s", e.what()); - return false; - } - return true; -} - -Status ModelCacheHelper::RecoverMemResource(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::map var_mem_size; - auto ret = ParseMemResourceFromJson(json, var_mem_size); - if (ret != SUCCESS) { - GELOGW("Fail to parse MemResource from Json."); - return ret; - } - for (const auto &iter : var_mem_size) { - ret = VarManager::Instance(session_id_)->UpdateVarMemSize(iter.first, iter.second); - if (ret != SUCCESS) { - GELOGW("Fail to recover var mem size."); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::RecoverAllocatedGraphId(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::map allocated_graph_id; - auto ret = ParseAllocatedGraphIdFromJson(json, allocated_graph_id); - if (ret != SUCCESS) { - GELOGW("Fail to parse AllocatedGraphId from Json."); - return ret; - } - for (const auto &iter : allocated_graph_id) { - ret = VarManager::Instance(session_id_)->SetAllocatedGraphId(iter.first, iter.second); - if (ret != SUCCESS) { - GELOGW("Fail to recover allocated graph id."); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::RecoverChangedGraphId(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::map changed_graph_id; - auto ret = ParseChangedGraphIdFromJson(json, changed_graph_id); - if (ret != SUCCESS) { - GELOGW("Fail to parse AllocatedGraphId from Json."); - return ret; - } - for (const auto &iter : changed_graph_id) { - ret = VarManager::Instance(session_id_)->SetChangedGraphId(iter.first, iter.second); - if (ret != SUCCESS) { - GELOGW("Fail to recover changed graph id."); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::RecoverVarAddrAndTensorDesc(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::vector> var_addr_mgr_vector; - std::set var_offset_set; - auto ret = ParseVarAddrMgrMapFromJson(json, var_addr_mgr_vector, var_offset_set); - if (ret != SUCCESS) { - GELOGW("Fail to parse VarAddrMgrMap from Json."); - return ret; - } - for (const auto &iter : var_addr_mgr_vector) { - const VarAddrMgr &tensor_addr_mgr = iter.second; - const bool var_exist = VarManager::Instance(session_id_)->IsVarExist(iter.first, tensor_addr_mgr.tensor_desc); - // SaveVarVddr if var does not exist, the logic address will be recorded by VarManager - if (!var_exist) { - auto logic_address = reinterpret_cast(reinterpret_cast(tensor_addr_mgr.address)); - auto offset = (tensor_addr_mgr.offset); - // Check logic address and offset - if (logic_address - offset != VarManager::Instance(session_id_)->GetVarMemLogicBase()) { - GELOGW("Check logic_address[%lu] and offset [%lu] of %s failed, var mem logic base is %lu, abandon", - logic_address, offset, iter.first.c_str(), VarManager::Instance(session_id_)->GetVarMemLogicBase()); - return PARAM_INVALID; - } - // Offset is needed by SaveVarVddr instead of logic address - ret = VarManager::Instance(session_id_)->SaveVarAddr(iter.first, tensor_addr_mgr.tensor_desc, - reinterpret_cast(reinterpret_cast(offset)), - tensor_addr_mgr.memory_type); - if (ret != SUCCESS) { - GELOGW("Fail to recover 
VarAddr or TensorDesc of var[%s].", iter.first.c_str()); - return ret; - } - } - // SetVarAddr to update cur_var_tensor_desc_map_ - ret = VarManager::Instance(session_id_) - ->SetVarAddr(iter.first, tensor_addr_mgr.tensor_desc, tensor_addr_mgr.address, tensor_addr_mgr.memory_type); - if (ret != SUCCESS) { - GELOGW("Fail to recover VarAddr or TensorDesc desc of var[%s].", iter.first.c_str()); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::RecoverBroadcastInfo(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::unordered_map var_broadcast_info; - auto ret = ParseBroadcastInfoFromJson(json, var_broadcast_info); - if (ret != SUCCESS) { - GELOGW("Fail to parse BroadcastInfo from Json."); - return ret; - } - for (const auto &iter : var_broadcast_info) { - VarBroadCastInfo broadcast_info; - ret = VarManager::Instance(session_id_)->SaveBroadCastInfo(graph_id_, iter.second); - if (ret != SUCCESS) { - GELOGW("Fail to recover broadcast info of var[%s].", iter.first.c_str()); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::RecoverTransRoads(const Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::unordered_map> trans_roads; - auto ret = ParseTransRoadsFromJson(json, trans_roads); - if (ret != SUCCESS) { - GELOGW("Fail to parse TransRoads from Json."); - return ret; - } - for (const auto &iter : trans_roads) { - ret = VarManager::Instance(session_id_)->SetTransRoad(iter.first, iter.second); - if (ret != SUCCESS) { - GELOGW("Fail to find trans road of var[%s].", iter.first.c_str()); - return ret; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::TensorDescToJson(const GeTensorDesc &ge_tensor_desc, Json &json) { - if (!(json.is_null() || json.is_object())) { - GELOGW("Input param json type should be null or object."); - return PARAM_INVALID; - } - try { - json[kDataType] = static_cast(ge_tensor_desc.GetDataType()); - json[kOriginDataType] = static_cast(ge_tensor_desc.GetOriginDataType()); - json[kLayout] = static_cast(ge_tensor_desc.GetFormat()); - json[kOriginLayout] = static_cast(ge_tensor_desc.GetOriginFormat()); - json[kShape] = ge_tensor_desc.GetShape().GetDims(); - json[kOriginShape] = ge_tensor_desc.GetOriginShape().GetDims(); - uint32_t real_dim_cnt = 0; - (void)TensorUtils::GetRealDimCnt(ge_tensor_desc, real_dim_cnt); // [No need to check value] - json[kRealDimCnt] = real_dim_cnt; - } catch (const std::exception &e) { - GELOGW("Fail to trans GeTensorDesc to json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::JsonToTensorDesc(const Json &json, ge::GeTensorDesc &ge_tensor_desc) { - if (!json.is_object()) { - GELOGW("Input param json type should be object."); - return PARAM_INVALID; - } - try { - ge_tensor_desc.SetDataType(static_cast(json[kDataType].get())); - ge_tensor_desc.SetOriginDataType(static_cast(json[kOriginDataType].get())); - ge_tensor_desc.SetFormat(static_cast(json[kLayout].get())); - ge_tensor_desc.SetOriginFormat(static_cast(json[kOriginLayout].get())); - GeShape shape(json[kShape].get>()); - ge_tensor_desc.SetShape(shape); - GeShape origin_shape(json[kOriginShape].get>()); - ge_tensor_desc.SetOriginShape(origin_shape); - auto real_dim_cnt = json[kRealDimCnt].get(); - (void)TensorUtils::SetRealDimCnt(ge_tensor_desc, real_dim_cnt); // [No need to check value] - } catch (const std::exception &e) { - GELOGW("Fail to trans Json to GeTensorDesc. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetNodesHashMapJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - map hash_map; - GetNodesHash(hash_map); - for (const auto &iter : hash_map) { - Json node_hash_json; - try { - node_hash_json[kName] = iter.first; - node_hash_json[kHash] = iter.second; - json.emplace_back(move(node_hash_json)); - } catch (const std::exception &e) { - GELOGW("Fail to trans node cache to json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::GetMemResourceMap(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - const auto total_size = VarManager::Instance(session_id_)->GetVarMemMaxSize(); - const auto var_mem_size = VarManager::Instance(session_id_)->GetVarMemSize(RT_MEMORY_HBM); - Json mem_resource_json; - try { - mem_resource_json[kMemType] = RT_MEMORY_HBM; - mem_resource_json[kTotalSize] = total_size; - mem_resource_json[kVarMemSize] = var_mem_size; - json.emplace_back(move(mem_resource_json)); - } catch (const std::exception &e) { - GELOGW("Fail to trans MemResourceMap to json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetVarAddrMgrMapJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - std::unordered_map var_addr_mgr_map; - VarManager::Instance(session_id_)->GetAllVarAddrMgr(var_addr_mgr_map); - try { - for (const auto &iter : var_addr_mgr_map) { - Json var_addr_json; - string name; - GetVarNameFromVarKey(iter.first, iter.second.tensor_desc, name); - var_addr_json[kName] = name; - var_addr_json[kAddress] = static_cast(reinterpret_cast(iter.second.address)); - var_addr_json[kMemoryType] = iter.second.memory_type; - var_addr_json[kOffset] = iter.second.offset; - - // Copy tensor desc to json. - Json tensor_desc_json; - auto ret = TensorDescToJson(iter.second.tensor_desc, tensor_desc_json); - if (ret != SUCCESS) { - GELOGW("Fail to trans tensor desc to json."); - return INTERNAL_ERROR; - } - var_addr_json[kTensorDesc] = move(tensor_desc_json); - - json.emplace_back(move(var_addr_json)); - } - } catch (const std::exception &e) { - GELOGW("Fail to trans VarAddrMgrMap to json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetCurVarTensorDescMapJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - try { - for (const auto &name : var_names_) { - Json cur_tensor_desc_json; - GeTensorDesc tensor_desc; - auto ret = VarManager::Instance(session_id_)->GetCurVarDesc(name, tensor_desc); - if (ret != SUCCESS) { - GELOGI("Get variable[%s] current tensor desc failed. It will be skipped.", name.c_str()); - continue; - } - cur_tensor_desc_json[kName] = name; - - Json tensor_desc_json; - ret = TensorDescToJson(tensor_desc, tensor_desc_json); - if (ret != SUCCESS) { - GELOGW("Fail to trans tensor desc to json."); - return INTERNAL_ERROR; - } - cur_tensor_desc_json[kTensorDesc] = move(tensor_desc_json); - json.emplace_back(move(cur_tensor_desc_json)); - } - } catch (const std::exception &e) { - GELOGW("Fail to trans CurVarTensorDescMap to json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetTransRoadsJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - try { - for (const auto &name : var_names_) { - auto trans_road = VarManager::Instance(session_id_)->GetTransRoad(name); - if (trans_road == nullptr) { - continue; - } - // Json object, variable name and trans road - Json trans_road_map_json; - trans_road_map_json[kName] = name; - - Json trans_road_json; - Status ret; - // Add nodes' info to json - for (const auto &trans_node_info : *trans_road) { - Json trans_node_info_json; - trans_node_info_json[kNodeType] = trans_node_info.node_type; - Json input_tensor_desc_json; - ret = TensorDescToJson(trans_node_info.input, input_tensor_desc_json); - if (ret != SUCCESS) { - GELOGW("Fail to trans tensor desc to json."); - return INTERNAL_ERROR; - } - trans_node_info_json[kInputTensorDesc] = move(input_tensor_desc_json); - Json output_tensor_desc_json; - ret = TensorDescToJson(trans_node_info.output, output_tensor_desc_json); - if (ret != SUCCESS) { - GELOGW("Fail to trans tensor desc to json."); - return INTERNAL_ERROR; - } - trans_node_info_json[kOutputTensorDesc] = move(output_tensor_desc_json); - trans_road_json.emplace_back(move(trans_node_info_json)); - } - trans_road_map_json[kTransRoad] = move(trans_road_json); - json.emplace_back(move(trans_road_map_json)); - } - } catch (const std::exception &e) { - GELOGW("Fail to trans VarToTransRoad to json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetChangedGraphIdJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - for (const auto &name : var_names_) { - uint32_t changed_graph_id = 0; - Status ret = VarManager::Instance(session_id_)->GetChangedGraphId(name, changed_graph_id); - if (ret != SUCCESS) { - continue; - } - Json name_and_changed_graph_id; - try { - name_and_changed_graph_id[kName] = name; - name_and_changed_graph_id[kGraphId] = changed_graph_id; - json.emplace_back(move(name_and_changed_graph_id)); - } catch (const std::exception &e) { - GELOGW("Fail to trans ChangedGraphId to json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::GetAllocatedGraphIdJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - for (const auto &name : var_names_) { - uint32_t allocated_graph_id = 0; - Status ret = VarManager::Instance(session_id_)->GetAllocatedGraphId(name, allocated_graph_id); - if (ret != SUCCESS) { - continue; - } - Json name_and_allocated_graph_id; - try { - name_and_allocated_graph_id[kName] = name; - name_and_allocated_graph_id[kGraphId] = allocated_graph_id; - json.emplace_back(move(name_and_allocated_graph_id)); - } catch (const std::exception &e) { - GELOGW("Fail to trans AllocatedGraphId to json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::GetBroadcastInfoJson(Json &json) const { - if (!(json.is_null() || json.is_array())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - for (const auto &name : var_names_) { - VarBroadCastInfo var_broadcast_info; - Status ret = VarManager::Instance(session_id_)->GetBroadCastInfo(graph_id_, name, var_broadcast_info); - if (ret != SUCCESS) { - continue; - } - Json var_broadcast_info_json; - try { - var_broadcast_info_json[kName] = name; - var_broadcast_info_json[kBroadcastName] = var_broadcast_info.broadcast_name; - var_broadcast_info_json[kIdx] = var_broadcast_info.idx; - var_broadcast_info_json[kInputOffset] = var_broadcast_info.input_offset; - var_broadcast_info_json[kInputSize] = var_broadcast_info.input_size; - var_broadcast_info_json[kOutputOffset] = var_broadcast_info.output_offset; - var_broadcast_info_json[kOutputSize] = var_broadcast_info.output_size; - json.emplace_back(move(var_broadcast_info_json)); - } catch (const std::exception &e) { - GELOGW("Fail to trans VarBroadcastInfo to json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::GetVarResourceJson(Json &json) const { - if (!(json.is_null() || json.is_object())) { - GELOGW("Input param json type should be null or object."); - return PARAM_INVALID; - } - Json var_addr_mgr_map_json; - Status ret = GetVarAddrMgrMapJson(var_addr_mgr_map_json); - if (ret != SUCCESS) { - GELOGW("GetVarAddrMgrMapJson failed."); - return INTERNAL_ERROR; - } - - Json cur_var_tensor_desc_map_json; - ret = GetCurVarTensorDescMapJson(cur_var_tensor_desc_map_json); - if (ret != SUCCESS) { - GELOGW("GetCurVarTensorDescMapJson failed."); - return INTERNAL_ERROR; - } - - Json trans_roads_json; - ret = GetTransRoadsJson(trans_roads_json); - if (ret != SUCCESS) { - GELOGW("GetTransRoadsJson failed."); - return INTERNAL_ERROR; - } - - Json changed_graph_id_json; - ret = GetChangedGraphIdJson(changed_graph_id_json); - if (ret != SUCCESS) { - GELOGW("GetChangedGraphIdJson failed."); - return INTERNAL_ERROR; - } - - Json allocated_graph_id_json; - ret = GetAllocatedGraphIdJson(allocated_graph_id_json); - if (ret != SUCCESS) { - GELOGW("GetAllocatedGraphIdJson failed."); - return INTERNAL_ERROR; - } - - Json var_broadcast_info_json; - ret = GetBroadcastInfoJson(var_broadcast_info_json); - if (ret != SUCCESS) { - GELOGW("GetBroadcastInfoJson failed."); - return INTERNAL_ERROR; - } - - try { - json[kVarAddrMgrMap] = move(var_addr_mgr_map_json); - json[kCurVarTensorDescMap] = move(cur_var_tensor_desc_map_json); - json[kTransRoads] = move(trans_roads_json); - json[kChangedGraphId] = move(changed_graph_id_json); - json[kAllocatedGraphId] = move(allocated_graph_id_json); - json[kVarBroadcastInfo] = move(var_broadcast_info_json); - } catch (const exception &e) { - GELOGW("Fail to generate VarResource json. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetVarManagerJson(Json &json) const { - if (!(json.is_null() || json.is_object())) { - GELOGW("Input param json type should be null or object."); - return PARAM_INVALID; - } - - Json mem_resource_map_json; - auto ret = GetMemResourceMap(mem_resource_map_json); - if (ret != SUCCESS) { - GELOGW("GetMemResourceMap failed."); - return INTERNAL_ERROR; - } - - Json var_resource_json; - ret = GetVarResourceJson(var_resource_json); - if (ret != SUCCESS) { - GELOGW("GetVarResourceJson failed."); - return INTERNAL_ERROR; - } - - try { - json[kSessionId] = session_id_; - json[kDeviceId] = VarManager::Instance(session_id_)->DeviceId(); - json[kJobId] = VarManager::Instance(session_id_)->JobId(); - json[kGraphMemMaxSize] = VarManager::Instance(session_id_)->GetGraphMemoryMaxSize(); - json[kVarMemMaxSize] = VarManager::Instance(session_id_)->GetVarMemMaxSize(); - json[kVarMemLogicBase] = VarManager::Instance(session_id_)->GetVarMemLogicBase(); - json[kUseMaxMemSize] = VarManager::Instance(session_id_)->GetUseMaxMemorySize(); - json[kMemResourceMap] = move(mem_resource_map_json); - json[kVarResource] = move(var_resource_json); - } catch (const exception &e) { - GELOGW("Fail to generate VarManager json. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::SaveVarManagerToCache(bool before_build) const { - if (!is_cache_path_valid_for_output) { - GELOGW("Invalid cache path."); - return FAILED; - } - Json var_manager_json; - auto ret = GetVarManagerJson(var_manager_json); - if (ret != SUCCESS) { - GELOGW("Fail to generate VarManager json."); - return FAILED; - } - string var_manager_path = to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + - (before_build ? kBeforeVarManagerSuffix : kAfterVarManagerSuffix); - ret = SaveJsonToFile(var_manager_path, var_manager_json); - if (ret != SUCCESS) { - GELOGW("Fail to save VarManager info to json file, path: %s.", cache_path_.c_str()); - return ret; - } - return SUCCESS; -} - -Status ModelCacheHelper::SaveOmModelToCache(const GeModelPtr &ge_model) const { - if (!is_cache_path_valid_for_output) { - GELOGW("Invalid cache path."); - return FAILED; - } - string om_path = RealPath(cache_path_.c_str()); - if (om_path.empty()) { - GELOGW("file path is invalid. please check path om: %s", cache_path_.c_str()); - return FAILED; - } - string cache_om_path = cache_path_; - cache_om_path += (to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kOmSuffix); - GELOGI("SaveOmModelToCache: start to save om model : %s", cache_om_path.c_str()); - ModelHelper model_helper; - SaveParam save_param; - ModelBufferData model; - Status ret = model_helper.SaveToOmModel(ge_model, save_param, cache_om_path, model); - if (ret != SUCCESS) { - GELOGW("SaveOmModelToCache: save mode failed. ret = %u", ret); - return ret; - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseMemResourceFromJson(const Json &json, map &mem_resource) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - mem_resource.clear(); - for (const Json &mem_resource_json : json) { - try { - rtMemType_t mem_type = mem_resource_json[kMemType].get(); - uint64_t var_mem_size = mem_resource_json[kVarMemSize].get(); - mem_resource[mem_type] = var_mem_size; - } catch (const exception &e) { - GELOGW("Fail to trans Json to MemResource. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseVarAddrMgrMapFromJson( - const Json &json, std::vector> &var_addr_mgr_vector, - std::set &var_offset_set) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - var_addr_mgr_vector.clear(); - var_offset_set.clear(); - for (const Json &var_addr_json : json) { - VarAddrMgr var_addr_mgr; - try { - auto logic_address = var_addr_json[kAddress].get(); - auto address = reinterpret_cast(reinterpret_cast(logic_address)); - var_addr_mgr.address = address; - var_addr_mgr.offset = var_addr_json[kOffset].get(); - var_addr_mgr.memory_type = var_addr_json[kMemoryType].get(); - auto ret = JsonToTensorDesc(var_addr_json[kTensorDesc], var_addr_mgr.tensor_desc); - if (ret != SUCCESS) { - GELOGW("Fail to trans json to tensor desc."); - return ret; - } - var_addr_mgr_vector.emplace_back(var_addr_json[kName].get(), move(var_addr_mgr)); - var_offset_set.insert(logic_address); - } catch (const exception &e) { - GELOGW("Fail to trans Json to VarAddrMgr. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseCurVarTensorDescMapFromJson( - const Json &json, std::unordered_map &cur_var_tensor_desc_map) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - cur_var_tensor_desc_map.clear(); - for (const Json &tensor_desc_json : json) { - GeTensorDesc tensor_desc; - try { - auto ret = JsonToTensorDesc(tensor_desc_json[kTensorDesc], tensor_desc); - if (ret != SUCCESS) { - GELOGW("Fail to trans json to tensor desc."); - return ret; - } - cur_var_tensor_desc_map[tensor_desc_json[kName].get()] = move(tensor_desc); - } catch (const exception &e) { - GELOGW("Fail to trans Json to VarAddrMgr. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseTransRoadsFromJson( - const Json &json, std::unordered_map> &trans_roads) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - trans_roads.clear(); - try { - for (const Json &name_trans_road_json : json) { - const Json &trans_road_json = name_trans_road_json[kTransRoad]; - if (!(trans_road_json.is_array() || trans_road_json.is_null())) { - GELOGW("%s json type should be null or object.", kTransRoad); - return PARAM_INVALID; - } - vector trans_road; - for (const Json &trans_node_json : trans_road_json) { - TransNodeInfo trans_node_info; - trans_node_info.node_type = trans_node_json[kNodeType]; - GeTensorDesc input_tensor_desc; - auto ret = JsonToTensorDesc(trans_node_json[kInputTensorDesc], input_tensor_desc); - if (ret != SUCCESS) { - GELOGW("Fail to trans json to tensor desc."); - return ret; - } - trans_node_info.input = move(input_tensor_desc); - GeTensorDesc output_tensor_desc; - ret = JsonToTensorDesc(trans_node_json[kOutputTensorDesc], output_tensor_desc); - if (ret != SUCCESS) { - GELOGW("Fail to trans json to tensor desc."); - return ret; - } - trans_node_info.output = move(output_tensor_desc); - trans_road.emplace_back(move(trans_node_info)); - } - trans_roads[name_trans_road_json[kName].get()] = move(trans_road); - } - } catch (const exception &e) { - GELOGW("Fail to trans Json to TransRoads. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseChangedGraphIdFromJson(const Json &json, - std::map &changed_graph_id) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - changed_graph_id.clear(); - for (const Json &name_graph_id_json : json) { - try { - changed_graph_id[name_graph_id_json[kName].get()] = name_graph_id_json[kGraphId].get(); - } catch (const exception &e) { - GELOGW("Fail to trans Json to changed graph id. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseAllocatedGraphIdFromJson(const Json &json, - std::map &allocated_graph_id) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - allocated_graph_id.clear(); - for (const Json &name_graph_id_json : json) { - try { - allocated_graph_id[name_graph_id_json[kName].get()] = name_graph_id_json[kGraphId].get(); - } catch (const exception &e) { - GELOGW("Fail to trans Json to allocated graph id. 
Error message: %s", e.what()); - return INTERNAL_ERROR; - } - } - return SUCCESS; -} - -Status ModelCacheHelper::ParseBroadcastInfoFromJson( - const Json &json, std::unordered_map &var_broadcast_info) { - if (!(json.is_array() || json.is_null())) { - GELOGW("Input param json type should be null or array."); - return PARAM_INVALID; - } - for (const Json &broadcast_info_json : json) { - VarBroadCastInfo broadcast_info; - try { - broadcast_info.var_name = broadcast_info_json[kName].get(); - broadcast_info.broadcast_name = broadcast_info_json[kBroadcastName].get(); - broadcast_info.idx = broadcast_info_json[kIdx].get(); - broadcast_info.input_offset = broadcast_info_json[kInputOffset].get(); - broadcast_info.input_size = broadcast_info_json[kInputSize].get(); - broadcast_info.output_offset = broadcast_info_json[kOutputOffset].get(); - broadcast_info.output_size = broadcast_info_json[kOutputSize].get(); - } catch (const exception &e) { - GELOGW("Fail to trans Json to VarBroadCastInfo. Error message: %s", e.what()); - return INTERNAL_ERROR; - } - var_broadcast_info[broadcast_info.var_name] = broadcast_info; - } - return SUCCESS; -} - -Status ModelCacheHelper::LoadOmModelFromCache(GeModelPtr &ge_model) const { - string cache_om = cache_path_ + to_string(graph_id_) + "_" + to_string(graph_id_run_times_[graph_id_]) + kOmSuffix; - if (!CheckInputPathValid(cache_om)) { - GELOGW("Invalid cache path for input:%s.", cache_om.c_str()); - return FAILED; - } - string om_path = RealPath(cache_om.c_str()); - if (om_path.empty()) { - GELOGW("file path is invalid. please check file om: %s", om_path.c_str()); - return FAILED; - } - GELOGI("load model data from file: %s", om_path.c_str()); - Status ret; - int32_t priority = 0; - ModelData model_data; - ret = ModelParserBase::LoadFromFile(om_path.c_str(), priority, model_data); - if (ret != SUCCESS) { - GELOGW("LoadOmModelFromCache: Load model from file failed. ret = %u", ret); - return ret; - } - std::function callback = [&]() { - if (model_data.model_data != nullptr) { - delete[] reinterpret_cast(model_data.model_data); - model_data.model_data = nullptr; - } - }; - GE_MAKE_GUARD(release, callback); - - ModelHelper model_helper; - ret = model_helper.LoadModel(model_data); - if (ret != SUCCESS) { - GELOGW("LoadOmModelFromCache: Load model from data failed. ret = %u", ret); - return ret; - } - ge_model = model_helper.GetGeModel(); - ret = RecompileNodes(ge_model); - if (ret != SUCCESS) { - GELOGW("LoadOmModelFromCache: recompile nodes failed. 
ret = %u", ret); - return ret; - } - return SUCCESS; -} - -Status ModelCacheHelper::GetVarNameFromVarKey(const string &var_key, const GeTensorDesc &tensor_desc, - string &var_name) { - std::string::size_type underline_idx = var_key.rfind('_'); - if (underline_idx == std::string::npos) { - GELOGW("Invalid var key: underline not found"); - return FAILED; - } - std::string::size_type format_idx = - var_key.rfind(std::to_string(static_cast(tensor_desc.GetFormat())), underline_idx); - if (format_idx == std::string::npos) { - GELOGW("Invalid var key: format not found"); - return FAILED; - } - var_name = var_key.substr(0, format_idx); - return SUCCESS; -} -} // namespace ge diff --git a/ge/common/helper/model_cache_helper.h b/ge/common/helper/model_cache_helper.h deleted file mode 100755 index f0831075..00000000 --- a/ge/common/helper/model_cache_helper.h +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ -#define GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ - -#include -#include -#include - -#include "external/ge/ge_api_error_codes.h" -#include "graph/compute_graph.h" -#include "graph/manager/graph_var_manager.h" -#include "common/model/ge_model.h" - -namespace ge { -using Json = nlohmann::json; - -struct CacheInfo { - size_t node_num; - size_t edge_num; - size_t graph_hash; - map nodes_hash; - CacheInfo() : node_num(0), edge_num(0), graph_hash(0) {} -}; - -class ModelCacheHelper { - public: - ModelCacheHelper(uint64_t session_id, uint32_t graph_id, ComputeGraphPtr &compute_graph); - ~ModelCacheHelper(); - - Status SaveCacheInfoToCache () const; - Status SaveVarManagerToCache(bool before_build) const; - Status SaveOmModelToCache(const GeModelPtr &ge_model) const; - bool IsModelCacheHit() const; - Status RecoverVarManagerFromCache() const; - Status LoadOmModelFromCache(GeModelPtr &ge_model) const; - Status RefreshComputeGraph(const ComputeGraphPtr &compute_graph); - Status ClearCache(uint32_t graph_id) const; - - private: - Status GetComputeGraphHash(size_t &hash) const; - Status GetNodesHash(map &hash_map) const; - Status GetCacheInfo(CacheInfo &cache_info) const; - - Status RecoverMemResource(const Json &json) const; - Status RecoverAllocatedGraphId(const Json &json) const; - Status RecoverChangedGraphId(const Json &json) const; - Status RecoverVarAddrAndTensorDesc(const Json &json) const; - Status RecoverBroadcastInfo(const Json &json) const; - Status RecoverTransRoads(const Json &json) const; - static Status GetNodesNeedRecompile(ComputeGraphPtr &graph, vector &nodes); - static Status RecompileNodes(GeModelPtr &ge_model); - - bool IsNodeHashSameAsCache(const map &hash_map) const; - bool IsMemResourceSameAsCache(Json &json) const; - bool IsChangedGraphIdSameAsCache(Json &json) const; - bool IsAllocatedGraphIdSameAsCache(Json &json) const; - bool IsCurVarTensorDescSameAsCache(Json &json) const; - bool IsVarAddrMgrMapSameAsCache(Json &json) const; - bool 
IsBroadcastInfoSameAsCache(Json &json) const; - bool IsTransRoadsSameAsCache(Json &json) const; - bool IsVarManagerSameAsCache(Json &json) const; - bool IsVarManagerParamSameAsCache(Json &json) const; - - Status SaveJsonToFile(const string &file_name, const Json &json) const; - Status LoadJsonFromFile(const string &file_name, Json &json) const; - - Status GetNodesHashMapJson(Json &json) const; - Status GetMemResourceMap(Json &json) const; - Status GetVarAddrMgrMapJson(Json &json) const; - Status GetCurVarTensorDescMapJson(Json &json) const; - Status GetTransRoadsJson(Json &json) const; - Status GetChangedGraphIdJson(Json &json) const; - Status GetAllocatedGraphIdJson(Json &json) const; - Status GetBroadcastInfoJson(Json &json) const; - Status GetVarResourceJson(Json &json) const; - Status GetVarManagerJson(Json &json) const; - - static Status TensorDescToJson(const GeTensorDesc &ge_tensor_desc, Json &json); - static Status JsonToTensorDesc(const Json &json, GeTensorDesc &ge_tensor_desc); - static Status ParseMemResourceFromJson(const Json &json, map &mem_resource); - static Status ParseVarAddrMgrMapFromJson(const Json &json, - std::vector> &var_addr_mgr_vector, - std::set &var_offset_set); - static Status ParseCurVarTensorDescMapFromJson( - const Json &json, std::unordered_map &cur_var_tensor_desc_map); - static Status ParseTransRoadsFromJson(const Json &json, - std::unordered_map> &trans_roads); - static Status ParseChangedGraphIdFromJson(const Json &json, - std::map &changed_graph_id); - static Status ParseAllocatedGraphIdFromJson(const Json &json, - std::map &allocated_graph_id); - static Status ParseBroadcastInfoFromJson(const Json &json, - std::unordered_map &var_broadcast_info); - static Status GetVarNameFromVarKey(const string &var_key, const GeTensorDesc &tensor_desc, string &var_name); - - uint64_t session_id_; - uint32_t graph_id_; - string cache_path_; - ComputeGraphPtr compute_graph_; - std::set var_names_; - bool is_cache_path_valid_for_output; - static map graph_id_run_times_; -}; - -using ModelCacheHelperPtr = std::shared_ptr; -} // namespace ge - -#endif // GE_COMMON_HELPER_MODEL_CACHE_HELPER_H_ diff --git a/ge/executor/ge_executor.cc b/ge/executor/ge_executor.cc index 73cd7bb5..76cde2b9 100755 --- a/ge/executor/ge_executor.cc +++ b/ge/executor/ge_executor.cc @@ -27,6 +27,7 @@ #include "graph/load/graph_loader.h" #include "graph/load/model_manager/model_manager.h" #include "graph/manager/graph_mem_manager.h" +#include "graph/manager/graph_var_manager.h" #include "single_op/single_op_manager.h" #include "graph/load/model_manager/davinci_model.h" #include "opskernel_manager/ops_kernel_builder_manager.h" diff --git a/ge/generator/ge_generator.cc b/ge/generator/ge_generator.cc index 45eaed59..1a80a3e0 100644 --- a/ge/generator/ge_generator.cc +++ b/ge/generator/ge_generator.cc @@ -30,6 +30,7 @@ #include "graph/debug/ge_attr_define.h" #include "graph/ge_context.h" #include "graph/manager/graph_manager.h" +#include "graph/manager/graph_var_manager.h" #include "graph/manager/util/rt_context_util.h" #include "graph/operator_factory_impl.h" #include "graph/opsproto_manager.h" diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 7d72d85b..d1237f4e 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -248,7 +248,6 @@ Status GraphManager::Finalize() { Analyzer::GetInstance()->DestroyGraphJsonObject(session_id, graph_id); } graph_map_.clear(); - cache_helper_map_.clear(); graph_count_.clear(); // graph context @@ 
-1005,13 +1004,6 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vectorGetGraphId(), compute_graph, ge_model); - if (save_ret != SUCCESS) { - GELOGW("Fail to save cache."); - } GEEVENT("[GEPERFTRACE] GE PreRun End"); return SUCCESS; } @@ -1063,18 +1055,15 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std: graph_node->GetGraphId()); return PARAM_INVALID; } - GeModelPtr ge_model = nullptr; - // check need incre build. - ret = IncreBuild(graph_node, ge_model); + + ret = PreRun(graph_node, inputs, ge_root_model, session_id); + // release rts generate context + RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId()); if (ret != SUCCESS) { - ret = PreRun(graph_node, inputs, ge_root_model, session_id); - // release rts generate context - RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId()); - if (ret != SUCCESS) { - GELOGE(ret, "[Call][PreRun] Failed, graph_id:%u, session_id:%lu.", graph_node->GetGraphId(), session_id); - return ret; - } + GELOGE(ret, "[Call][PreRun] Failed, graph_id:%u, session_id:%lu.", graph_node->GetGraphId(), session_id); + return ret; } + ret = LoadGraph(ge_root_model, graph_node); if (ret != SUCCESS) { GELOGE(ret, "[Load][Graph] Failed, graph_id:%u.", graph_node->GetGraphId()); @@ -1104,91 +1093,6 @@ Status GraphManager::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphN return executor_->LoadGraph(ge_root_model, graph_node); } -Status GraphManager::LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper, - GeModelPtr &ge_model) { - auto graph_id = graph_node->GetGraphId(); - auto ret = cache_helper->LoadOmModelFromCache(ge_model); - if (ret != SUCCESS) { - GELOGW("Fail to load om model from cache."); - if (cache_helper->ClearCache(graph_id) != SUCCESS) { - GELOGW("Fail to clear cache of graph %u.", graph_id); - } - return FAILED; - } - ret = cache_helper->RecoverVarManagerFromCache(); - if (ret != SUCCESS) { - GELOGW("Fail to recover VarManager from cache."); - if (cache_helper->ClearCache(graph_id) != SUCCESS) { - GELOGW("Fail to clear cache of graph %u.", graph_id); - } - return FAILED; - } - ComputeGraphPtr compute_graph_in_model = GraphUtils::GetComputeGraph(ge_model->GetGraph()); - if (compute_graph_in_model == nullptr) { - GELOGW("Error occurred when get compute graph from om, abandon."); - return FAILED; - } else { - graph_node->SetComputeGraph(compute_graph_in_model); - graph_node->SetGeModel(ge_model); - GELOGI("Load model and graph form cache om file."); - } - return SUCCESS; -} - -Status GraphManager::SaveCacheBeforeBuild(uint32_t graph_id, const ModelCacheHelperPtr &cache_helper) { - auto ret = cache_helper->SaveCacheInfoToCache(); - if (ret != SUCCESS) { - GELOGW("Fail to save cache info of graph[%d] to cache.", graph_id); - return FAILED; - } - ret = cache_helper->SaveVarManagerToCache(true); - if (ret != SUCCESS) { - GELOGW("Fail to save var manager to cache."); - cache_helper->ClearCache(graph_id); - return FAILED; - } - GELOGI("Cache files have been saved."); - return SUCCESS; -} - -Status GraphManager::SaveCacheAfterBuild(uint32_t graph_id, ge::ComputeGraphPtr graph, GeModelPtr &ge_model) { - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if ((instance_ptr == nullptr) || !instance_ptr->InitFlag()) { - GELOGW("GELib not initialized."); - return FAILED; - } - - if (instance_ptr->IsIncreBuild()) { - std::lock_guard lock(member_mutex_); - auto iter = cache_helper_map_.find(graph_id); - if 
(iter == cache_helper_map_.end()) { - GELOGW("Can not find ModelCacheHelper of graph[%u]", graph_id); - return FAILED; - } else { - ModelCacheHelperPtr cache_helper = iter->second; - auto ret = cache_helper->RefreshComputeGraph(graph); - if (ret != SUCCESS) { - cache_helper->ClearCache(graph_id); - GELOGW("Fail to refresh cache helper's compute graph"); - return FAILED; - } - ret = cache_helper->SaveVarManagerToCache(false); - if (ret != SUCCESS) { - cache_helper->ClearCache(graph_id); - GELOGW("Fail to save VarManager to cache"); - return FAILED; - } - ret = cache_helper->SaveOmModelToCache(ge_model); - if (ret != SUCCESS) { - cache_helper->ClearCache(graph_id); - GELOGW("Fail to save om model to cache"); - return FAILED; - } - } - } - return SUCCESS; -} - Status GraphManager::InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id, const std::vector &inputs, std::vector &outputs) { GE_CHECK_NOTNULL(executor_); @@ -1239,8 +1143,6 @@ Status GraphManager::RunGraphWithStreamAsync(const GraphId &graph_id, rtStream_t graph_node->SetIsSpecificStream(true); ComputeGraphPtr compute_graph_tmp = GraphUtils::GetComputeGraph(*(graph_node->GetGraph())); - // when set incre build, add cache helper map - AddModelCacheHelperToMap(graph_id, session_id, compute_graph_tmp); if (options_.local_fmk_op_flag) { GetCompilerStages(graph_id).optimizer.TranFrameOp(compute_graph_tmp); } @@ -1299,9 +1201,6 @@ Status GraphManager::RunGraph(const GraphId &graph_id, const std::vector lock(member_mutex_); - auto iter = cache_helper_map_.find(graph_id); - if (iter != cache_helper_map_.end()) { - cache_helper_map_.erase(iter); - } else { - GELOGW("[GraphManager] cache helper does not exist, graph_id = %u", graph_id); - } -} - bool GraphManager::CheckModelLoad(const GeRootModelPtr &ge_root_model, bool load_flag) { return ((ge_root_model != nullptr) && (ge_root_model->GetModelId() != INVALID_MODEL_ID) && load_flag); } @@ -1555,7 +1444,6 @@ Status GraphManager::RemoveGraph(const GraphId &graph_id) { var_acc_ctrl_.RemoveGraph(graph_id); RemoveGraphNode(graph_id); - RemoveModelCacheHelper(graph_id); auto ge_root_model = graph_node->GetGeRootModel(); if (CheckModelLoad(ge_root_model, graph_node->GetLoadFlag())) { @@ -2727,61 +2615,6 @@ Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr != nullptr && instance_ptr->IsIncreBuild()) { - std::lock_guard lock(member_mutex_); - auto iter = cache_helper_map_.find(graph_id); - if (iter == cache_helper_map_.end()) { - ModelCacheHelperPtr cache_helper = MakeShared(session_id, graph_id, compute_graph); - if (cache_helper != nullptr) { - cache_helper_map_.emplace(std::make_pair(graph_id, cache_helper)); - } else { - GELOGW("Cache helper make shared failed, graph_id = %u.", graph_id); - } - } - } -} - -ModelCacheHelperPtr GraphManager::FindModelCacheHelper(GraphId graph_id) { - std::lock_guard lock(member_mutex_); - auto iter = cache_helper_map_.find(graph_id); - if (iter != cache_helper_map_.end()) { - return iter->second; - } - - return nullptr; -} - -Status GraphManager::IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_model) { - std::shared_ptr instance_ptr = ge::GELib::GetInstance(); - if (instance_ptr == nullptr || !instance_ptr->IsIncreBuild()) { - return FAILED; - } - const uint32_t graph_id = graph_node->GetGraphId(); - ModelCacheHelperPtr cache_helper = FindModelCacheHelper(graph_id); - if (cache_helper == nullptr) { - GELOGW("Can not find ModelCacheHelper of graph[%u]", 
graph_id); - return FAILED; - } - if (cache_helper->IsModelCacheHit()) { - GEEVENT("Model cache hit."); - Status ret = LoadFromCache(graph_node, cache_helper, ge_model); - if (ret == SUCCESS) { - return SUCCESS; - } else { - GELOGW("Error occurred when load from cache, abandon."); - } - } else { - GEEVENT("Model cache miss."); - } - if (SaveCacheBeforeBuild(graph_node->GetGraphId(), cache_helper) != SUCCESS) { - GELOGW("Error occurred when save cache."); - } - return FAILED; -} - Status GraphManager::CheckIncreBuildAndPreRun(const PreRunArgs &args, GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) { if (!IsGraphNeedBuild(graph_node)) { @@ -2796,20 +2629,18 @@ Status GraphManager::CheckIncreBuildAndPreRun(const PreRunArgs &args, return PARAM_INVALID; } // check need incre build. - GeModelPtr ge_model = nullptr; - if (IncreBuild(graph_node, ge_model) != SUCCESS) { - std::vector ge_inputs; - for (const auto &item: args.input_tensor) { - ge_inputs.emplace_back(TensorAdapter::AsGeTensor(item)); - } - Status ret = PreRun(graph_node, ge_inputs, ge_root_model, args.session_id); - // release rts generate context - RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId()); - if (ret != SUCCESS) { - ReturnError(args.callback, ret, "PreRun Failed."); - return ret; - } + std::vector ge_inputs; + for (const auto &item: args.input_tensor) { + ge_inputs.emplace_back(TensorAdapter::AsGeTensor(item)); } + Status ret = PreRun(graph_node, ge_inputs, ge_root_model, args.session_id); + // release rts generate context + RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId()); + if (ret != SUCCESS) { + ReturnError(args.callback, ret, "PreRun Failed."); + return ret; + } + graph_node->SetBuildFlag(true); var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId()); return SUCCESS; @@ -2878,10 +2709,6 @@ void GraphManager::PreRunThread() { graph_node->Unlock(); return; } - // when set incre build, save cache helper. 
-    AddModelCacheHelperToMap(args.graph_id, args.session_id, compute_graph_tmp);
-
-    std::vector<GeModelPtr> ge_models;
     if (options_.local_fmk_op_flag) {
       GetCompilerStages(graph_node->GetGraphId()).optimizer.TranFrameOp(compute_graph_tmp);
diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h
index 84d2b11e..e7cd88a9 100644
--- a/ge/graph/manager/graph_manager.h
+++ b/ge/graph/manager/graph_manager.h
@@ -27,7 +27,6 @@
 #include "common/blocking_queue.h"
 #include "framework/common/ge_inner_error_codes.h"
-#include "common/helper/model_cache_helper.h"
 #include "external/graph/types.h"
 #include "external/ge/ge_api_types.h"
 #include "graph/build/graph_builder.h"
@@ -339,14 +338,6 @@ class GraphManager {
   bool IsGraphNeedBuild(const GraphNodePtr &graph_node);
 
-  Status LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper, GeModelPtr &ge_model);
-  Status SaveCacheBeforeBuild(uint32_t graph_id, const ModelCacheHelperPtr &cache_helper);
-  Status SaveCacheAfterBuild(uint32_t graph_id, ComputeGraphPtr graph, GeModelPtr &ge_model);
-  void AddModelCacheHelperToMap(const GraphId &graph_id, uint64_t session_id, ComputeGraphPtr &compute_graph);
-  Status IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_model);
-  void RemoveModelCacheHelper(const GraphId &graph_id);
-  ModelCacheHelperPtr FindModelCacheHelper(GraphId graph_id);
-
   void SetRunContext(const GraphNodePtr &graph_node);
 
   void PushGraph(const RunArgs &args);
@@ -411,7 +402,6 @@ class GraphManager {
   std::thread prerun_thread_;
   ComputeGraphPtr compute_graph_;
   std::map<GraphId, GraphNodePtr> graph_map_;
-  std::map<GraphId, ModelCacheHelperPtr> cache_helper_map_;
 
   // summary and checkpoint callback function list for ME, key is summary or checkpoint
   std::map<std::string, std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)>> me_callback_map_;
diff --git a/ge/graph/manager/graph_manager_utils.cc b/ge/graph/manager/graph_manager_utils.cc
index 42251b10..225a748a 100644
--- a/ge/graph/manager/graph_manager_utils.cc
+++ b/ge/graph/manager/graph_manager_utils.cc
@@ -70,45 +70,9 @@ void GraphNode::IncreaseLoadCount() {
   ++load_count_;
 }
 
-SubGraphInfo::SubGraphInfo() : subgraph_ptr_(nullptr), ge_model_ptr_(nullptr), malloc_flag_(false) {}
+SubGraphInfo::SubGraphInfo() : subgraph_ptr_(nullptr), ge_model_ptr_(nullptr) {}
 
 SubGraphInfo::~SubGraphInfo() {
-  if (malloc_flag_) {
-    for (auto &buffer_addr : buffer_addr_) {
-      if (buffer_addr == nullptr) {
-        continue;
-      }
-      rtError_t rt_ret;
-      rt_ret = rtFreeHost(buffer_addr);
-      buffer_addr = nullptr;
-      if (rt_ret != RT_ERROR_NONE) {
-        GELOGE(rt_ret, "[Call][RtFreeHost] subgraph free buffer failed, modelId = %u",
-               model_id_info_.model_id);
-      }
-    }
-  }
-}
-
-Status SubGraphInfo::FreeInOutBuffer() {
-  if (malloc_flag_) {
-    for (auto iter = buffer_addr_.begin(); iter != buffer_addr_.end(); ++iter) {
-      rtError_t rt_ret;
-      rt_ret = rtFreeHost(*iter);
-      if (rt_ret != RT_ERROR_NONE) {
-        REPORT_CALL_ERROR("E19999", "Call rtFreeHost fail, ret:%d", rt_ret);
-        GELOGE(rt_ret, "[Call][RtFreeHost] subgraph free buffer failed, modelId = %u", model_id_info_.model_id);
-        buffer_addr_.erase(buffer_addr_.begin(), iter);
-        return GE_GRAPH_FREE_FAILED;
-      }
-    }
-    buffer_addr_.clear();
-
-    malloc_flag_ = false;
-    return SUCCESS;
-  } else {
-    GELOGI("[GraphManager] not malloc buffer, modelId = %u", model_id_info_.model_id);
-    return SUCCESS;
-  }
 }
 
 GraphModelListener::GraphModelListener(std::mutex &mutex, std::condition_variable &cond)
diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h
index 14eb67f2..efdbecf8 100644
--- a/ge/graph/manager/graph_manager_utils.h
+++ b/ge/graph/manager/graph_manager_utils.h
@@ -86,8 +86,6 @@ class SubGraphInfo {
   void SetGeModelPtr(const GeModelPtr &ge_model_ptr) { ge_model_ptr_ = ge_model_ptr; }
   bool GeModelIsValid() const { return ge_model_ptr_ != nullptr; }
 
-  Status FreeInOutBuffer();
-
   void SetOutputContext(const std::string &output) { output_names_ = output; }
   std::string GetOutputContext() const { return output_names_; }
diff --git a/ge/graph/manager/graph_var_manager.cc b/ge/graph/manager/graph_var_manager.cc
index d0669254..ce5b335e 100755
--- a/ge/graph/manager/graph_var_manager.cc
+++ b/ge/graph/manager/graph_var_manager.cc
@@ -429,10 +429,6 @@ ge::Status VarManager::GetVarAddr(const std::string &var_name, const ge::GeTenso
   return GetVarAddr(var_name, tensor_desc, dev_ptr, memory_type);
 }
 
-void VarManager::GetAllVarAddrMgr(std::unordered_map<std::string, VarAddrMgr> &var_addr_mgr_map) {
-  var_resource_->GetAllVarAddrMgr(var_addr_mgr_map);
-}
-
 int64_t VarManager::GetVarMemSize(rtMemType_t memory_type) {
   std::lock_guard<std::mutex> lock(mutex_);
   MemResource *mem_resource = nullptr;
@@ -453,36 +449,6 @@ int64_t VarManager::GetVarMemSize(rtMemType_t memory_type) {
   return mem_resource->GetVarMemSize();
 }
 
-Status VarManager::UpdateVarMemSize(rtMemType_t memory_type, int64_t mem_size) {
-  std::lock_guard<std::mutex> lock(mutex_);
-  MemResource *mem_resource = nullptr;
-  auto iter = mem_resource_map_.find(memory_type);
-  if (iter == mem_resource_map_.end()) {
-    mem_resource = MemResource::BuildMemResourceFromType(memory_type);
-    if (mem_resource == nullptr) {
-      REPORT_CALL_ERROR("E19999", "memory_type:%d invalid or New MemResource fail, session_id:%lu",
-                        memory_type, session_id_);
-      GELOGE(ge::INTERNAL_ERROR, "[Alloc][MemResource] failed, memory_type:%u, session_id:%lu",
-             memory_type, session_id_);
-      return ge::INTERNAL_ERROR;
-    } else {
-      mem_resource_map_[memory_type] = mem_resource;
-    }
-  } else {
-    mem_resource = iter->second;
-  }
-
-  if (mem_resource == nullptr) {
-    REPORT_INNER_ERROR("E19999", "MemResource is invalid, memory_type:%d, session_id:%lu",
-                       memory_type, session_id_);
-    GELOGE(ge::INTERNAL_ERROR, "[Check][Param] MemResource is invalid, memory_type:%u, session_id:%lu",
-           memory_type, session_id_);
-    return FAILED;
-  }
-  mem_resource->UpdateVarMemSize(mem_size);
-  return SUCCESS;
-}
-
 ge::Status VarManager::AssignVarMem(const std::string &var_name, const ge::GeTensorDesc &tensor_desc,
                                     rtMemType_t memory_type) {
   std::lock_guard<std::mutex> lock(mutex_);
@@ -638,16 +604,6 @@ ge::Status VarManager::SaveBroadCastInfo(uint32_t graph_id, const VarBroadCastIn
   return SUCCESS;
 }
 
-ge::Status VarManager::GetBroadCastInfo(uint32_t graph_id, const string &var_name, VarBroadCastInfo &broad_cast_info) {
-  std::lock_guard<std::mutex> lock(mutex_);
-
-  if (var_resource_ == nullptr) {
-    GELOGW("VarManager has not been init.");
-    return ge::INTERNAL_ERROR;
-  }
-  return var_resource_->GetBroadCastInfo(graph_id, var_name, broad_cast_info);
-}
-
 ge::Status VarManager::RenewCurVarDesc(const std::string &var_name, ge::OpDescPtr op_desc) {
   std::lock_guard<std::mutex> lock(mutex_);
   GELOGD("VarManager::RenewCurVarDesc var_name = %s.", var_name.c_str());
diff --git a/ge/graph/manager/graph_var_manager.h b/ge/graph/manager/graph_var_manager.h
index a1b45959..f0e3b89b 100755
--- a/ge/graph/manager/graph_var_manager.h
+++ b/ge/graph/manager/graph_var_manager.h
@@ -223,14 +223,10 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager {
   ge::Status GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr,
                         rtMemType_t &memory_type);
 
-  void GetAllVarAddrMgr(std::unordered_map<std::string, VarAddrMgr> &var_addr_mgr_map);
-
   ge::Status GetVarAddr(const std::string &var_name, const ge::GeTensorDesc &tensor_desc, uint8_t **dev_ptr);
 
   ge::Status SaveBroadCastInfo(uint32_t graph_id, const VarBroadCastInfo &broad_cast_info);
 
-  ge::Status GetBroadCastInfo(uint32_t graph_id, const string &var_name, VarBroadCastInfo &broad_cast_info);
-
   ge::Status GetCurVarDesc(const std::string &var_name, ge::GeTensorDesc &tensor_desc);
 
   ge::Status RenewCurVarDesc(const std::string &var_name, ge::OpDescPtr op_desc);
@@ -273,8 +269,6 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY VarManager {
 
   int64_t GetVarMemSize(rtMemType_t memory_type);
 
-  Status UpdateVarMemSize(rtMemType_t memory_type, int64_t mem_size);
-
   bool IsVarExist(const std::string &var_name, const ge::GeTensorDesc &tensor_desc);
 
   bool IsVarExist(const std::string &var_name);
diff --git a/ge/graph/manager/model_manager/event_manager.cc b/ge/graph/manager/model_manager/event_manager.cc
deleted file mode 100644
index 339e9894..00000000
--- a/ge/graph/manager/model_manager/event_manager.cc
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "graph/manager/model_manager/event_manager.h"
-
-#define RETURN_IF_COND_NOT_MET(condition, ...) \
-  do {                                         \
-    if (!(condition)) {                        \
-      GELOGE(FAILED, __VA_ARGS__);             \
-      return;                                  \
-    }                                          \
-  } while (0);
-namespace ge {
-Status EventManager::Init(size_t event_num) {
-  if (this->inited_) {
-    return SUCCESS;
-  }
-
-  rtEvent_t event = nullptr;
-  current_idx_ = 0;
-  for (size_t i = 0; i < event_num; ++i) {
-    GE_CHK_RT_RET(rtEventCreate(&event));
-    this->event_list_.push_back(event);
-  }
-
-  this->inited_ = true;
-
-  return SUCCESS;
-}
-
-void EventManager::Release() noexcept {
-  for (size_t i = 0; i < this->event_list_.size(); ++i) {
-    rtError_t rt_ret = rtEventDestroy(this->event_list_[i]);
-    RETURN_IF_COND_NOT_MET(rt_ret == RT_ERROR_NONE, "[Destroy][Event] failed, idx is %zu, ret is 0x%x.", i, rt_ret);
-  }
-  this->event_list_.clear();
-
-  this->inited_ = false;
-}
-
-Status EventManager::EventRecord(size_t event_idx, rtStream_t stream) {
-  GE_CHK_BOOL_RET_STATUS_NOLOG(this->inited_, INTERNAL_ERROR);
-
-  GE_CHK_BOOL_RET_STATUS_NOLOG(event_idx < this->event_list_.size(), PARAM_INVALID);
-
-  GE_CHK_RT_RET(rtEventRecord(this->event_list_[event_idx], stream));
-
-  current_idx_ = static_cast<uint32_t>(event_idx);
-  return SUCCESS;
-}
-
-Status EventManager::EventElapsedTime(size_t start_event_idx, size_t stop_event_idx, float &time) {
-  GE_CHK_BOOL_RET_STATUS_NOLOG(this->inited_, INTERNAL_ERROR);
-
-  GE_CHK_BOOL_RET_STATUS_NOLOG(start_event_idx < this->event_list_.size() &&
-                               stop_event_idx < this->event_list_.size() && start_event_idx <= stop_event_idx,
-                               PARAM_INVALID);
-
-  GE_CHK_RT_RET(rtEventElapsedTime(&time, this->event_list_[start_event_idx], this->event_list_[stop_event_idx]));
-
-  return SUCCESS;
-}
-
-Status EventManager::GetEvent(uint32_t index, rtEvent_t &event) {
-  GE_CHK_BOOL_RET_STATUS_NOLOG(index < this->event_list_.size(), PARAM_INVALID);
-  event = this->event_list_[index];
-  return SUCCESS;
-}
-}  // namespace ge
diff --git a/ge/graph/manager/model_manager/event_manager.h b/ge/graph/manager/model_manager/event_manager.h
deleted file mode 100644
index 2cb1c3f6..00000000
--- a/ge/graph/manager/model_manager/event_manager.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef GE_GRAPH_MANAGER_MODEL_MANAGER_EVENT_MANAGER_H_
-#define GE_GRAPH_MANAGER_MODEL_MANAGER_EVENT_MANAGER_H_
-
-
-#include
-
-#include "framework/common/fmk_error_codes.h"
-#include "framework/common/fmk_types.h"
-#include "framework/common/util.h"
-#include "runtime/event.h"
-
-namespace ge {
-class EventManager {
- public:
-  ///
-  /// @ingroup domi_ome
-  /// @brief constructor
-  ///
-  EventManager() : inited_(false), current_idx_(0) {}
-  ///
-  /// @ingroup domi_ome
-  /// @brief destructor
-  ///
-  ~EventManager() { this->Release(); }
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief init and create event list
-  /// @param [in] event_num event number created
-  /// @return exec result
-  ///
-  Status Init(size_t event_num);
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief event record
-  /// @param [in] event_idx event index
-  /// @param [in] stream related stream
-  /// @return exec result
-  ///
-  Status EventRecord(size_t event_idx, rtStream_t stream);
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief time between start and end in ms
-  /// @param [in] start_event_idx start event index
-  /// @param [in] stop_event_idx stop event index
-  /// @param [out] time
-  /// @return exec result
-  ///
-  Status EventElapsedTime(size_t start_event_idx, size_t stop_event_idx, float &time);
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief current event index
-  /// @return
-  ///
-  uint32_t CurrentIdx() const { return current_idx_; }
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief get event at specific loc
-  /// @param [in] index event index
-  /// @return
-  ///
-  Status GetEvent(uint32_t index, rtEvent_t &event);
-
-  ///
-  /// @ingroup domi_ome
-  /// @brief release event list
-  /// @param [in]
-  /// @return
-  ///
-  void Release() noexcept;
-
- private:
-  std::vector<rtEvent_t> event_list_;
-  bool inited_;
-  uint32_t current_idx_;
-};  // EventManager
-}  // namespace ge
-#endif  // GE_GRAPH_MANAGER_MODEL_MANAGER_EVENT_MANAGER_H_
diff --git a/ge/graph/manager/trans_var_data_utils.h b/ge/graph/manager/trans_var_data_utils.h
index 174efbb3..f5a89a50 100755
--- a/ge/graph/manager/trans_var_data_utils.h
+++ b/ge/graph/manager/trans_var_data_utils.h
@@ -24,7 +24,6 @@
 #include "graph/utils/tensor_utils.h"
 #include "graph/node.h"
 #include "runtime/context.h"
-#include "graph/manager/graph_var_manager.h"
 
 namespace ge {
 class TransVarDataUtils {
diff --git a/ge/graph/passes/global_step_insert_pass.cc b/ge/graph/passes/global_step_insert_pass.cc
index 297e4ee2..ada4e12a 100755
--- a/ge/graph/passes/global_step_insert_pass.cc
+++ b/ge/graph/passes/global_step_insert_pass.cc
@@ -24,7 +24,6 @@
 #include "framework/common/util.h"
 #include "graph/debug/ge_attr_define.h"
 #include "common/ge/ge_util.h"
-#include "graph/manager/graph_var_manager.h"
 #include "graph/passes/pass_utils.h"
 #include "graph/ge_context.h"
diff --git a/ge/init/gelib.h b/ge/init/gelib.h
index 5e66be51..226dd4c8 100644
--- a/ge/init/gelib.h
+++ b/ge/init/gelib.h
@@ -28,7 +28,6 @@
 #include "graph/debug/ge_attr_define.h"
 #include "graph/utils/graph_utils.h"
 #include "graph/utils/anchor_utils.h"
-#include "graph/manager/graph_var_manager.h"
 #include "framework/common/ge_inner_error_codes.h"
 #include "framework/common/ge_types.h"
 
@@ -63,13 +62,7 @@ class GE_FUNC_VISIBILITY GELib {
   bool InitFlag() const { return init_flag_; }
 
   // get TrainMode flag
-  bool isTrainMode() { return is_train_mode_; }
-
-  // get incre build flag
-  bool IsIncreBuild() const { return is_incre_build_; }
-
-  // get incre build cache path
-  const std::string &GetIncreBuildCachePath() const { return incre_build_cache_path_; }
+  bool IsTrainMode() { return is_train_mode_; }
 
   void InitProfiling(Options &options);
   void ShutDownProfiling();
@@ -100,8 +93,6 @@ class GE_FUNC_VISIBILITY GELib {
   bool is_system_inited = false;
   bool is_shutdown = false;
   bool is_use_hcom = false;
-  bool is_incre_build_ = false;
-  std::string incre_build_cache_path_;
 };
 } // namespace ge
diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index 49c9161d..a0790cf2 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -140,7 +140,6 @@ set(COMMON_SRC_FILES
    "${GE_CODE_DIR}/ge/graph/optimize/graph_optimize.cc"
    "${GE_CODE_DIR}/ge/graph/build/graph_builder.cc"
    "${GE_CODE_DIR}/ge/graph/partition/graph_partition.cc"
-    "${GE_CODE_DIR}/ge/common/helper/model_cache_helper.cc"
    "${GE_CODE_DIR}/ge/ir_build/ge_ir_build.cc"
    "${GE_CODE_DIR}/ge/ir_build/attr_options/utils.cc"
    "${GE_CODE_DIR}/ge/ir_build/attr_options/keep_dtype_option.cc"
@@ -248,7 +247,6 @@ set(GRAPH_DAVINCI_MODEL_SRC_FILES
    "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel.cc"
    "${GE_CODE_DIR}/ge/graph/load/model_manager/task_info/super_kernel/super_kernel_factory.cc"
    "${GE_CODE_DIR}/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc"
-    "${GE_CODE_DIR}/ge/graph/manager/model_manager/event_manager.cc"
 )
 
 set(GRAPH_EXECUTE_COMMON_SRC_FILES
@@ -520,13 +518,9 @@ set(COMMON_TEST_FILES
 
 set(DISTINCT_GRAPH_LOAD_TEST_FILES
    "graph/load/data_dumper_unittest.cc"
-    #"graph/load/new_model_manager_data_inputer_unittest.cc"
-    #"graph/load/new_model_manager_davinci_model_unittest.cc"
    "graph/load/model_manager_unittest.cc"
    "graph/load/new_model_manager_model_manager_aicpu_unittest.cc"
    "graph/load/end_graph_task_unittest.cc"
-    "graph/load/new_model_manager_event_manager_unittest.cc"
-    #"graph/load/output_net_output_unittest.cc"
    "graph/load/davinci_model_unittest.cc"
    "graph/load/tbe_handle_store_unittest.cc"
    "graph/load/hccl_task_info_unittest.cc"
@@ -536,7 +530,6 @@ set(DISTINCT_GRAPH_LOAD_TEST_FILES
    "graph/load/memcpy_addr_async_task_info_unittest.cc"
    "graph/load/memcpy_async_task_info_unittest.cc"
    "graph/load/cpu_queue_schedule_unittest.cc"
-    #"graph/graph_load_unittest.cc"
    "graph/ge_executor_unittest.cc"
    "graph/load/model_helper_unittest.cc"
    "graph/load/model_utils_unittest.cc"
diff --git a/tests/ut/ge/graph/execute/model_executor_unittest.cc b/tests/ut/ge/graph/execute/model_executor_unittest.cc
index d4e0e3a4..cd907e99 100644
--- a/tests/ut/ge/graph/execute/model_executor_unittest.cc
+++ b/tests/ut/ge/graph/execute/model_executor_unittest.cc
@@ -20,6 +20,7 @@
 #define private public
 #include "graph/execute/model_executor.h"
 #include "graph/manager/graph_manager.h"
+#include "graph/manager/graph_var_manager.h"
 #include "graph/load/model_manager/model_manager.h"
 #include "graph/load/model_manager/davinci_model.h"
diff --git a/tests/ut/ge/graph/graph_load_unittest.cc b/tests/ut/ge/graph/graph_load_unittest.cc
deleted file mode 100644
index 93282a5e..00000000
--- a/tests/ut/ge/graph/graph_load_unittest.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include
-#include
-#include
-#include
-#include
-
-#include "common/debug/log.h"
-#include "common/helper/model_helper.h"
-#include "common/op/ge_op_utils.h"
-#include "common/types.h"
-#include "graph/op_desc.h"
-#include "graph/types.h"
-#include "graph/utils/attr_utils.h"
-#include "graph/utils/op_desc_utils.h"
-
-#define protected public
-#define private public
-#include "graph/load/graph_loader.h"
-
-#include "framework/common/ge_inner_error_codes.h"
-#include "graph/load/model_manager/model_manager.h"
-#include "graph/manager/graph_manager_utils.h"
-#include "common/model/ge_model.h"
-#undef private
-#undef protected
-
-using namespace testing;
-namespace ge {
-
-class UtestGraphGraphLoad : public testing::Test {
- protected:
-  void SetUp() {}
-
-  void TearDown() {}
-};
-
-TEST_F(UtestGraphGraphLoad, load_graph_param_invalid1) {
-  std::shared_ptr<GraphModelListener> graph_run_listener = nullptr;
-  SubGraphInfo sub_graph1;
-  ge::SubGraphInfoPtr sub_graph_ptr1 = std::make_shared<SubGraphInfo>(sub_graph1);
-  ModelIdInfo model_Id_info;
-  model_Id_info.model_id = 1;
-
-  GeModelPtr ge_model_ptr = std::make_shared<GeModel>();
-  sub_graph_ptr1->SetGeModelPtr(ge_model_ptr);
-
-  std::vector<bool> input_flag;
-  input_flag.push_back(false);
-  sub_graph_ptr1->SetInputFlag(input_flag);
-
-  ge::GraphLoader graph_load;
-  EXPECT_EQ(GE_GRAPH_PARAM_NULLPTR, graph_load.LoadGraph(sub_graph_ptr1->ge_model_ptr_, graph_run_listener, model_Id_info));
-  sub_graph_ptr1->SetModelIdInfo(model_Id_info);
-}
-
-TEST_F(UtestGraphGraphLoad, load_graph_param_invalid2) {
-  std::mutex sync_run_mutex;
-  std::condition_variable condition;
-  std::shared_ptr<GraphModelListener> listener = std::make_shared<GraphModelListener>(sync_run_mutex, condition);
-
-  SubGraphInfo sub_graph1;
-  ge::SubGraphInfoPtr sub_graph_ptr1 = std::make_shared<SubGraphInfo>(sub_graph1);
-  ModelIdInfo model_Id_info;
-  model_Id_info.model_id = 1;
-
-  GeModelPtr ge_model_ptr = std::make_shared<GeModel>();
-  sub_graph_ptr1->SetGeModelPtr(ge_model_ptr);
-
-  std::vector<bool> input_flag;
-  input_flag.push_back(false);
-  sub_graph_ptr1->SetInputFlag(input_flag);
-
-  ge::GraphLoader graph_load;
-  EXPECT_EQ(GE_GRAPH_PARAM_NULLPTR, graph_load.LoadGraph(sub_graph_ptr1->ge_model_ptr_, listener, model_Id_info));
-  sub_graph_ptr1->SetModelIdInfo(model_Id_info);
-}
-}  // namespace ge
diff --git a/tests/ut/ge/graph/load/new_model_manager_data_inputer_unittest.cc b/tests/ut/ge/graph/load/new_model_manager_data_inputer_unittest.cc
deleted file mode 100644
index 43c2ad15..00000000
--- a/tests/ut/ge/graph/load/new_model_manager_data_inputer_unittest.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <gtest/gtest.h>
-
-#include "graph/load/model_manager/data_inputer.h"
-
-#include "common/debug/log.h"
-#include "common/debug/memory_dumper.h"
-#include "common/types.h"
-#include "new_op_test_utils.h"
-
-using namespace std;
-using namespace testing;
-
-namespace ge {
-
-class UtestModelManagerDataInputer : public testing::Test {
- protected:
-  void SetUp() {}
-
-  void TearDown() {}
-};
-
-/// InputDataWrapper
-/// constructor
-/// GetInput
-TEST_F(UtestModelManagerDataInputer, inputdatawrapper_construct) {
-  InputDataWrapper *input_data_wrapper = new InputDataWrapper();
-
-  input_data_wrapper->GetInput();
-
-  delete input_data_wrapper;
-}
-
-/// InputDataWrapper
-/// Init func with correct input
-TEST_F(UtestModelManagerDataInputer, success_inputdatawrapper_init) {
-  InputDataWrapper *input_data_wrapper = new InputDataWrapper();
-  ge::InputData input_data;
-  ge::OutputData output_data;
-  Status ret = input_data_wrapper->Init(input_data, output_data);
-
-  EXPECT_EQ(ret, SUCCESS);
-
-  delete input_data_wrapper;
-  input_data_wrapper = NULL;
-}
-
-} // namespace ge
diff --git a/tests/ut/ge/graph/load/new_model_manager_davinci_model_unittest.cc b/tests/ut/ge/graph/load/new_model_manager_davinci_model_unittest.cc
deleted file mode 100644
index 38a250ad..00000000
--- a/tests/ut/ge/graph/load/new_model_manager_davinci_model_unittest.cc
+++ /dev/null
@@ -1,1433 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include -#include "common/debug/log.h" -#include "common/debug/memory_dumper.h" -#include "common/types.h" - -#define private public -#define protected public -#include "graph/compute_graph.h" -#include "graph/utils/graph_utils.h" -#include "graph/model_serialize.h" -#include "graph/load/model_manager/davinci_model.h" -#include "common/properties_manager.h" -#include "common/op/ge_op_utils.h" -#include -#include "runtime/dev.h" -#include "runtime/kernel.h" -#include "cce/fwk_adpt_struct.h" -#include "graph/load/model_manager/task_info/task_info_factory.h" -#include "graph/load/model_manager/task_info/task_info.h" -#include "graph/load/model_manager/task_info/stream_active_task_info.h" -#include "graph/load/model_manager/task_info/stream_switch_task_info.h" -#include "graph/load/model_manager/task_info/profiler_trace_task_info.h" -#include "graph/load/model_manager/task_info/memcpy_async_task_info.h" -#include "graph/load/model_manager/task_info/label_set_task_info.h" -#include "graph/load/model_manager/task_info/kernel_ex_task_info.h" -#include "graph/load/model_manager/task_info/kernel_task_info.h" -#include "graph/load/model_manager/task_info/hccl_task_info.h" -#include "graph/load/model_manager/task_info/fusion_start_task_info.h" -#include "graph/load/model_manager/task_info/fusion_stop_task_info.h" -#include "graph/load/model_manager/task_info/event_record_task_info.h" -#include "graph/load/model_manager/task_info/event_wait_task_info.h" -#include "graph/manager/graph_var_manager.h" -#include "graph/load/model_manager/model_manager.h" -#undef private -#undef protected - -#include "new_op_test_utils.h" -#include "graph/debug/ge_attr_define.h" -using namespace std; -using namespace testing; -using domi::EventExDef; -using domi::KernelContext; -using domi::KernelDef; -using domi::LogTimeStampDef; -using domi::ModelTaskDef; -using domi::StreamActiveDef; -using domi::TaskDef; - -namespace ge { -class UtestModelManagerDavinciModel : public testing::Test { - protected: - void SetUp() {} - - void TearDown() {} -}; - -class DModelListener : public ge::ModelListener { - public: - DModelListener(){}; - uint32_t OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t resultCode) { - GELOGI("In Call back. 
OnComputeDone"); - return 0; - } -}; - -shared_ptr g_label_call_back(new DModelListener()); - -static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") { - auto op_desc = std::make_shared(name, type); - op_desc->SetStreamId(0); - op_desc->SetId(0); - - ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_ALPHA, 0); - ge::AttrUtils::SetFloat(op_desc, ge::ATTR_NAME_BETA, 0); - - op_desc->SetWorkspace({}); - ; - op_desc->SetWorkspaceBytes({}); - op_desc->SetInputOffset({}); - op_desc->SetOutputOffset({}); - - ge::AttrUtils::SetListStr(op_desc, ge::ATTR_NAME_WEIGHT_NAME, {}); - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_MODE, 0); - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_PAD_MODE, 0); - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_DATA_MODE, 0); - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_CEIL_MODE, 0); - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_NAN_OPT, 0); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, {}); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_PAD, {}); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, {}); - ge::AttrUtils::SetListInt(op_desc, ge::ATTR_NAME_ACTIVE_STREAM_LIST, {1, 1}); - ge::AttrUtils::SetInt(op_desc, ge::ATTR_NAME_STREAM_SWITCH_COND, 0); - ge::AttrUtils::SetInt(op_desc, ge::ATTR_NAME_FRAMEWORK_FWK_TYPE, FMK_TYPE_T); - return op_desc; -} - -// tset failed_rt_free_host -TEST_F(UtestModelManagerDavinciModel, failed_rt_free_host) { - DavinciModel model(0, g_label_call_back); - - OutputData output_data; - - auto op_desc = CreateOpDesc("Pooling", "Pooling"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 1, 1})); - ge::TensorUtils::SetSize(in_desc, 16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 1, 1})); - ge::TensorUtils::SetSize(out_desc, 16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - ge::AttrUtils::SetInt(op_desc, ge::POOLING_ATTR_PAD_MODE, cce::CC_PADDING_DIRECTASSIGN); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_PAD, vector({1, 1, 1, 1})); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, vector({1, 1})); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, vector({1, 1})); - - auto compute_graph = make_shared("g"); - auto node = compute_graph->AddNode(op_desc); - - OmeTestOpUtils::InitModel(model); - - model.data_op_list_.push_back(op_desc); - - EXPECT_EQ(ge::INTERNAL_ERROR, model.ReturnResult(1, false, false, &output_data)); -} - -// test modeldef_fail -TEST_F(UtestModelManagerDavinciModel, contruct_modeldef_createfail) { - DavinciModel model(0, g_label_call_back); - - OmeTestOpUtils::InitModel(model); - - auto op_desc = CreateOpDesc("Pooling", "Pooling"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 1, 1})); - ge::TensorUtils::SetSize(in_desc, 16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 1, 1})); - ge::TensorUtils::SetSize(out_desc, 16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - ge::AttrUtils::SetInt(op_desc, 
ge::POOLING_ATTR_PAD_MODE, cce::CC_PADDING_DIRECTASSIGN); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_PAD, vector({1, 1, 1, 1})); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, vector({1, 1})); - ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, vector({1, 1})); - - model.GetEventList(); -} - -// test CopyInputDataToModel -TEST_F(UtestModelManagerDavinciModel, copy_input_data_to_model_fail) { - DavinciModel model(0, g_label_call_back); - - ge::InputData input_data; - ge::DataBuffer data_buffer; - data_buffer.data = new char[16]; - data_buffer.length = 16; - input_data.index = 0; - input_data.model_id = 1; - input_data.blobs.push_back(data_buffer); - - model.op_list_.clear(); - - delete[](char *) data_buffer.data; -} - -// test StreamNum -TEST_F(UtestModelManagerDavinciModel, streamnum_success) { - DavinciModel *model = new DavinciModel(0, g_label_call_back); - - OmeTestOpUtils::InitModel(*model); - - EXPECT_EQ(0, model->StreamNum()); - EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart()); - - EXPECT_EQ(ge::SUCCESS, model->ModelRunStop()); - - delete model; -} - -// test EventNum -TEST_F(UtestModelManagerDavinciModel, eventnum_success) { - DavinciModel *model = new DavinciModel(0, g_label_call_back); - - OmeTestOpUtils::InitModel(*model); - - EXPECT_EQ(0, model->EventNum()); - EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart()); - - EXPECT_EQ(ge::SUCCESS, model->ModelRunStop()); - - delete model; -} - -TEST_F(UtestModelManagerDavinciModel, handlelist_success) { - DavinciModel *model = new DavinciModel(0, g_label_call_back); - - OmeTestOpUtils::InitModel(*model); - - EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart()); - - EXPECT_EQ(ge::SUCCESS, model->ModelRunStop()); - - delete model; -} - -// test GetEventList -TEST_F(UtestModelManagerDavinciModel, eventlist_success) { - DavinciModel *model = new DavinciModel(0, g_label_call_back); - - OmeTestOpUtils::InitModel(*model); - - EXPECT_EQ(true, model->GetEventList().empty()); - EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart()); - - EXPECT_EQ(ge::SUCCESS, model->ModelRunStop()); - - delete model; -} - -// test Shrink -TEST_F(UtestModelManagerDavinciModel, shrink_success) { - DavinciModel model(0, g_label_call_back); - OpDescPtr op_desc_ptr = make_shared("Cast", "Cast"); - void *addr = nullptr; - rtMalloc(&addr, 128, RT_MEMORY_HBM); - model.saved_task_addrs_.emplace(op_desc_ptr, addr); - model.Shrink(); - EXPECT_EQ(model.saved_task_addrs_.isEmpty(), true); -} - -// test rtMalloc -TEST_F(UtestModelManagerDavinciModel, failed_reset_device) { - DavinciModel model(0, g_label_call_back); - ge::OutputData output_data; - ge::DataBuffer buf_data; - rtMalloc(&buf_data.data, 128, RT_MEMORY_HBM); - buf_data.length = 128; - output_data.blobs.push_back(buf_data); - EXPECT_EQ(ge::INTERNAL_ERROR, model.ReturnResult(1, true, false, &output_data)); - rtFree(buf_data.data); -} - -// test priority -TEST_F(UtestModelManagerDavinciModel, init_not_support_priority) { - int32_t priority = 8; - DavinciModel model(priority, g_label_call_back); -} - -// test GetInputOutputDescInfo -TEST_F(UtestModelManagerDavinciModel, success_GetInputOutputDescInfo_without_netoutput) { - DavinciModel model(0, g_label_call_back); - - auto op_desc = CreateOpDesc("Data", "Data"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - op_desc->SetStreamId(0); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - 
ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - - op_desc->SetSrcName({"Pooling1", "Pooling0"}); - op_desc->SetSrcIndex({0, 1}); - - auto compute_graph = make_shared("g"); - auto node = compute_graph->AddNode(op_desc); - - model.data_op_list_.push_back(op_desc); - model.output_size_list_.push_back(32); - - model.op_list_[0] = op_desc; - - model.output_op_list_.push_back(op_desc); - - vector input_shapes; - vector output_shapes; - EXPECT_EQ(ge::SUCCESS, model.GetInputOutputDescInfo(input_shapes, output_shapes)); -} - -TEST_F(UtestModelManagerDavinciModel, CopyTensorFromSrcVarNode_input_is_nullptr) { - NodePtr src_node = nullptr; - NodePtr dst_node = nullptr; - DavinciModel model(0, g_label_call_back); - Status ret = model.CopyTensorFromSrcVarNode(src_node, dst_node); - EXPECT_EQ(FAILED, ret); -} - -TEST_F(UtestModelManagerDavinciModel, CopyTensorFromSrcVarNode_success) { - ge::ComputeGraphPtr graph = std::make_shared("default"); - OpDescPtr op_desc_ptr = make_shared("Cast", "Cast"); - GeTensorDesc dims_tensor_desc(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT16); - GeTensorDesc dims_tensor_desc_in(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT); - op_desc_ptr->AddInputDesc(dims_tensor_desc_in); - op_desc_ptr->AddOutputDesc(dims_tensor_desc); - - NodePtr src_node = graph->AddNode(op_desc_ptr); - NodePtr dst_node = graph->AddNode(op_desc_ptr); - DavinciModel model(0, g_label_call_back); - Status ret = model.CopyTensorFromSrcVarNode(src_node, dst_node); -} - -TEST_F(UtestModelManagerDavinciModel, CopyVarData_graph_is_nullptr) { - ge::ComputeGraphPtr graph = nullptr; - DavinciModel model(0, g_label_call_back); - Status ret = model.CopyVarData(graph); - EXPECT_EQ(FAILED, ret); -} - -TEST_F(UtestModelManagerDavinciModel, copy_var_data_success) { - ge::ComputeGraphPtr graph = std::make_shared("default"); - OpDescPtr op_desc_ptr = make_shared("Variable", "Variable"); - GeTensorDesc dims_tensor_desc(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT16); - GeTensorDesc dims_tensor_desc_in(GeShape({1, 1, 1, 1}), FORMAT_NCHW, DT_FLOAT16); - op_desc_ptr->AddInputDesc(dims_tensor_desc_in); - op_desc_ptr->AddOutputDesc(dims_tensor_desc); - - NodePtr src_node = graph->AddNode(op_desc_ptr); - (void)ge::AttrUtils::SetStr(src_node->GetOpDesc(), "_copy_from_var_node", "abc"); - (void)ge::AttrUtils::SetBool(src_node->GetOpDesc(), "_copy_value", false); - - DavinciModel model(0, g_label_call_back); - Status ret = model.CopyVarData(graph); -} - -TEST_F(UtestModelManagerDavinciModel, get_input_output_desc_info_without_data_op_list) { - DavinciModel model(0, g_label_call_back); - vector input_list; - vector output_list; - Status ret = model.GetInputOutputDescInfo(input_list, output_list); - EXPECT_EQ(SUCCESS, ret); -} - -// test GetInputOutputDescInfo -TEST_F(UtestModelManagerDavinciModel, success_get_input_output_descInfo_with_net_output) { - DavinciModel model(0, g_label_call_back); - - auto op_desc = CreateOpDesc("Data", "Data"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - op_desc->SetStreamId(0); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - 
op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - op_desc->SetSrcName({"Pooling1", "Pooling0"}); - op_desc->SetSrcIndex({0, 1}); - - auto compute_graph = make_shared("g"); - auto data_node = compute_graph->AddNode(op_desc); - - model.data_op_list_.push_back(op_desc); - - op_desc->SetType("NetOutput"); - - auto no_node = compute_graph->AddNode(op_desc); - - model.op_list_[0] = op_desc; - - model.output_op_list_.push_back(op_desc); - model.output_size_list_.push_back(32); - - vector input_shapes; - vector output_shapes; - EXPECT_EQ(ge::SUCCESS, model.GetInputOutputDescInfo(input_shapes, output_shapes)); -} - -TEST_F(UtestModelManagerDavinciModel, success_get_input_output_desc_info_for_zero_copy_with_net_output) { - DavinciModel model(0, g_label_call_back); - - auto op_desc = CreateOpDesc("Data", "Data"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - op_desc->SetStreamId(0); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - - op_desc->SetSrcName({"Pooling1", "Pooling0"}); - op_desc->SetSrcIndex({0, 1}); - - auto compute_graph = make_shared("g"); - auto data_node = compute_graph->AddNode(op_desc); - - model.data_op_list_.push_back(op_desc); - - op_desc->SetType("NetOutput"); - - auto net_out_node = compute_graph->AddNode(op_desc); - model.op_list_[0] = op_desc; - - model.output_op_list_.push_back(op_desc); - model.output_size_list_.push_back(32); - model.output_memory_size_list_.push_back(64); - - vector input_shapes; - vector output_shapes; - EXPECT_EQ(ge::SUCCESS, model.GetInputOutputDescInfoForZeroCopy(input_shapes, output_shapes)); -} - -TEST_F(UtestModelManagerDavinciModel, success_get_input_output_desc_info_dim_size_not4) { - DavinciModel model(0, g_label_call_back); - - auto op_desc = CreateOpDesc("Data", "Data"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - op_desc->SetStreamId(0); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - - op_desc->SetSrcName({"Pooling1", "Pooling0"}); - op_desc->SetSrcIndex({0, 1}); - - auto compute_graph = make_shared("g"); - auto data_node = compute_graph->AddNode(op_desc); - - model.data_op_list_.push_back(op_desc); - - op_desc->SetType("NetOutput"); - - auto net_out_node = compute_graph->AddNode(op_desc); - model.op_list_[0] = op_desc; - - model.output_op_list_.push_back(op_desc); - 
model.output_size_list_.push_back(32); - - vector input_shapes; - vector output_shapes; - EXPECT_EQ(ge::SUCCESS, model.GetInputOutputDescInfo(input_shapes, output_shapes)); -} - -// test GetLabelList -TEST_F(UtestModelManagerDavinciModel, get_label_list_success) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - vector label_list; - model.label_list_ = label_list; - EXPECT_EQ(label_list, model.GetLabelList()); -} - -// test GetInputListSize -TEST_F(UtestModelManagerDavinciModel, get_label_list_size_success) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - vector data_op_list; - data_op_list.push_back(std::make_shared()); - model.data_op_list_ = data_op_list; -} - -// test GetFlowctrlOpList -TEST_F(UtestModelManagerDavinciModel, get_flow_ctrl_op_list_success) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - std::map flowctrl_op_index_internal_map; - flowctrl_op_index_internal_map.insert(pair(1, 1)); - model.flowctrl_op_index_internal_map_ = flowctrl_op_index_internal_map; -} - -// test SetFlowctrlOpList -TEST_F(UtestModelManagerDavinciModel, get_flow_ctrl_index_success) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - EXPECT_EQ(0, model.GetFlowctrlIndex(0)); - EXPECT_EQ(1, model.GetFlowctrlIndex(0)); - EXPECT_EQ(0, model.GetFlowctrlIndex(1)); - EXPECT_EQ(1, model.GetFlowctrlIndex(1)); - EXPECT_EQ(2, model.GetFlowctrlIndex(0)); -} - -// test GetRegisterStub -TEST_F(UtestModelManagerDavinciModel, success_get_register_stub) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - std::string binfile = "tvmbin"; - string ret = model.GetRegisterStub(binfile); - EXPECT_EQ("tvmbin", ret); - model.tvm_bin_kernel_.insert("tvmbin"); - ret = model.GetRegisterStub(binfile); - EXPECT_EQ("tvmbin", ret); -} - -// test InitTbeHandle -TEST_F(UtestModelManagerDavinciModel, success_init_tbe_handle) { - DavinciModel model(0, g_label_call_back); - OmeTestOpUtils::InitModel(model); - std::shared_ptr op_desc = std::make_shared(); - Status ret = model.InitTbeHandle(op_desc); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); -} - -// test InitTVMTask failed -TEST_F(UtestModelManagerDavinciModel, init_tvm_task_failed1) { - DavinciModel model(0, g_label_call_back); - uint16_t offset = 0; - TaskDef *task_def = new TaskDef(); - KernelDef *kernel_def = task_def->mutable_kernel(); - map op_list; - model.op_list_ = op_list; - - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - Status ret = kernel_task_info->InitTVMTask(&model, offset, kernel_def[0]); - EXPECT_EQ(INTERNAL_ERROR, ret); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -TEST_F(UtestModelManagerDavinciModel, kernel_taskInfo_init_cce_task_failed1) { - DavinciModel model(0, g_label_call_back); - - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - KernelDef *kernel_def = task_def->mutable_kernel(); - Status ret = kernel_task_info->InitCceTask(&model, kernel_def[0]); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -// test SetContext success -TEST_F(UtestModelManagerDavinciModel, success_kernel_taskInfo_init_set_context) { - DavinciModel model(0, g_label_call_back); - - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - KernelDef *kernel_def = task_def->mutable_kernel(); - KernelContext *context = 
kernel_def->mutable_context(); - context->set_op_id(1); - context->set_kernel_func_id(1); - context->set_is_flowtable(true); - context->set_args_count(1); - context->set_args_offset("args111111", 10); - - Status ret = kernel_task_info->SetContext(kernel_def[0]); - EXPECT_EQ(ge::SUCCESS, ret); - - ret = kernel_task_info->Release(); - EXPECT_EQ(ge::SUCCESS, ret); - kernel_def->clear_context(); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -// test SetContext failed -TEST_F(UtestModelManagerDavinciModel, kernel_taskInfo_init_set_context_failed1) { - DavinciModel model(0, g_label_call_back); - - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - KernelDef *kernel_def = task_def->mutable_kernel(); - KernelContext *context = kernel_def->mutable_context(); - context->set_op_id(1); - context->set_kernel_func_id(1); - context->set_is_flowtable(true); - context->set_args_count(0); - Status ret = kernel_task_info->SetContext(kernel_def[0]); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); - - kernel_def->clear_context(); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -TEST_F(UtestModelManagerDavinciModel, kernel_taskInfo_init_set_context_failed2) { - DavinciModel model(0, g_label_call_back); - - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - KernelDef *kernel_def = task_def->mutable_kernel(); - KernelContext *context = kernel_def->mutable_context(); - context->set_op_id(1); - context->set_kernel_func_id(1); - context->set_is_flowtable(true); - context->set_args_count(5); - context->set_args_offset("\0\0"); // args_offset = 0 - - Status ret = kernel_task_info->SetContext(kernel_def[0]); - EXPECT_EQ(ge::PARAM_INVALID, ret); - - kernel_def->clear_context(); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -// test success DistributeDumpTask -TEST_F(UtestModelManagerDavinciModel, success_distribute_dump_task) { - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - KernelDef *kernel_def = task_def->mutable_kernel(); - - kernel_def->set_stub_func("kerneltaskinfo"); - kernel_def->set_block_dim(10); - kernel_def->set_args("args111111", 10); - kernel_def->set_args_size(10); - rtSmDesc_t l2CtrlInfo; - l2CtrlInfo.data[0].L2_mirror_addr = 1024; - kernel_def->set_sm_desc((void *)&l2CtrlInfo, sizeof(rtSmDesc_t)); - - // for SetStream - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - std::vector stream_list; - stream_list.push_back(stream); - Status ret = kernel_task_info->SetStream(0, stream_list); - EXPECT_EQ(SUCCESS, ret); - - ret = kernel_task_info->Release(); - EXPECT_EQ(SUCCESS, ret); - rtStreamDestroy(stream); - task_def->clear_kernel(); - delete kernel_task_info; - delete task_def; -} - -// test success GetTaskID -TEST_F(UtestModelManagerDavinciModel, success_get_task_id) { - ModelTaskDef *model_task_def = new ModelTaskDef(); - TaskDef *task = model_task_def->add_task(); - task->set_type(RT_MODEL_TASK_KERNEL); - TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast(task->type())); - - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - uint32_t ret = task_info->GetTaskID(); - EXPECT_EQ(0, ret); - ret = kernel_task_info->GetTaskID(); - EXPECT_EQ(0, ret); - HcclTaskInfo *hccl_task_info = new HcclTaskInfo(); - ret = hccl_task_info->GetTaskID(); - EXPECT_EQ(0, ret); - - delete hccl_task_info; - delete 
kernel_task_info; - delete model_task_def; -} - -// test StoreInputOutputTensor success -TEST_F(UtestModelManagerDavinciModel, success_store_input_output_tensor) { - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - KernelTaskInfo *kernel_task_info = new KernelTaskInfo(); - - std::vector input_data_addrs; - std::vector output_data_addrs; - std::vector<::tagCcAICPUTensor> input_descs; - std::vector<::tagCcAICPUTensor> output_descs; - - int test = 1; - int *addr = &test; - void *input; - void *output; - input = addr; - output = addr; - input_data_addrs.push_back(&input); - output_data_addrs.push_back(output); - - tagCcAICPUTensor input_desc; - tagCcAICPUTensor output_desc; - input_descs.push_back(input_desc); - output_descs.push_back(output_desc); - - Status ret = kernel_task_info->StoreInputOutputTensor(input_data_addrs, output_data_addrs, input_descs, output_descs); - EXPECT_EQ(SUCCESS, ret); - ret = kernel_task_info->Release(); - EXPECT_EQ(SUCCESS, ret); - delete kernel_task_info; - delete task_def; -} - -// test init EventRecordTaskInfo -TEST_F(UtestModelManagerDavinciModel, success_event_record_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - EventRecordTaskInfo *eventRecordTaskInfo1 = new EventRecordTaskInfo(); - Status ret1 = eventRecordTaskInfo1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - - delete eventRecordTaskInfo1; - delete task_def1; - delete model1; - DavinciModel model(0, g_label_call_back); - - ModelTaskDef *model_task_info = new ModelTaskDef(); - TaskDef *task = model_task_info->add_task(); - task->set_type(RT_MODEL_TASK_EVENT_RECORD); - TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast(task->type())); - - task->stream_id_ = 0; - rtStream_t rt_stream; - rtStreamCreate(&rt_stream, 1); - vector stream_list; - stream_list.push_back(rt_stream); - model.stream_list_ = stream_list; - - task->set_event_id(1); - model.runtime_param_.event_num = 1; - Status ret = task_info->Init(task[0], &model); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); - - model.runtime_param_.event_num = 2; - rtEvent_t event1; - rtEvent_t event2; - rtEventCreate(&event1); - rtEventCreate(&event2); - model.event_list_.push_back(event1); - model.event_list_.push_back(event2); - - EventExDef *event_ex_def = task->mutable_event_ex(); - event_ex_def->set_event_type(1); - - ret = task_info->Init(task[0], &model); - EXPECT_EQ(SUCCESS, ret); - - task->clear_event_ex(); - task_info->Release(); - delete model_task_info; -} - -// test init EventWaitTaskInfo -TEST_F(UtestModelManagerDavinciModel, success_event_wait_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - EventWaitTaskInfo *event_wait_task_info1 = new EventWaitTaskInfo(); - Status ret1 = event_wait_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - - delete event_wait_task_info1; - delete task_def1; - delete model1; - DavinciModel model(0, g_label_call_back); - - ModelTaskDef *model_task_info = new ModelTaskDef(); - TaskDef *task = model_task_info->add_task(); - task->set_type(RT_MODEL_TASK_EVENT_WAIT); - TaskInfoPtr task_info = TaskInfoFactory::Instance().Create(static_cast(task->type())); - - task->stream_id_ = 0; - rtStream_t rt_stream; - rtStreamCreate(&rt_stream, 1); - vector stream_list; - stream_list.push_back(rt_stream); - model.stream_list_ = stream_list; - - task->set_event_id(1); - model.runtime_param_.event_num = 1; - Status ret = task_info->Init(task[0], &model); - 
EXPECT_EQ(ge::INTERNAL_ERROR, ret); - - model.runtime_param_.event_num = 2; - rtEvent_t event1; - rtEvent_t event2; - rtEventCreate(&event1); - rtEventCreate(&event2); - model.event_list_.push_back(event1); - model.event_list_.push_back(event2); - - EventExDef *event_ex_def = task->mutable_event_ex(); - event_ex_def->set_event_type(1); - - ret = task_info->Init(task[0], &model); - EXPECT_EQ(SUCCESS, ret); - - task->clear_event_ex(); - task_info->Release(); - delete model_task_info; -} - -// test fusion_start_task Init -TEST_F(UtestModelManagerDavinciModel, success_fusion_start_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - FusionStartTaskInfo *fusion_start_task_info1 = new FusionStartTaskInfo(); - Status ret1 = fusion_start_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - - delete fusion_start_task_info1; - delete task_def1; - delete model1; - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - FusionStartTaskInfo *fusion_start_task_info = new FusionStartTaskInfo(); - task_def->set_stream_id(0); - rtStream_t stream; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - - Status ret = fusion_start_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - delete fusion_start_task_info; - delete task_def; -} - -// test fusion_end_task Init -TEST_F(UtestModelManagerDavinciModel, success_fusion_end_task_rinit) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - FusionStopTaskInfo *fusion_stop_task_info1 = new FusionStopTaskInfo(); - Status ret1 = fusion_stop_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - - delete fusion_stop_task_info1; - delete task_def1; - delete model1; - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - FusionStopTaskInfo *fusion_stop_task_info = new FusionStopTaskInfo(); - task_def->set_stream_id(0); - rtStream_t stream; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - - Status ret = fusion_stop_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - delete fusion_stop_task_info; - delete task_def; -} - -// test kernel_ex_task_Release -TEST_F(UtestModelManagerDavinciModel, success_kernel_ex_task_release) { - KernelExTaskInfo *kernel_ex_task_info = new KernelExTaskInfo(); - Status ret = kernel_ex_task_info->Release(); - EXPECT_EQ(SUCCESS, ret); - - delete kernel_ex_task_info; -} - -// test hccl_Distribute -TEST_F(UtestModelManagerDavinciModel, success_Distribute7) { - DavinciModel model(0, g_label_call_back); - - ModelTaskDef *model_task_def = new ModelTaskDef(); - TaskDef *task7 = model_task_def->add_task(); - task7->set_type(RT_MODEL_TASK_HCCL); - TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast(task7->type())); - Status ret = task_info7->Init(task7[0], &model); - EXPECT_EQ(FAILED, ret); - - std::vector task_list; - task_list.push_back(task_info7); - model.task_list_ = task_list; - - task_info7->Release(); - delete model_task_def; -} - -// test hccl_GetPrivateDefByTaskDef -TEST_F(UtestModelManagerDavinciModel, success_hccl_get_private_def_by_task_def) { - DavinciModel model(0, g_label_call_back); - - ModelTaskDef *model_task_def = new ModelTaskDef(); - TaskDef *task7 = model_task_def->add_task(); - task7->set_type(RT_MODEL_TASK_HCCL); - // for SetStream - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - // for GetPrivateDefByTaskDef - 
task7->set_ops_kernel_store_ptr(10); - std::string value = "hccl_task"; - task7->set_private_def(value); - - TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast(task7->type())); - // for Distribute - Status ret = task_info7->Init(task7[0], &model); - EXPECT_EQ(ge::PARAM_INVALID, ret); - - task_info7->Release(); - delete model_task_def; -} - -// test hccl_task_TransToGETaskInfo -TEST_F(UtestModelManagerDavinciModel, success_hccl_trans_to_ge_task_info) { - DavinciModel model(0, g_label_call_back); - - ModelTaskDef *model_task_def = new ModelTaskDef(); - TaskDef *task7 = model_task_def->add_task(); - // for type - task7->set_type(RT_MODEL_TASK_HCCL); - TaskInfoPtr task_info7 = TaskInfoFactory::Instance().Create(static_cast(task7->type())); - - GETaskInfo ge_task; - HcclTaskInfo *hccl_task_info = new HcclTaskInfo(); - hccl_task_info->TransToGETaskInfo(ge_task); - - delete hccl_task_info; - delete model_task_def; -} - -// test stream_active_task Init -TEST_F(UtestModelManagerDavinciModel, success_stream_active_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - StreamActiveTaskInfo *stream_active_task_info1 = new StreamActiveTaskInfo(); - Status ret1 = stream_active_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - delete stream_active_task_info1; - delete task_def1; - delete model1; - - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - task_def->set_stream_id(0); - rtStream_t stream1, stream2; - rtStreamCreate(&stream1, 0); - rtStreamCreate(&stream2, 0); - model.stream_list_.push_back(stream1); - - StreamActiveTaskInfo *stream_active_task_info = new StreamActiveTaskInfo(); - - StreamActiveDef *stream_active_def = task_def->mutable_stream_active(); - stream_active_def->set_op_index(0); - stream_active_def->set_active_stream_id(0); - - std::map flowctrl; - flowctrl.insert(pair(1, 1)); - model.flowctrl_op_index_internal_map_ = flowctrl; - - auto opDef = CreateOpDesc("", ""); - model.op_list_[0] = opDef; - - Status ret = stream_active_task_info->Init(task_def[0], &model); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); // line 51 - - model.stream_list_.push_back(stream2); - ret = stream_active_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - - task_def->clear_stream_active(); - delete stream_active_task_info; - delete task_def; -} - -// test label_set_task Init -TEST_F(UtestModelManagerDavinciModel, success_label_set_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - LabelSetTaskInfo *label_set_task_info1 = new LabelSetTaskInfo(); - Status ret1 = label_set_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - delete label_set_task_info1; - delete task_def1; - delete model1; - - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - LabelSetTaskInfo *label_set_task_info = new LabelSetTaskInfo(); - task_def->set_stream_id(0); - rtStream_t stream; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - - task_def->set_label_id(1); - model.runtime_param_.batch_num = 0; - Status ret = label_set_task_info->Init(task_def[0], &model); - EXPECT_EQ(PARAM_INVALID, ret); - - task_def->clear_label_id(); - task_def->set_label_id(0); - model.runtime_param_.batch_num = 1; - rtLabel_t label; - rtLabelCreate(&label); - model.label_list_.push_back(label); - - ret = label_set_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - delete label_set_task_info; - delete task_def; -} - -// test 
label_goto_task init -TEST_F(UtestModelManagerDavinciModel, success_label_goto_task_init) { - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - LabelGotoTaskInfo *label_goto_task_info = new LabelGotoTaskInfo(); - task_def->set_stream_id(0); - - rtStream_t stream; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - - rtLabel_t label; - rtLabelCreate(&label); - model.label_list_.push_back(label); - - Status ret = label_goto_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - - delete label_goto_task_info; - delete task_def; -} - -// test profiler_trace_task init -TEST_F(UtestModelManagerDavinciModel, success_profiler_trace_task_init) { - DavinciModel *model1 = nullptr; - TaskDef *task_def1 = new TaskDef(); - ProfilerTraceTaskInfo *profiler_trace_task_info1 = new ProfilerTraceTaskInfo(); - Status ret1 = profiler_trace_task_info1->Init(task_def1[0], model1); - EXPECT_EQ(PARAM_INVALID, ret1); - - delete profiler_trace_task_info1; - delete task_def1; - delete model1; - DavinciModel model(0, g_label_call_back); - TaskDef *task_def = new TaskDef(); - task_def->set_stream_id(0); - rtStream_t stream; - rtStreamCreate(&stream, 0); - model.stream_list_.push_back(stream); - LogTimeStampDef *logTimeStampDef = task_def->mutable_log_timestamp(); - logTimeStampDef->set_logid(1); - logTimeStampDef->set_notify(1); - logTimeStampDef->set_flat(1); - ProfilerTraceTaskInfo *profiler_trace_task_info = new ProfilerTraceTaskInfo(); - Status ret = profiler_trace_task_info->Init(task_def[0], &model); - EXPECT_EQ(SUCCESS, ret); - - task_def->clear_log_timestamp(); - delete profiler_trace_task_info; - delete task_def; -} - -TEST_F(UtestModelManagerDavinciModel, profiling_model_success) { - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - - DavinciModel model(0, g_label_call_back); - model.model_id_ = 1; - model.name_ = "test"; - model.version_ = 0x01; - - model.stream_list_.push_back(stream); - - ge::ModelData data; - rtMallocHost(&data.model_data, 128); - data.model_len = 128; - - ModelDef *model_def = new ModelDef(); - auto op_def = CreateOpDesc("", "Data"); - op_def->SetInputOffset({1}); - op_def->SetOutputOffset({100}); - - ge::GeTensorDesc descin(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetSize(descin, 4); - op_def->AddInputDesc(descin); - ge::GeTensorDesc desc_out(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetSize(desc_out, 32); - op_def->AddInputDesc(desc_out); - op_def->SetId(0); - - model.data_op_list_.push_back(op_def); - model.op_list_[0] = op_def; - - auto opdef1 = CreateOpDesc("", "Relu"); - opdef1->SetInputOffset({1}); - opdef1->SetOutputOffset({100}); - - ge::GeTensorDesc desc_in1(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetSize(desc_in1, 4); - opdef1->AddInputDesc(desc_in1); - ge::GeTensorDesc desc_out1(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetSize(desc_out1, 32); - opdef1->AddInputDesc(desc_out1); - op_def->SetId(1); - - model.op_list_[1] = opdef1; - - auto opdef2 = CreateOpDesc("", "Relu"); - opdef2->SetInputOffset({1}); - opdef2->SetOutputOffset({100}); - - ge::GeTensorDesc desc_in2(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetSize(desc_in2, 4); - opdef2->AddInputDesc(desc_in2); - ge::GeTensorDesc desc_out2(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetSize(desc_out2, 32); - 
opdef2->AddInputDesc(desc_out2); - op_def->SetId(2); - - model.op_list_[2] = opdef2; - - auto opdef3 = CreateOpDesc("", "Relu"); - opdef3->SetInputOffset({1}); - opdef3->SetOutputOffset({100}); - - ge::GeTensorDesc desc_in3(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetSize(desc_in3, 4); - opdef3->AddInputDesc(desc_in3); - ge::GeTensorDesc desc_out3(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetSize(desc_out3, 32); - opdef3->AddInputDesc(desc_out3); - op_def->SetId(3); - - model.op_list_[3] = opdef3; - - auto opdef4 = CreateOpDesc("", "Relu"); - opdef4->SetInputOffset({1}); - opdef4->SetOutputOffset({100}); - - ge::GeTensorDesc desc_in4(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetSize(desc_in4, 4); - opdef4->AddInputDesc(desc_in4); - ge::GeTensorDesc desc_out4(ge::GeShape({1, 1, 1, 1}), ge::FORMAT_NCHW, ge::DT_FLOAT16); - ge::TensorUtils::SetSize(desc_out4, 32); - opdef4->AddInputDesc(desc_out4); - op_def->SetId(4); - - model.op_list_[4] = opdef4; - - ge::InputData input_data; - ge::DataBuffer data_buffer; - data_buffer.data = new char[4]; - data_buffer.length = 4; - input_data.index = 0; - input_data.model_id = 1; - input_data.blobs.push_back(data_buffer); - - rtFreeHost(data.model_data); - delete[](char *) data_buffer.data; - delete model_def; -} - -TEST_F(UtestModelManagerDavinciModel, success_output_list_0) { - DavinciModel model(0, g_label_call_back); - - uint32_t version = 0; - uint64_t session_id = 0; - uint32_t device_id = 0; - uint64_t job_id = 0; - Status ret = VarManager::Instance(session_id)->Init(version, session_id, device_id, job_id); - EXPECT_EQ(ret, ge::SUCCESS); - - ret = model.ReturnNoOutput(1); - EXPECT_EQ(ret, ge::SUCCESS); - - VarManagerPool::Instance().Destroy(); -} - -// test dyncbatch_distributeTask_SUCCESS -TEST_F(UtestModelManagerDavinciModel, dyncbatch_distribute_task_success) { - DavinciModel model(0, g_label_call_back); - - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - - rtLabel_t label = nullptr; - rtLabelCreate(&label); - model.label_list_.push_back(label); - rtLabelCreate(&label); - model.label_list_.push_back(label); - rtLabelCreate(&label); - model.label_list_.push_back(label); - - rtLabelDestroy(label); - rtStreamDestroy(stream); -} - -// test GetOutputDescInfo -TEST_F(UtestModelManagerDavinciModel, success_get_output_desc_info_with_netoutput) { - setenv("GE_TRAIN", "1", true); - DavinciModel model(0, g_label_call_back); - - auto op_desc = CreateOpDesc("Data", "Data"); - op_desc->SetOutputOffset({1}); - op_desc->SetInputOffset({1}); - op_desc->SetStreamId(0); - - { - ge::GeTensorDesc in_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_FRACTAL_Z, ge::DT_FLOAT16); - ge::TensorUtils::SetOutputTensor(in_desc, false); - ge::TensorUtils::SetInputTensor(in_desc, true); - op_desc->AddInputDesc(in_desc); - } - - { - ge::GeTensorDesc out_desc(ge::GeShape({1, 1, 10, 10}), ge::FORMAT_NCHW, ge::DT_FLOAT); - ge::TensorUtils::SetOutputTensor(out_desc, true); - ge::TensorUtils::SetInputTensor(out_desc, false); - op_desc->AddOutputDesc(out_desc); - } - - op_desc->SetSrcName({"Pooling1", "Pooling0"}); - op_desc->SetSrcIndex({0, 1}); - - auto compute_graph = make_shared("g"); - - op_desc->SetType("NetOutput"); - - auto net_out_node = compute_graph->AddNode(op_desc); - model.op_list_[0] = op_desc; - - model.output_op_list_.push_back(op_desc); - model.output_size_list_.push_back(32); - model.output_memory_size_list_.push_back(64); - - vector output_shapes; 
- vector formats; - EXPECT_EQ(ge::SUCCESS, model.GetOutputDescInfo(output_shapes, formats)); - - setenv("GE_TRAIN", "0", true); -} - -TEST_F(UtestModelManagerDavinciModel, device_runtime_success_Run) { - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - - DavinciModel model(0, g_label_call_back); - - model.stream_list_.push_back(stream); - auto model_def = make_shared(); - - auto op_def = CreateOpDesc("", "Data"); - - auto compute_graph = make_shared("g"); - compute_graph->AddNode(op_def); - - model_def->SetGraph(ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph)); - - model.data_op_list_.push_back(op_def); - - model.data_inputer_ = new DataInputer(); - - model.ModelRunStart(); - - OutputData output_data; - ge::InputData input_data; - - ge::DataBuffer data_buffer; - data_buffer.data = new char[16]; - data_buffer.length = 16; - - input_data.index = 0; - input_data.model_id = 1; - input_data.blobs.push_back(data_buffer); - - model.ModelRunStop(); - - delete[](char *) data_buffer.data; -} - -TEST_F(UtestModelManagerDavinciModel, run_failed) { - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - - DavinciModel model(0, g_label_call_back); - - model.stream_list_.push_back(stream); - auto model_def = make_shared(); - - auto op_def = CreateOpDesc("", "Data"); - - auto compute_graph = make_shared("g"); - compute_graph->AddNode(op_def); - - model_def->SetGraph(ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph)); - - model.data_op_list_.push_back(op_def); - - model.data_inputer_ = new DataInputer(); - - model.ModelRunStart(); - - OutputData output_data; - ge::InputData input_data; - - ge::DataBuffer data_buffer; - data_buffer.data = new char[16]; - data_buffer.length = 16; - - input_data.index = 0; - input_data.model_id = 1; - input_data.blobs.push_back(data_buffer); - - model.ModelRunStop(); - delete[](char *) data_buffer.data; -} - -TEST_F(UtestModelManagerDavinciModel, run_failed01) { - rtStream_t stream = nullptr; - rtStreamCreate(&stream, 0); - - DavinciModel model(0, g_label_call_back); - - model.stream_list_.push_back(stream); - auto model_def = make_shared(); - - auto op_def = CreateOpDesc("", "Data"); - - auto compute_graph = make_shared("g"); - compute_graph->AddNode(op_def); - - model_def->SetGraph(ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph)); - - model.data_op_list_.push_back(op_def); - - model.data_inputer_ = nullptr; - model.ModelRunStart(); - - model.ModelRunStop(); -} - -TEST_F(UtestModelManagerDavinciModel, init_tbe_handle_fe_registered) { - DavinciModel::tvm_bin_kernel_.clear(); - DavinciModel model(0, g_label_call_back); - OpDescPtr op_desc = CreateOpDesc("MatMul", "MatMul"); - - std::vector kernelBin; - TBEKernelPtr tbe_kernel = std::make_shared("name/MatMul", std::move(kernelBin)); - op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel); - - std::string kernel_name("kernel/MatMul"); - AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name); - - EXPECT_EQ(model.InitTbeHandle(op_desc), SUCCESS); - EXPECT_EQ(model.InitTbeHandle(op_desc), SUCCESS); - - EXPECT_EQ(model.used_tbe_handle_map_.size(), 0); - DavinciModel::tvm_bin_kernel_.clear(); -} - -TEST_F(UtestModelManagerDavinciModel, init_tbe_handle_ge_registered) { - DavinciModel::tvm_bin_kernel_.clear(); - DavinciModel model(0, g_label_call_back); - OpDescPtr op_desc = CreateOpDesc("MatMul", "MatMul"); - - std::vector kernelBin; - TBEKernelPtr tbe_kernel = std::make_shared("name/MatMul", std::move(kernelBin)); - 
op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel); - - std::string kernel_name("kernel/MatMul"); - AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name); - - string session_graph_id; - AttrUtils::GetStr(op_desc, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id); - const char *bin_file_key = DavinciModel::GetRegisterStub(op_desc->GetName(), session_graph_id); - model.used_tbe_handle_map_[bin_file_key] = 1; // test first register. - - EXPECT_EQ(model.InitTbeHandle(op_desc), SUCCESS); - EXPECT_EQ(model.InitTbeHandle(op_desc), SUCCESS); - - EXPECT_EQ(model.used_tbe_handle_map_.size(), 1); - - auto it = model.used_tbe_handle_map_.find(bin_file_key); - EXPECT_NE(it, model.used_tbe_handle_map_.end()); - EXPECT_EQ(it->second, 3); - DavinciModel::tvm_bin_kernel_.clear(); -} -} // namespace ge diff --git a/tests/ut/ge/graph/load/new_model_manager_event_manager_unittest.cc b/tests/ut/ge/graph/load/new_model_manager_event_manager_unittest.cc deleted file mode 100644 index ee708501..00000000 --- a/tests/ut/ge/graph/load/new_model_manager_event_manager_unittest.cc +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include "common/debug/log.h" -#include "common/debug/memory_dumper.h" -#include "common/types.h" - -#define private public -#include "graph/manager/model_manager/event_manager.h" -#undef private - -using namespace ge; -using namespace std; -using namespace testing; - -class UtestModelManagerEventManager : public testing::Test { - protected: - void SetUp() {} - - void TearDown() {} -}; - -// test repeat initialize -TEST_F(UtestModelManagerEventManager, repeat_initialization) { - ge::EventManager event_manager; - size_t event_num = 1; - event_manager.Init(event_num); - Status ret = event_manager.Init(event_num); - EXPECT_EQ(ret, SUCCESS); -} - -TEST_F(UtestModelManagerEventManager, call_event_record_normal) { - ge::EventManager event_manager; - size_t event_num = 1; - Status ret = event_manager.Init(event_num); - EXPECT_EQ(SUCCESS, ret); - EXPECT_NE(event_manager.event_list_.size(), 0); - - ret = event_manager.EventRecord(0, NULL); - EXPECT_EQ(SUCCESS, ret); -} - -// test load EventRecore when uninited -TEST_F(UtestModelManagerEventManager, call_event_record_while_uninited) { - ge::EventManager event_manager; - Status ret = event_manager.EventRecord(1, NULL); - EXPECT_EQ(ge::INTERNAL_ERROR, ret); -} - -// test with invalid param when load EventRecord -TEST_F(UtestModelManagerEventManager, call_event_record_with_invalid_param) { - ge::EventManager event_manager; - Status ret = event_manager.Init(1); - EXPECT_EQ(SUCCESS, ret); - ret = event_manager.EventRecord(1, NULL); - EXPECT_EQ(ge::PARAM_INVALID, ret); -} - -// test load EventElapsedTime when uninited -TEST_F(UtestModelManagerEventManager, call_event_elapsed_time_while_uninited) { - ge::EventManager event_manager; - float time = .0f; - Status ret = event_manager.EventElapsedTime(1, 2, time); - 
EXPECT_EQ(ge::INTERNAL_ERROR, ret); -} - -// test with invalid param when load EventElapsedTime -TEST_F(UtestModelManagerEventManager, call_event_elapsed_time_with_invalid_param) { - ge::EventManager *event_manager = new ge::EventManager; - size_t event_num = 2; - Status ret = event_manager->Init(event_num); - EXPECT_EQ(SUCCESS, ret); - float time = .0f; - - // normal load - ret = event_manager->EventElapsedTime(0, 1, time); - EXPECT_EQ(SUCCESS, ret); - - // startevent_idx overstep boundary - ret = event_manager->EventElapsedTime(2, 1, time); - EXPECT_EQ(ge::PARAM_INVALID, ret); - - // stopevent_idx overstep boundary - ret = event_manager->EventElapsedTime(1, 2, time); - EXPECT_EQ(ge::PARAM_INVALID, ret); - - // startevent_idx > stopevent_idx - ret = event_manager->EventElapsedTime(1, 0, time); - EXPECT_EQ(ge::PARAM_INVALID, ret); - - delete event_manager; -} -TEST_F(UtestModelManagerEventManager, call_get_event) { - ge::EventManager event_manager; - size_t event_num = 1; - event_manager.Init(event_num); - rtEvent_t event = nullptr; - Status ret = event_manager.GetEvent(2, event); - EXPECT_EQ(ge::PARAM_INVALID, ret); - ret = event_manager.GetEvent(0, event); - EXPECT_EQ(SUCCESS, ret); -} diff --git a/tests/ut/ge/graph/load/new_model_manager_task_build_unittest.cc b/tests/ut/ge/graph/load/new_model_manager_task_build_unittest.cc deleted file mode 100644 index f10ccd7f..00000000 --- a/tests/ut/ge/graph/load/new_model_manager_task_build_unittest.cc +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -#include "common/debug/log.h" -#include "common/debug/memory_dumper.h" -#include "common/types.h" -#include "new_op_test_utils.h" -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/attr_utils.h" -#include "graph/detail/model_serialize_imp.h" -#include "proto/ge_ir.pb.h" - -#define private public -#define protected public -#include "graph/compute_graph.h" -#include "graph/utils/graph_utils.h" -#include "graph/model_serialize.h" -#include "graph/load/model_manager/davinci_model.h" -#include "common/properties_manager.h" -#include "common/op/ge_op_utils.h" -#include -#include "runtime/dev.h" -#include "runtime/kernel.h" -#include "cce/fwk_adpt_struct.h" -#undef private -#undef protected - -using namespace std; -using namespace testing; - -namespace ge { -class UtestModelManagerTaskBuilder : public testing::Test { - protected: - void SetUp() {} - - void TearDown() {} - - /// data weight - /// | | | | - /// |-conv-| | | - /// | | | - /// conv2d | - /// | | - /// |-resApply - - void BuildGraph(ComputeGraphPtr graph) { - OpDescPtr data = std::make_shared("DATA1", "data"); - OpDescPtr weight = std::make_shared("WEIGHT", "weight"); - OpDescPtr conv_op = std::make_shared("conv", "conv"); - OpDescPtr conv_2D = std::make_shared("conv_2D", "conv2d"); - OpDescPtr res_apply_op = std::make_shared("res_apply_op", "resapply"); - // add descriptor - vector dim(4, 4); - GeShape shape(dim); - GeTensorDesc out_desc(shape); - int32_t blockSize = 4096; - - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 1); - data->AddOutputDesc(out_desc); - - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 2); - weight->AddOutputDesc(out_desc); - - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 1); - conv_op->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 2); - conv_op->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 3); - conv_op->AddOutputDesc(out_desc); - - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 3); - conv_2D->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 2); - conv_2D->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 4); - conv_2D->AddOutputDesc(out_desc); - - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 4); - res_apply_op->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 1); - res_apply_op->AddInputDesc(out_desc); - ge::TensorUtils::SetDataOffset(out_desc, blockSize * 5); - res_apply_op->AddOutputDesc(out_desc); - - NodePtr data_node = graph->AddNode(data); - NodePtr weigth_node = graph->AddNode(weight); - NodePtr conv_node = graph->AddNode(conv_op); - NodePtr conv_2D_node = graph->AddNode(conv_2D); - NodePtr res_node = graph->AddNode(res_apply_op); - - GraphUtils::AddEdge(data_node->GetOutDataAnchor(0), conv_node->GetInDataAnchor(0)); - GraphUtils::AddEdge(weigth_node->GetOutDataAnchor(0), conv_node->GetInDataAnchor(1)); - GraphUtils::AddEdge(conv_node->GetOutDataAnchor(0), conv_2D_node->GetInDataAnchor(0)); - GraphUtils::AddEdge(weigth_node->GetOutDataAnchor(0), conv_2D_node->GetInDataAnchor(1)); - GraphUtils::AddEdge(conv_2D_node->GetOutDataAnchor(0), res_node->GetInDataAnchor(0)); - GraphUtils::AddEdge(weigth_node->GetOutDataAnchor(0), res_node->GetInDataAnchor(1)); - return; - } -}; -} // namespace ge diff --git a/tests/ut/ge/graph/load/output_net_output_unittest.cc b/tests/ut/ge/graph/load/output_net_output_unittest.cc deleted file mode 100644 index 97246dad..00000000 --- 
a/tests/ut/ge/graph/load/output_net_output_unittest.cc +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "securec.h" - -#define protected public -#define private public -#include "common/debug/memory_dumper.h" -#include "common/op/ge_op_utils.h" -#include "graph/load/model_manager/davinci_model.h" -#include "graph/load/model_manager/model_utils.h" -#include "graph/manager/graph_var_manager.h" -#include "new_op_test_utils.h" -#include "proto/om.pb.h" - -using namespace std; - -namespace ge { -class UtestNetOutput : public testing::Test { - protected: - void TearDown() {} - shared_ptr GenOpdef(OpDescPtr &op_desc, int flag) { - shared_ptr builder = make_shared(op_desc); - builder->SetStreamId(0); - builder->AddInput(1); - builder->SetType("NetOutput"); - - if (flag == 1) { - auto input_desc_1 = builder->AddInputDesc({1, 1, 10, 10}, FORMAT_NCHW, DT_FLOAT16); - } - auto input_desc_1 = builder->AddInputDesc({1, 1, 10, 10}, FORMAT_NCHW, DT_FLOAT16); - - if (flag == 2) { - auto input_desc_2 = builder->AddInputDesc({1, 1, 10, 10}, FORMAT_NCHW, DT_FLOAT16); - } - if (flag == 3) { - builder->AddInput(10); - } - - return builder; - } - shared_ptr GenOpdef2(OpDescPtr &op_desc) { - shared_ptr builder = make_shared(op_desc); - builder->SetStreamId(0); - builder->SetType("NetOutput"); - builder->AddInput(10); - - auto input_desc_1 = builder->AddInputDesc({64, 32, 5, 5}, FORMAT_FRACTAL_Z, DT_FLOAT); - - builder->AddInput(1000000); - auto input_desc_2 = builder->AddInputDesc({1, 10, 10, 1}, FORMAT_NHWC, DT_FLOAT); - - builder->AddOutput(2000000); - auto output_desc_1 = builder->AddOutputDesc({64, 32, 5, 5}, FORMAT_NCHW, DT_FLOAT); - - builder->AddOutput(2100000); - output_desc_1 = builder->AddOutputDesc({1, 10, 10, 1}, FORMAT_NHWC, DT_FLOAT); - - return builder; - } - - public: - shared_ptr dav_model_; -}; - -TEST_F(UtestNetOutput, test_get_input_size) { - shared_ptr custom_op_desc = make_shared(); - OmeTestOpDescBuilder builder(custom_op_desc); - builder.SetName("netoutput"); - builder.SetStreamId(0); - builder.SetType("NetOutput"); - - auto input_desc_1 = builder.AddInputDesc({1, 1, 1, 1}, FORMAT_FRACTAL_Z, DT_FLOAT); - builder.AddInput(1); - auto output_desc = builder.AddOutputDesc({1, 1, 1, 1}, FORMAT_NCHW, DT_FLOAT); - builder.AddOutput(1); - builder.Finish(); - - vector v_output_size = ModelUtils::GetInputSize(custom_op_desc); - EXPECT_EQ(v_output_size.size(), 1); -} - -// test ModelUtils::IsOutput -TEST_F(UtestNetOutput, success_is_output) { - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - OmeTestOpDescBuilder builder(op_desc); - builder.SetType("NetOutput"); - vector outputs_desc; - std::shared_ptr desc = std::make_shared(); - outputs_desc.push_back(desc); - op_desc->outputs_desc_ = outputs_desc; - bool ret = model_utils->IsOutput(op_desc); - EXPECT_EQ(false, ret); - - delete model_utils; -} - -// test 
ModelUtils::IsOutput -TEST_F(UtestNetOutput, true_is_output) { - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - OmeTestOpDescBuilder builder(op_desc); - builder.SetType("NetOutput"); - vector outputs_desc; - std::shared_ptr desc = std::make_shared(); - outputs_desc.push_back(desc); - op_desc->outputs_desc_ = outputs_desc; - ge::TensorUtils::SetOutputTensor(*(outputs_desc[0].get()), true); - bool ret = model_utils->IsOutput(op_desc); - EXPECT_EQ(true, ret); - - delete model_utils; -} - -// test ModelUtils::IsInputTensorNeedTrans -TEST_F(UtestNetOutput, success_is_output_tensor_need_trans) { - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - OmeTestOpDescBuilder builder(op_desc); - builder.SetType("NetOutput"); - size_t tensor_index = 1; - vector outputs_desc; - std::shared_ptr desc = std::make_shared(); - outputs_desc.push_back(desc); - op_desc->outputs_desc_ = outputs_desc; - op_desc->inputs_desc_ = outputs_desc; - - bool ret = model_utils->IsInputTensorNeedTrans(op_desc, tensor_index); - EXPECT_EQ(false, ret); - - delete model_utils; -} - -// test ModelUtils::GetOutputSize -TEST_F(UtestNetOutput, success_get_output_size) { - vector v_output_size; - - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - vector outputs_desc; - std::shared_ptr desc = std::make_shared(); - outputs_desc.push_back(desc); - op_desc->outputs_desc_ = outputs_desc; - EXPECT_EQ(v_output_size, model_utils->GetOutputSize(op_desc)); - - vector output = {1}; - op_desc->SetOutputOffset(output); - uint32_t tensor_size = 0; - v_output_size.push_back(tensor_size); - EXPECT_EQ(v_output_size, model_utils->GetOutputSize(op_desc)); - delete model_utils; -} - -// test ModelUtils::GetWorkspaceSize -TEST_F(UtestNetOutput, success_get_workspace_size) { - vector v_workspace_size; - - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - vector workspace = {1}; - op_desc->SetWorkspace(workspace); - EXPECT_EQ(v_workspace_size, model_utils->GetWorkspaceSize(op_desc)); - - op_desc->SetWorkspaceBytes(workspace); - v_workspace_size.push_back(1); - EXPECT_EQ(v_workspace_size, model_utils->GetWorkspaceSize(op_desc)); - delete model_utils; -} - -// test ModelUtils::GetWeightSize -TEST_F(UtestNetOutput, success_get_weight_size) { - vector v_weight_size; - - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - op_desc->SetType("Const"); - EXPECT_EQ(v_weight_size, model_utils->GetWeightSize(op_desc)); - - op_desc->SetType("NetOutput"); - vector inputs_desc; - std::shared_ptr desc = std::make_shared(); - inputs_desc.push_back(desc); - op_desc->inputs_desc_ = inputs_desc; - - vector is_input_const = {true}; - op_desc->SetIsInputConst(is_input_const); - v_weight_size.push_back(0); - EXPECT_EQ(v_weight_size, model_utils->GetWeightSize(op_desc)); - - delete model_utils; -} - -// test ModelUtils::GetWeights -TEST_F(UtestNetOutput, success_get_weights) { - vector v_weights; - - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - op_desc->SetType("Const"); - EXPECT_EQ(v_weights, model_utils->GetWeights(op_desc)); - - op_desc->SetType("NetOutput"); - vector inputs_desc; - std::shared_ptr desc = std::make_shared(); - inputs_desc.push_back(desc); - op_desc->inputs_desc_ = inputs_desc; - - vector is_input_const = {true}; - op_desc->SetIsInputConst(is_input_const); - GeTensorDesc tensor_desc; - 
EXPECT_EQ(v_weights, model_utils->GetWeights(op_desc)); - - delete model_utils; -} - -// test ModelUtils::GetInputDescs -TEST_F(UtestNetOutput, success_get_input_descs) { - vector<::opTensor_t> v_input_descs; - vector<::tagCcAICPUTensor> ret; - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - ret = model_utils->GetInputDescs(op_desc); - EXPECT_EQ(v_input_descs.size(), ret.size()); - - vector inputs_desc; - std::shared_ptr desc = std::make_shared(); - inputs_desc.push_back(desc); - op_desc->inputs_desc_ = inputs_desc; - vector is_input_const = {false}; - op_desc->SetIsInputConst(is_input_const); - - opTensor_t tmp; - tmp.format = OP_TENSOR_FORMAT_NC1HWC0; - tmp.dim_cnt = 0; - tmp.data_type = OP_DATA_FLOAT; - v_input_descs.push_back(tmp); - ret = model_utils->GetInputDescs(op_desc); - EXPECT_EQ(v_input_descs.size(), ret.size()); - - delete model_utils; -} - -// test ModelUtils::GetOutputDescs -TEST_F(UtestNetOutput, success_get_output_descs) { - vector<::opTensor_t> v_output_descs; - vector<::tagCcAICPUTensor> ret; - ModelUtils *model_utils = new ModelUtils(); - std::shared_ptr op_desc = std::make_shared(); - ret = model_utils->GetOutputDescs(op_desc); - EXPECT_EQ(v_output_descs.size(), ret.size()); - - vector outputs_desc; - std::shared_ptr desc = std::make_shared(); - outputs_desc.push_back(desc); - op_desc->outputs_desc_ = outputs_desc; - - opTensor_t tmp; - tmp.format = OP_TENSOR_FORMAT_NC1HWC0; - tmp.dim_cnt = 0; - tmp.data_type = OP_DATA_FLOAT; - v_output_descs.push_back(tmp); - ret = model_utils->GetOutputDescs(op_desc); - EXPECT_EQ(v_output_descs.size(), ret.size()); - - delete model_utils; -} - -// test Output::GetOutputData -TEST_F(UtestNetOutput, success_get_output_data) { - Output *output = new Output(nullptr, nullptr); - output->v_input_data_addr_.push_back((void *)1); - output->v_input_size_.push_back(1); - output->input_num_ = 1; - - vector v_data_addr; - vector v_data_size; - output->GetOutputData(v_data_addr, v_data_size); - - EXPECT_EQ(output->v_input_data_addr_, v_data_addr); - EXPECT_EQ(output->v_input_size_, v_data_size); - delete output; -} -} // namespace ge diff --git a/tests/ut/ge/graph/manager/graph_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_manager_unittest.cc index 518cfdcd..b40690e2 100644 --- a/tests/ut/ge/graph/manager/graph_manager_unittest.cc +++ b/tests/ut/ge/graph/manager/graph_manager_unittest.cc @@ -30,9 +30,6 @@ #define protected public #define private public #include "graph/manager/graph_manager.h" -#define const -#include "common/helper/model_cache_helper.h" -#undef const #include "init/gelib.h" #include "common/math/math_util.h" From 67974b31362c13d8fa986abc7ecdee3f9e50b2f4 Mon Sep 17 00:00:00 2001 From: zhaoxinxin Date: Sat, 17 Jul 2021 14:41:15 +0800 Subject: [PATCH 219/226] fix pytorch infershape origin shape --- ge/graph/passes/infershape_pass.cc | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/ge/graph/passes/infershape_pass.cc b/ge/graph/passes/infershape_pass.cc index 05b1b5fc..0555929d 100755 --- a/ge/graph/passes/infershape_pass.cc +++ b/ge/graph/passes/infershape_pass.cc @@ -228,19 +228,13 @@ bool InferShapePass::SameTensorDesc(const GeTensorDescPtr &src, const GeTensorDe } graphStatus InferShapePass::UpdateTensorDesc(const GeTensorDescPtr &src, GeTensorDescPtr &dst, bool &changed) { - changed = !SameTensorDesc(src, dst); - // refresh src itself - src->SetOriginShape(src->GetShape()); - src->SetOriginDataType(src->GetDataType()); - 
TensorUtils::SetRealDimCnt(*src, static_cast<uint32_t>(src->GetOriginShape().GetDims().size()));
-  vector<pair<int64_t, int64_t>> src_shape_range;
-  src->GetShapeRange(src_shape_range);
-  src->SetOriginShapeRange(src_shape_range);
-
-  if (!changed) {
+  changed = false;
+  if (SameTensorDesc(src, dst)) {
     GELOGD("Peer dst tensor_desc is same as src tensor_desc. No need update.");
     return SUCCESS;
   }
+
+  changed = true;
   UpdateShapeAndDType(src, dst);
   GELOGD(
       "UpdatePeerInputDesc from src Node: shape: [%s], datatype: %s, original datatype is %s."

From 3dc9881cd65bef96a7583b9db4be0c9e138ddee9 Mon Sep 17 00:00:00 2001
From: "gengchao4@huawei.com" <gengchao4@huawei.com>
Date: Sat, 17 Jul 2021 19:22:59 +0800
Subject: [PATCH 220/226] bugfix for taskdef's random variation in offline case

---
 .../ge/graph/build/task_generator_unittest.cc | 77 ++++++++++++++++++-
 1 file changed, 76 insertions(+), 1 deletion(-)

diff --git a/tests/ut/ge/graph/build/task_generator_unittest.cc b/tests/ut/ge/graph/build/task_generator_unittest.cc
index 1e865050..7be20fa1 100644
--- a/tests/ut/ge/graph/build/task_generator_unittest.cc
+++ b/tests/ut/ge/graph/build/task_generator_unittest.cc
@@ -29,6 +29,8 @@
 #define protected public
 #define private public
+#include "init/gelib.h"
+#include "ge/opskernel_manager/ops_kernel_builder_manager.h"
 #include "graph/build/task_generator.h"
 #include "graph/manager/graph_mem_manager.h"
 #include "graph/manager/graph_var_manager.h"
@@ -41,9 +43,46 @@ using namespace ge;
 namespace {
 const char *const kIsInputVar = "INPUT_IS_VAR";
 const char *const kIsOutputVar = "OUTPUT_IS_VAR";
-}
+const char *const kKernelInfoNameHccl = "ops_kernel_info_hccl";
+}  // namespace
 class UtestTaskGeneratorTest : public testing::Test {
  public:
+  struct FakeOpsKernelBuilder : OpsKernelBuilder {
+    FakeOpsKernelBuilder(){};
+
+   private:
+    Status Initialize(const map<std::string, std::string> &options) override {
+      return SUCCESS;
+    };
+    Status Finalize() override {
+      return SUCCESS;
+    };
+    Status CalcOpRunningParam(Node &node) override {
+      return SUCCESS;
+    };
+    Status GenerateTask(const Node &node, RunContext &context, std::vector<domi::TaskDef> &tasks) override {
+      domi::TaskDef task_def;
+      tasks.push_back(task_def);
+      return SUCCESS;
+    };
+  };
+
+  struct FakeOpsKernelInfoStore : OpsKernelInfoStore {
+    FakeOpsKernelInfoStore() = default;
+
+   private:
+    Status Initialize(const std::map<std::string, std::string> &options) override {
+      return SUCCESS;
+    };
+    Status Finalize() override {
+      return SUCCESS;
+    };
+    bool CheckSupported(const OpDescPtr &op_desc, std::string &reason) const override {
+      return true;
+    };
+    void GetAllOpsKernelInfo(std::map<std::string, ge::OpInfo> &infos) const override{};
+  };
+
   ge::ComputeGraphPtr BuildGraphFpProfiling() {
     ge::ut::GraphBuilder builder("graph");
     auto data = builder.AddNode("data", "phony", 1, 1);
@@ -95,6 +134,14 @@ class UtestTaskGeneratorTest : public testing::Test {
     return builder.GetGraph();
   }
+  ge::ComputeGraphPtr BuildHcclGraph() {
+    ge::ut::GraphBuilder builder("graph");
+    auto hccl_node = builder.AddNode("hccl_phony_node", "HCCL_PHONY", 0, 0);
+    auto op_desc = hccl_node->GetOpDesc();
+    op_desc->SetOpKernelLibName(kKernelInfoNameHccl);
+    op_desc->SetStreamId(0);
+    return builder.GetGraph();
+  }
 protected:
  void SetUp() {}

@@ -156,3 +203,31 @@ TEST_F(UtestTaskGeneratorTest, AutoFindBpOpIndex) {
   output_desc->SetName("hcom");
   EXPECT_EQ(task_generator.AutoFindBpOpIndex(graph, profiling_point, all_reduce_nodes), SUCCESS);
 }
+
+TEST_F(UtestTaskGeneratorTest, GenerateTask) {
+  map<string, string> options;
+  Status ret = ge::GELib::Initialize(options);
+  EXPECT_EQ(ret, SUCCESS);
+
+  shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
EXPECT_NE(instance_ptr, nullptr);
+
+  OpsKernelInfoStorePtr ops_kernel_info_store_ptr = MakeShared<FakeOpsKernelInfoStore>();
+  instance_ptr->opsManager_.ops_kernel_store_.insert(make_pair(kKernelInfoNameHccl, ops_kernel_info_store_ptr));
+
+  OpsKernelBuilderManager &builder_manager_instance_ptr = ge::OpsKernelBuilderManager::Instance();
+  OpsKernelBuilderPtr fake_builder = MakeShared<FakeOpsKernelBuilder>();
+  builder_manager_instance_ptr.ops_kernel_builders_[kKernelInfoNameHccl] = fake_builder;
+
+  auto graph = BuildHcclGraph();
+  TaskGenerator task_generator(nullptr, 0);
+  RunContext run_context;
+  run_context.graphStreamList.push_back(static_cast<rtStream_t>(ops_kernel_info_store_ptr.get()));
+  vector<NodePtr> all_reduce_nodes;
+  vector<domi::TaskDef> task_def_list;
+  map<uint32_t, string> op_name_map;
+
+  EXPECT_EQ(task_generator.GenerateTask(run_context, graph, task_def_list, op_name_map), SUCCESS);
+  EXPECT_EQ(task_def_list.size(), 1);
+  EXPECT_EQ(task_def_list[0].ops_kernel_store_ptr(), reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr.get()));
+}
\ No newline at end of file

From d715f462b1723dc56cd61d31a565da2c8abc8d35 Mon Sep 17 00:00:00 2001
From: zhangxiaokun
Date: Mon, 19 Jul 2021 09:06:53 +0800
Subject: [PATCH 221/226] Delete unused from SubGraphInfo

---
 ge/graph/manager/graph_manager_utils.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h
index efdbecf8..e17d9046 100644
--- a/ge/graph/manager/graph_manager_utils.h
+++ b/ge/graph/manager/graph_manager_utils.h
@@ -105,10 +105,7 @@ class SubGraphInfo {
   std::vector<bool> output_flag_;
   ModelIdInfo model_id_info_;
   GeModelPtr ge_model_ptr_;
-  bool malloc_flag_;
-  std::vector<uint8_t *> buffer_addr_;
   std::string output_names_;
-  std::vector<uint64_t> buffer_size_;
   std::string stream_label_;
   std::unordered_map<ge::NodePtr, ge::NodePtr> end_to_pld_;
   std::unordered_map<ge::NodePtr, ge::NodePtr> pld_to_end_;
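The "fix code check" patch that follows collapses a hand-rolled nullptr branch in TaskGenerator::GenerateTask into the GE_CHECK_NOTNULL guard macro. As a rough sketch of what such a guard expands to (an illustrative assumption, not the macro's real definition in GraphEngine, which also raises an E19999 inner-error report), the idea is:

// Hypothetical GE_CHECK_NOTNULL-style guard: log the offending parameter
// name and return a failure status from the enclosing function.
#define CHECK_NOTNULL_SKETCH(val)                                     \
  do {                                                                \
    if ((val) == nullptr) {                                           \
      GELOGE(PARAM_INVALID, "[Check][Param] %s is nullptr.", #val);   \
      return PARAM_INVALID;                                           \
    }                                                                 \
  } while (false)

One behavioural nuance of the swap: the deleted branch returned INTERNAL_ERROR, while a guard of this shape reports the null parameter and may surface a different status code.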
" - "node:%s(%s), op_kernel_lib_name=%s.", name.c_str(), type.c_str(), op_kernel_lib_name.c_str()); - return INTERNAL_ERROR; - } + GE_CHECK_NOTNULL(kernel_info_store); GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "[Call][UpdateAnchorStatus] node:%s(%s) failed", name.c_str(), type.c_str()); if (node->GetOpDesc()->HasAttr(ATTR_NAME_FFTS_SUB_GRAPH)) { From 4f5a7fcefd3cebfc788cdbce74a7583b22a76a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=B6=9B?= Date: Mon, 19 Jul 2021 19:58:54 +0800 Subject: [PATCH 223/226] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!2028=20:=20Fix=20bug=20of=20single=5Fop.'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ge/single_op/single_op.cc | 4 +-- ge/single_op/task/op_task.cc | 25 +++---------------- ge/single_op/task/op_task.h | 5 ++-- ge/single_op/task/tbe_task_builder.cc | 2 +- .../ge/single_op/single_op_task_unittest.cc | 24 ------------------ 5 files changed, 7 insertions(+), 53 deletions(-) diff --git a/ge/single_op/single_op.cc b/ge/single_op/single_op.cc index 23f4cfad..a82c30ba 100755 --- a/ge/single_op/single_op.cc +++ b/ge/single_op/single_op.cc @@ -433,13 +433,11 @@ Status DynamicSingleOp::ExecuteAsync(const vector &input_desc, if (!inputs_size.empty()) { StreamResource *stream_resource = SingleOpManager::GetInstance().GetResource(resource_id_, stream_); GE_CHK_STATUS_RET_NOLOG(UpdateInputsBufferAddr(stream_resource, stream_, inputs_size, update_buffers)); + GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers)); } if (hybrid_model_executor_ != nullptr) { GELOGD("Execute multi-task dynamic single op by hybrid model executor"); - if (!inputs_size.empty()) { - GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers)); - } hybrid::HybridModelExecutor::ExecuteArgs args; GE_CHK_STATUS_RET_NOLOG(InitHybridModelArgs(update_buffers, output_buffers, input_desc, args)); diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc index ee752022..dbc90ac5 100755 --- a/ge/single_op/task/op_task.cc +++ b/ge/single_op/task/op_task.cc @@ -293,9 +293,6 @@ Status TbeOpTask::UpdateNodeByShape(const vector &input_desc, cons } Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { - node_ = node; - tiling_buffer_ = tiling_buffer; - max_tiling_size_ = max_tiling_size; if (tiling_buffer != nullptr) { uintptr_t *arg_base = nullptr; size_t arg_num = 0; @@ -313,6 +310,9 @@ Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, } arg_base[tiling_index] = reinterpret_cast(tiling_buffer); } + node_ = node; + tiling_buffer_ = tiling_buffer; + max_tiling_size_ = max_tiling_size; return SUCCESS; } @@ -481,25 +481,6 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) { } } -Status AtomicAddrCleanOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) { - node_ = node; - tiling_buffer_ = tiling_buffer; - max_tiling_size_ = max_tiling_size; - if (tiling_buffer != nullptr) { - uintptr_t *arg_base = nullptr; - size_t arg_num = 0; - GetIoAddr(arg_base, arg_num); - uint32_t tiling_index = atomic_output_indices_.size(); - if (arg_num == 0 || arg_num < tiling_index) { - GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.", - tiling_index, arg_num); - return ACL_ERROR_GE_INTERNAL_ERROR; - } - arg_base[tiling_index] = reinterpret_cast(tiling_buffer); - } - return SUCCESS; -} - Status 
AtomicAddrCleanOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
                                                 const vector<GeTensorDesc> &output_desc) {
   return SUCCESS;
diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h
index 4a839389..132672b0 100644
--- a/ge/single_op/task/op_task.h
+++ b/ge/single_op/task/op_task.h
@@ -97,7 +97,7 @@ class TbeOpTask : public OpTask {
   const void *GetArgs() const;
   size_t GetArgSize() const;
   const std::string &GetStubName() const;
-  virtual Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
+  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
   const std::string &GetTaskType() const override;
   void SetHandle(void *handle);

@@ -149,7 +149,6 @@ class TbeOpTask : public OpTask {
 class AtomicAddrCleanOpTask : public TbeOpTask {
  public:
   Status InitAtomicAddrCleanIndices();
-  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) override;

  private:
   Status UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
@@ -157,8 +156,8 @@ class AtomicAddrCleanOpTask : public TbeOpTask {
   Status UpdateIoAddr(const vector<DataBuffer> &inputs, const vector<DataBuffer> &outputs) override;
   Status UpdateTilingArgs(rtStream_t stream) override;
   Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override;
-  std::vector<int> atomic_output_indices_;
+
 };

 class AiCpuBaseTask : public OpTask {
diff --git a/ge/single_op/task/tbe_task_builder.cc b/ge/single_op/task/tbe_task_builder.cc
index f947ca57..017dac25 100644
--- a/ge/single_op/task/tbe_task_builder.cc
+++ b/ge/single_op/task/tbe_task_builder.cc
@@ -425,7 +425,7 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
     GELOGD("[%s] Done allocating tiling buffer, size=%ld.", op_desc_->GetName().c_str(), max_size);
   }

-  GE_CHK_STATUS_RET_NOLOG(task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size)));
+  task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size));
   return SUCCESS;
 }

diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc
index 9a0381cd..8964df74 100644
--- a/tests/ut/ge/single_op/single_op_task_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_task_unittest.cc
@@ -237,27 +237,3 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
     ASSERT_EQ(ret, PARAM_INVALID);
   }
 }
-
-TEST_F(UtestSingleOpTask, test_dynamic_support) {
-  auto graph = make_shared<ComputeGraph>("graph");
-  auto op_desc = make_shared<OpDesc>("Add", "Add");
-  auto node = graph->AddNode(op_desc);
-  AtomicAddrCleanOpTask atomic_task;
-  TbeOpTask tbe_task;
-
-  tbe_task.arg_size_ = sizeof(void *) * 1;
-  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
-  atomic_task.arg_size_ = sizeof(void *) * 1;
-  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
-  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
-  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
-
-  tbe_task.arg_size_ = sizeof(void *) * 2;
-  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
-  atomic_task.arg_size_ = sizeof(void *) * 2;
-  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
-  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
-  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
-  tbe_task.tiling_buffer_ = nullptr;
-  atomic_task.tiling_buffer_ = nullptr;
-}

From a1dd84cc5354dd71fc494da33cc21c90505936ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=99=88=E5=8D=8E?= Date: Mon, 19 Jul 2021 22:32:54 +0800 Subject: [PATCH 224/226] aicpu op --- ge/engine_manager/dnnengine_manager.cc | 4 + ge/graph/load/model_manager/davinci_model.cc | 52 ++++ ge/graph/load/model_manager/davinci_model.h | 6 + .../task_info/kernel_ex_task_info.cc | 109 ++++++++- .../task_info/kernel_ex_task_info.h | 8 + .../task_info/kernel_task_info.cc | 106 +++++++- .../task_info/kernel_task_info.h | 8 + .../node_executor/aicpu/aicpu_ext_info.cc | 30 +++ .../node_executor/aicpu/aicpu_ext_info.h | 5 + .../aicpu/aicpu_node_executor.cc | 121 ++++++++++ .../node_executor/aicpu/aicpu_node_executor.h | 10 +- ge/single_op/task/op_task.cc | 117 +++++++++ ge/single_op/task/op_task.h | 7 + tests/depends/runtime/src/runtime_stub.cc | 93 ++++++- tests/depends/runtime/src/runtime_stub.h | 70 ++++++ tests/ut/ge/CMakeLists.txt | 1 + .../load/kernel_ex_task_info_unittest.cc | 141 ++++++++++- .../graph/load/kernel_task_info_unittest.cc | 140 ++++++++++- .../aicpu/aicpu_node_executor_unittest.cc | 227 +++++++++++++++++- .../ge/single_op/single_op_task_unittest.cc | 131 +++++++++- .../fwkacllib/inc/cce/fwk_adpt_struct.h | 16 ++ third_party/fwkacllib/inc/runtime/config.h | 8 + third_party/fwkacllib/inc/runtime/dev.h | 12 + 23 files changed, 1396 insertions(+), 26 deletions(-) create mode 100644 tests/depends/runtime/src/runtime_stub.h diff --git a/ge/engine_manager/dnnengine_manager.cc b/ge/engine_manager/dnnengine_manager.cc index 0fadd993..36f11828 100644 --- a/ge/engine_manager/dnnengine_manager.cc +++ b/ge/engine_manager/dnnengine_manager.cc @@ -239,6 +239,10 @@ std::string DNNEngineManager::GetDNNEngineName(const ge::NodePtr &node_ptr) { op_desc->SetOpEngineName(it.engine); op_desc->SetOpKernelLibName(kernel_name); // set attrs for taking information when load txt to graph object + if (it.flagAsync) { + GELOGD("Set aicpu blocking op:%s attribute(is_blocking_op):true", op_desc->GetName().c_str()); + (void)AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true); + } (void) AttrUtils::SetStr(op_desc, ATTR_NAME_ENGINE_NAME_FOR_LX, it.engine); (void) AttrUtils::SetStr(op_desc, ATTR_NAME_KKERNEL_LIB_NAME_FOR_LX, kernel_name); GELOGD("DNNEngineManager:Set OpKernelLibName %s and engine name %s to op_desc %s", kernel_name.c_str(), diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index aba06173..495ec28e 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -238,6 +238,12 @@ DavinciModel::~DavinciModel() { GE_LOGW_IF(rtEventDestroy(event_list_[i]) != RT_ERROR_NONE, "Destroy event failed, index: %zu", i); } + for (const auto &it : stream_2_event_) { + if (rtEventDestroy(it.second) != RT_ERROR_NONE) { + GELOGW("Destroy event failed"); + } + } + FreeWeightsMem(); FreeFeatureMapMem(); @@ -4648,4 +4654,50 @@ Status DavinciModel::GetTotalMemSizeExcludeZeroCopy(int64_t &total_useful_size) total_useful_size = runtime_param_.mem_size - runtime_param_.zero_copy_size; return SUCCESS; } + +Status DavinciModel::GetEventIdForBlockingAicpuOp(const OpDescPtr &op_desc, rtStream_t stream, uint32_t &event_id) { + GELOGI("Get event id for aicpu blocking op:%s", op_desc->GetName().c_str()); + auto it = stream_2_event_.find(stream); + if (it != stream_2_event_.end()) { + auto rt_ret = rtGetEventID(it->second, &event_id); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), 
op_desc->GetType().c_str(), rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetEventID] failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + } else { + rtEvent_t rt_event = nullptr; + auto rt_ret = rtEventCreateWithFlag(&rt_event, RT_EVENT_WITH_FLAG); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtEventCreateWithFlag failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret); + GELOGE(RT_FAILED, "[Call][rtEventCreateWithFlag] failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + rt_ret = rtGetEventID(rt_event, &event_id); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetEventID] failed for op:%s(%s), ret:0x%X", + op_desc->GetName().c_str(), op_desc->GetType().c_str(), rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + stream_2_event_.emplace(stream, rt_event); + } + return SUCCESS; +} + +Status DavinciModel::GetEventByStream(const rtStream_t &stream, rtEvent_t &rt_event) { + auto it = stream_2_event_.find(stream); + if (it == stream_2_event_.end()) { + REPORT_INNER_ERROR("E19999", "Get event failed"); + GELOGE(FAILED, "[Get][Event] Get event failed"); + return FAILED; + } + rt_event = it->second; + return SUCCESS; +} } // namespace ge diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index fe89f66f..76b0beef 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -582,6 +582,10 @@ class DavinciModel { void SetRunningFlag(bool flag) { running_flg_ = flag; } Status SetRunAsyncListenerCallback(const RunAsyncCallback &callback); + // for blocking aicpu op + Status GetEventByStream(const rtStream_t &stream, rtEvent_t &rt_event); + Status GetEventIdForBlockingAicpuOp(const OpDescPtr &op_desc, rtStream_t stream, uint32_t &event_id); + private: // memory address of weights uint8_t *weights_mem_base_; @@ -1107,6 +1111,8 @@ class DavinciModel { // op name to attrs mapping std::map>> op_name_to_attrs_; + + std::map stream_2_event_; }; } // namespace ge #endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_DAVINCI_MODEL_H_ diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc index 1a6ab542..fe9cd0cc 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.cc @@ -26,8 +26,8 @@ #include "external/graph/attr_value.h" #include "graph/load/model_manager/davinci_model.h" #include "graph/load/model_manager/model_manager.h" -#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" #include "framework/common/debug/log.h" +#include "runtime/rt.h" namespace { const char *const kAicpuAllshape = "_AllShape"; @@ -43,7 +43,7 @@ Status KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe UnknowShapeOpType unknown_type = static_cast(unknown_shape_type_val); uint32_t num_inputs = op_desc->GetInputsSize(); uint32_t num_outputs = op_desc->GetOutputsSize(); - std::unique_ptr ext_handle( + std::shared_ptr ext_handle( new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc->GetName(), num_inputs, num_outputs, @@ -76,6 +76,16 @@ Status 
KernelExTaskInfo::InitTaskExtInfo(const std::string &ext_info, const OpDe } } } + + AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_); + GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_); + + if (UpdateEventIdForAicpuBlockingOp(op_desc, ext_handle) != SUCCESS) { + GELOGE(FAILED, "[Call][UpdateEventIdForAicpuBlockingOp] failed for op:%s(%s)", + op_desc->GetName().c_str(), op_desc->GetType().c_str()); + return FAILED; + } + auto rt_ret = rtMalloc(&ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM); GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, REPORT_CALL_ERROR("E19999", "Call rtMalloc failed, size:%zu, ret:0x%X", ext_info.size(), rt_ret); @@ -448,6 +458,101 @@ Status KernelExTaskInfo::Distribute() { stream_id_ = stream_id; GELOGI("KernelExTaskInfo Distribute Success. task id: %u, stream id: %u", task_id_, stream_id_); + if (is_blocking_aicpu_op_) { + if (DistributeWaitTaskForAicpuBlockingOp() != SUCCESS) { + GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed"); + return FAILED; + } + } + return SUCCESS; +} + +Status KernelExTaskInfo::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) { + int32_t device_id = 0; + auto rt_ret = rtGetDevice(&device_id); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + int32_t value = 0; + rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) { + REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + return FAILED; + } + is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT ? 
true : false); + return SUCCESS; +} + +Status KernelExTaskInfo::UpdateEventIdForAicpuBlockingOp(const OpDescPtr &op_desc, + std::shared_ptr &ext_handle) { + if (is_blocking_aicpu_op_) { + bool is_support = false; + if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) { + GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed"); + return FAILED; + } + if (!is_support) { + GELOGD("Device not support blocking aicpu op process"); + return SUCCESS; + } + uint32_t event_id = 0; + if (davinci_model_->GetEventIdForBlockingAicpuOp(op_desc, stream_, event_id) != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Get event id failed for op:%s(%s).", op_desc->GetName().c_str(), + op_desc->GetType().c_str()); + GELOGE(FAILED, "[Get][EventId] Get event id failed for op:%s(%s)", op_desc->GetName().c_str(), + op_desc->GetType().c_str()); + return FAILED; + } + if (ext_handle->UpdateEventId(event_id) != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Update event id failed for op:%s(%s).", op_desc->GetName().c_str(), + op_desc->GetType().c_str()); + GELOGE(FAILED, "[Update][EventId] Update event id failed for op:%s(%s)", op_desc->GetName().c_str(), + op_desc->GetType().c_str()); + return FAILED; + } + GELOGI("Update event_id=%u success", event_id); + } + return SUCCESS; +} + +Status KernelExTaskInfo::DistributeWaitTaskForAicpuBlockingOp() { + bool is_support = false; + if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) { + GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed"); + return FAILED; + } + if (!is_support) { + GELOGD("Device not support blocking aicpu op process."); + return SUCCESS; + } + GELOGD("Distribute wait task begin"); + rtEvent_t rt_event = nullptr; + if (davinci_model_->GetEventByStream(stream_, rt_event) != SUCCESS) { + GELOGE(FAILED, "[Call][GetEventByStream] Call GetEventByStream failed"); + return FAILED; + } + auto rt_ret = rtStreamWaitEvent(stream_, rt_event); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + rt_ret = rtEventReset(rt_event, stream_); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } return SUCCESS; } diff --git a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h index 7d07eb7f..eb411576 100644 --- a/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h +++ b/ge/graph/load/model_manager/task_info/kernel_ex_task_info.h @@ -19,6 +19,7 @@ #include "graph/load/model_manager/task_info/task_info.h" #include "graph/op_desc.h" +#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" namespace ge { class KernelExTaskInfo : public TaskInfo { @@ -65,6 +66,12 @@ class KernelExTaskInfo : public TaskInfo { void InitDumpArgs(void *addr, const OpDescPtr &op_desc); Status InitTaskExtInfo(const std::string &ext_info, const OpDescPtr &op_desc); + // for blocking aicpu op + Status DistributeWaitTaskForAicpuBlockingOp(); + Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support); + Status UpdateEventIdForAicpuBlockingOp(const OpDescPtr &op_desc, + std::shared_ptr &ext_handle); + uint32_t task_id_; uint32_t stream_id_; 
uint32_t dump_flag_; @@ -79,6 +86,7 @@ class KernelExTaskInfo : public TaskInfo { uint32_t args_offset_ = 0; int64_t fixed_addr_offset_ = 0; int32_t topic_type_flag_ = -1; + bool is_blocking_aicpu_op_ = false; }; } // namespace ge #endif // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_KERNEL_EX_TASK_INFO_H_ diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.cc b/ge/graph/load/model_manager/task_info/kernel_task_info.cc index 019a0a8b..6bbfe58e 100755 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.cc +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.cc @@ -28,11 +28,10 @@ #include "graph/load/model_manager/davinci_model.h" #include "graph/load/model_manager/model_manager.h" #include "graph/load/model_manager/model_utils.h" -#include "runtime/kernel.h" +#include "runtime/rt.h" #include "graph/load/model_manager/task_info/super_kernel/super_kernel.h" #include "graph/load/model_manager/task_info/super_kernel/super_kernel_factory.h" #include "cce/aicpu_engine_struct.h" -#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" #include "framework/common/debug/log.h" namespace { @@ -474,6 +473,12 @@ Status KernelTaskInfo::Distribute() { } // set for task_id_ UpdateTaskId(); + if (is_blocking_aicpu_op_) { + if (DistributeWaitTaskForAicpuBlockingOp() != SUCCESS) { + GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed"); + return FAILED; + } + } GELOGD( "KernelTaskInfo Distribute Success. sktenable:%d taskid:%d sktid:%d stubfunc_name:%s stubfunc:%p " "blockdim:%d stream:%p", @@ -482,6 +487,91 @@ Status KernelTaskInfo::Distribute() { return SUCCESS; } +Status KernelTaskInfo::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) { + int32_t device_id = 0; + auto rt_ret = rtGetDevice(&device_id); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + int32_t value = 0; + rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) { + REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + return FAILED; + } + is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT ? 
true : false); + return SUCCESS; +} + +Status KernelTaskInfo::UpdateEventIdForAicpuBlockingOp(std::shared_ptr &ext_handle) { + if (is_blocking_aicpu_op_) { + bool is_support = false; + if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) { + GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed"); + return FAILED; + } + if (!is_support) { + GELOGD("Device not support blocking aicpu op process"); + return SUCCESS; + } + uint32_t event_id = 0; + if (davinci_model_->GetEventIdForBlockingAicpuOp(op_desc_, stream_, event_id) != SUCCESS) { + GELOGE(FAILED, "[Get][EventId] Get event id failed for op:%s(%s)", op_desc_->GetName().c_str(), + op_desc_->GetType().c_str()); + return FAILED; + } + if (ext_handle->UpdateEventId(event_id) != SUCCESS) { + GELOGE(FAILED, "[Update][EventId] Update event id failed for op:%s(%s)", op_desc_->GetName().c_str(), + op_desc_->GetType().c_str()); + return FAILED; + } + GELOGI("Update event_id=%u success", event_id); + } + return SUCCESS; +} + +Status KernelTaskInfo::DistributeWaitTaskForAicpuBlockingOp() { + bool is_support = false; + if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) { + GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed"); + return FAILED; + } + if (!is_support) { + GELOGD("device not support blocking aicpu op process."); + return SUCCESS; + } + GELOGD("Distribute wait task begin"); + rtEvent_t rt_event = nullptr; + if (davinci_model_->GetEventByStream(stream_, rt_event) != SUCCESS) { + REPORT_CALL_ERROR("E19999", "Call GetEventByStream failed"); + GELOGE(FAILED, "[Call][GetEventByStream] Call GetEventByStream failed"); + return FAILED; + } + auto rt_ret = rtStreamWaitEvent(stream_, rt_event); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + rt_ret = rtEventReset(rt_event, stream_); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + return SUCCESS; +} + void KernelTaskInfo::SetIoAddrs(const OpDescPtr &op_desc) { const RuntimeParam &rts_param = davinci_model_->GetRuntimeParam(); vector input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc); @@ -1109,7 +1199,7 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { UnknowShapeOpType unknown_type = static_cast(unknown_shape_type_val); uint32_t num_inputs = op_desc_->GetInputsSize(); uint32_t num_outputs = op_desc_->GetOutputsSize(); - std::unique_ptr ext_handle( + std::shared_ptr ext_handle( new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(), num_inputs, num_outputs, @@ -1145,6 +1235,16 @@ Status KernelTaskInfo::InitAicpuTaskExtInfo(const std::string &ext_info) { j, op_desc_->GetName().c_str()); } } + + AttrUtils::GetBool(op_desc_, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_); + GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc_->GetName().c_str(), is_blocking_aicpu_op_); + + if (UpdateEventIdForAicpuBlockingOp(ext_handle) != SUCCESS) { + GELOGE(FAILED, "[Call][UpdateEventIdForAicpuBlockingOp] failed for op:%s(%s)", + op_desc_->GetName().c_str(), op_desc_->GetType().c_str()); + return FAILED; + } + auto rt_ret = 
rtMalloc(&aicpu_ext_info_addr_, ext_handle->GetExtInfoLen(), RT_MEMORY_HBM); if (rt_ret != RT_ERROR_NONE) { REPORT_CALL_ERROR("E19999", "Call rtMalloc failed for op:%s(%s), size:%zu, ret:0x%X", diff --git a/ge/graph/load/model_manager/task_info/kernel_task_info.h b/ge/graph/load/model_manager/task_info/kernel_task_info.h index d9dd30bb..59a91aee 100644 --- a/ge/graph/load/model_manager/task_info/kernel_task_info.h +++ b/ge/graph/load/model_manager/task_info/kernel_task_info.h @@ -24,6 +24,8 @@ #include "graph/load/model_manager/task_info/task_info.h" #include "graph/op_desc.h" +#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" + namespace ge { class KernelTaskInfo : public TaskInfo { public: @@ -148,6 +150,11 @@ class KernelTaskInfo : public TaskInfo { bool DoubleCallSKTSaveCheck(); void SetArgs(); + // for blocking aicpu op + Status DistributeWaitTaskForAicpuBlockingOp(); + Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support); + Status UpdateEventIdForAicpuBlockingOp(std::shared_ptr &ext_handle); + void *stub_func_; void *args_; void *sm_desc_; @@ -187,6 +194,7 @@ class KernelTaskInfo : public TaskInfo { uint32_t skt_dump_flag_ = RT_KERNEL_DEFAULT; void *superkernel_device_args_addr_ = nullptr; void *superkernel_dev_nav_table_ = nullptr; + bool is_blocking_aicpu_op_ = false; struct AICPUCustomInfo { void *input_descs = nullptr; diff --git a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc index c607a43e..6e8841b9 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.cc @@ -81,6 +81,9 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) { case aicpu::FWKAdapter::FWK_ADPT_EXT_TOPIC_TYPE: GE_CHK_STATUS_RET(ParseExtTopicType(aicpu_ext_info), "[Parse][ExtTopicType] failed."); break; + case aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT: + GE_CHK_STATUS_RET(ParseExtAsyncWait(aicpu_ext_info), "[Parse][ExtAsyncWait] failed."); + break; default: GELOGD("Node[%s] ignore infoType=%d, infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoType, aicpu_ext_info->infoLen); @@ -101,6 +104,22 @@ Status AicpuExtInfoHandler::Parse(const std::string &ext_info) { return SUCCESS; } +Status AicpuExtInfoHandler::ParseExtAsyncWait(AicpuExtInfo *aicpu_ext_info) { + if (aicpu_ext_info->infoLen != sizeof(AsyncWaitInfo)) { + REPORT_INNER_ERROR("E19999", + "Node[%s] parse ext async wait info failed as infoLen must be %zu but %u.", + node_name_.c_str(), sizeof(AsyncWaitInfo), aicpu_ext_info->infoLen); + GELOGE(ACL_ERROR_GE_PARAM_INVALID, + "[Check][DataLen]Node[%s] parse ext async wait info failed as infoLen must be %zu but %u.", + node_name_.c_str(), sizeof(AsyncWaitInfo), aicpu_ext_info->infoLen); + return ACL_ERROR_GE_PARAM_INVALID; + } + + async_wait_ = reinterpret_cast(aicpu_ext_info->infoMsg); + GELOGI("Node[%s] parse async wait info success infoLen=%u.", node_name_.c_str(), aicpu_ext_info->infoLen); + return SUCCESS; +} + Status AicpuExtInfoHandler::ParseExtShapeType(AicpuExtInfo *aicpu_ext_info) { GE_IF_BOOL_EXEC(aicpu_ext_info->infoLen != sizeof(int32_t), REPORT_INNER_ERROR("E19999", "Node[%s] parse ext shape type failed as infoLen must be %zu but %u.", @@ -280,6 +299,17 @@ Status AicpuExtInfoHandler::UpdateSessionInfo(uint64_t session_id, uint64_t kern return SUCCESS; } +Status AicpuExtInfoHandler::UpdateEventId(uint32_t event_id) { + if (async_wait_ == nullptr) { + REPORT_INNER_ERROR("E19999", "async_wait_ is nullptr."); + GELOGE(FAILED, "[Check][async_wait_] async_wait_ is 
nullptr."); + return FAILED; + } + async_wait_->waitType = 1; + async_wait_->waitId = event_id; + return SUCCESS; +} + Status AicpuExtInfoHandler::UpdateSessionInfoSessionId(uint64_t session_id) { if (session_info_ == nullptr) { GELOGD("There is no session info in ext_info, no need update."); diff --git a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h index 46fb7c05..80e3bb92 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h +++ b/ge/hybrid/node_executor/aicpu/aicpu_ext_info.h @@ -27,6 +27,7 @@ namespace ge { namespace hybrid { using AicpuShapeAndType = aicpu::FWKAdapter::ShapeAndType; using AicpuExtInfo = aicpu::FWKAdapter::ExtInfo; +using AsyncWaitInfo = aicpu::FWKAdapter::AsyncWait; using AicpuSessionInfo = SessionInfo; class AicpuExtInfoHandler { @@ -59,6 +60,8 @@ class AicpuExtInfoHandler { Status UpdateExecuteMode(bool flag); + Status UpdateEventId(uint32_t event_id); + Status GetOutputShapeAndType(uint32_t output_index, GeShape &shape, DataType &data_type); bool IsNeedRefreshIOAddr(); @@ -73,6 +76,7 @@ class AicpuExtInfoHandler { Status ParseExtBitMap(AicpuExtInfo *aicpu_ext_info); Status ParseExtUpdateAddr(AicpuExtInfo *aicpu_ext_info); Status ParseExtTopicType(AicpuExtInfo *aicpu_ext_info); + Status ParseExtAsyncWait(AicpuExtInfo *aicpu_ext_info); static Status UpdateShapeAndType(const GeShape &shape, DataType data_type, @@ -90,6 +94,7 @@ class AicpuExtInfoHandler { const uint32_t output_num_; UnknowShapeOpType unknown_type_; AicpuSessionInfo *session_info_ = nullptr; + AsyncWaitInfo *async_wait_ = nullptr; uint64_t *bit_map_ = nullptr; uint32_t *update_addr_ = nullptr; int32_t topic_type_flag_ = -1; diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc index cf20303c..f309ebd0 100755 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc @@ -22,6 +22,7 @@ #include "graph/utils/node_utils.h" #include "hybrid/executor/hybrid_execution_context.h" #include "hybrid/model/hybrid_model.h" +#include "runtime/rt.h" namespace ge { namespace hybrid { @@ -33,6 +34,12 @@ const char *const kAicpuAllshape = "_AllShape"; REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_TF, AiCpuNodeExecutor); REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::AICPU_CUSTOM, AiCpuNodeExecutor); +AicpuNodeTaskBase::~AicpuNodeTaskBase() { + if (rt_event_ != nullptr) { + (void)rtEventDestroy(rt_event_); + } +} + Status AicpuNodeTaskBase::AllocTensorBuffer(size_t size, std::unique_ptr &tensor_buffer) { auto allocator = NpuMemoryAllocator::GetAllocator(); GE_CHECK_NOTNULL(allocator); @@ -64,6 +71,13 @@ Status AicpuNodeTaskBase::InitExtInfo(const std::string &kernel_ext_info, int64_ GE_CHK_STATUS_RET(aicpu_ext_handle_.UpdateSessionInfoSessionId(session_id), "[Update][SessionInfoSessionId] failed, session_id:%ld.", session_id); + if (is_blocking_aicpu_op_) { + if (UpdateEventIdForBlockingAicpuOp() != SUCCESS) { + GELOGE(FAILED, "[Call][UpdateEventIdForBlockingAicpuOp] Call UpdateEventIdForBlockingAicpuOp failed"); + return FAILED; + } + } + // copy task args buf GE_CHK_STATUS_RET(AllocTensorBuffer(aicpu_ext_handle_.GetExtInfoLen(), ext_info_addr_dev_), "[Invoke][AllocTensorBuffer]Node[%s] alloc kernel_ext_info buf failed, size=%zu", @@ -230,6 +244,96 @@ Status AicpuNodeTaskBase::ExecuteAsync(TaskContext &context, std::functionnum_outputs == 0)) { GELOGD("Node[%s] type[%s] unknown_type is %d, output 
num is %d.", @@ -325,6 +429,9 @@ Status AicpuTfNodeTask::Init(const HybridModel &model) { // init ext info uint64_t ext_session_id = model.GetSessionId(); + const OpDescPtr op_desc = node_item_->GetOpDesc(); + AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_); + GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_); GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id), "[Init][ExtInfo] failed for Node[%s].", node_name_.c_str()); GE_CHK_STATUS_RET(InitForDependComputeTask(), "[Init][DependComputeTask] failed for Node[%s].", node_name_.c_str()); @@ -642,6 +749,12 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) { kernel_buf_->GetSize(), flag, context.GetStream())); RECORD_EXECUTION_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[AicpuTfNodertKernelLaunchEx] End"); GELOGD("Node[%s] launch end.", node_name_.c_str()); + if (is_blocking_aicpu_op_) { + if (DistributeWaitTaskForAicpuBlockingOp(context.GetStream()) != SUCCESS) { + GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed"); + return FAILED; + } + } if (need_sync_) { GELOGD("[%s] Task needs sync", node_name_.c_str()); GE_CHK_STATUS_RET_NOLOG(context.Synchronize()); @@ -760,6 +873,8 @@ Status AicpuNodeTask::Init(const HybridModel &model) { return FAILED;); uint64_t ext_session_id = model.GetSessionId(); + AttrUtils::GetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_); + GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_); GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id), "[Init][ExtInfo] failed for Node[%s].", node_name.c_str()); @@ -826,6 +941,12 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) { args_.get(), args_size_, nullptr, context.GetStream(), flag); GE_CHK_RT_RET(rt_ret); + if (is_blocking_aicpu_op_) { + if (DistributeWaitTaskForAicpuBlockingOp(context.GetStream()) != SUCCESS) { + GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed"); + return FAILED; + } + } GELOGD("Node[%s] launch task end.", node_name_.c_str()); return SUCCESS; } diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h index 14bc8fcc..3911e090 100644 --- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h +++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h @@ -35,7 +35,7 @@ class AicpuNodeTaskBase : public NodeTask { node_item->num_outputs, node_item->shape_inference_type) {} - ~AicpuNodeTaskBase() override = default; + ~AicpuNodeTaskBase() override; using NodeTask::Init; @@ -61,6 +61,10 @@ class AicpuNodeTaskBase : public NodeTask { static Status AllocTensorBuffer(size_t size, std::unique_ptr &tensor_buffer); + Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream); + Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support); + Status UpdateEventIdForBlockingAicpuOp(); + protected: const NodeItem *node_item_; // just reference. 
@@ -78,6 +82,10 @@ class AicpuNodeTaskBase : public NodeTask {

   // ext info addr, device mem
   std::unique_ptr<TensorBuffer> ext_info_addr_dev_;
+
+  // for blocking aicpu op
+  bool is_blocking_aicpu_op_ = false;
+  rtEvent_t rt_event_ = nullptr;
 };

 class AicpuTfNodeTask : public AicpuNodeTaskBase {
diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc
index dbc90ac5..83cb0529 100755
--- a/ge/single_op/task/op_task.cc
+++ b/ge/single_op/task/op_task.cc
@@ -564,6 +564,41 @@ AiCpuBaseTask::~AiCpuBaseTask() {
   if (ext_info_addr_dev_ != nullptr) {
     (void)rtFree(ext_info_addr_dev_);
   }
+  if (rt_event_ != nullptr) {
+    (void)rtEventDestroy(rt_event_);
+  }
+}
+
+Status AiCpuBaseTask::UpdateEventIdForBlockingAicpuOp() {
+  bool is_support = false;
+  if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) {
+    GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed");
+    return FAILED;
+  }
+  if (!is_support) {
+    GELOGD("Device not support blocking aicpu op process");
+    return SUCCESS;
+  }
+  uint32_t event_id = 0;
+  auto rt_ret = rtEventCreateWithFlag(&rt_event_, RT_EVENT_WITH_FLAG);
+  if (rt_ret != RT_ERROR_NONE) {
+    REPORT_CALL_ERROR("E19999", "Call rtEventCreateWithFlag failed, ret:0x%X", rt_ret);
+    GELOGE(RT_FAILED, "[Call][rtEventCreateWithFlag] failed, ret:0x%X", rt_ret);
+    return RT_ERROR_TO_GE_STATUS(rt_ret);
+  }
+  rt_ret = rtGetEventID(rt_event_, &event_id);
+  if (rt_ret != RT_ERROR_NONE) {
+    REPORT_CALL_ERROR("E19999", "Call rtGetEventID failed, ret:0x%X", rt_ret);
+    GELOGE(RT_FAILED, "[Call][rtGetEventID] failed, ret:0x%X", rt_ret);
+    return RT_ERROR_TO_GE_STATUS(rt_ret);
+  }
+  if (aicpu_ext_handle_->UpdateEventId(event_id) != SUCCESS) {
+    REPORT_CALL_ERROR("E19999", "Update event id=%u failed.", event_id);
+    GELOGE(FAILED, "[Update][EventId] Update event id=%u failed.", event_id);
+    return FAILED;
+  }
+  GELOGI("Update event_id=%u success", event_id);
+  return SUCCESS;
 }

 Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint64_t kernel_id) {
@@ -577,6 +612,9 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
   GELOGD("Get unknown_type is %d.", unknown_shape_type_val);
   unknown_type_ = static_cast<UnknowShapeOpType>(unknown_shape_type_val);

+  AttrUtils::GetBool(op_desc_, ATTR_NAME_IS_BLOCKING_OP, is_blocking_aicpu_op_);
+  GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc_->GetName().c_str(), is_blocking_aicpu_op_);
+
   aicpu_ext_handle_.reset(new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
                                                                               num_inputs_,
                                                                               num_outputs_,
@@ -595,6 +633,13 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
   GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false),
                     "[Update][SessionInfo] failed.");

+  if (is_blocking_aicpu_op_) {
+    if (UpdateEventIdForBlockingAicpuOp() != SUCCESS) {
+      GELOGE(FAILED, "[Call][UpdateEventIdForBlockingAicpuOp] Call UpdateEventIdForBlockingAicpuOp failed");
+      return FAILED;
+    }
+  }
+
   GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM));
   GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(),
                          aicpu_ext_handle_->GetExtInfo(), aicpu_ext_handle_->GetExtInfoLen(),
@@ -770,6 +815,63 @@ Status AiCpuBaseTask::UpdateIoAddr(const vector<DataBuffer> &inputs, const vecto
   return SUCCESS;
 }

+Status AiCpuBaseTask::CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support) {
+  int32_t device_id = 0;
+  auto rt_ret = rtGetDevice(&device_id);
+  if (rt_ret
!= RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDevice failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDevice] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + int32_t value = 0; + rt_ret = rtGetDeviceCapability(device_id, FEATURE_TYPE_BLOCKING_OPERATOR, RT_MODULE_TYPE_AICPU, &value); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtGetDeviceCapability failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][rtGetDeviceCapability] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + if (value != RT_AICPU_BLOCKING_OP_NOT_SUPPORT && value != RT_AICPU_BLOCKING_OP_SUPPORT) { + REPORT_INNER_ERROR("E19999", "Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + GELOGE(FAILED, "[Check][Value] Value should be %d or %d but %d", + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, RT_AICPU_BLOCKING_OP_SUPPORT, value); + return FAILED; + } + is_support = (value == RT_AICPU_BLOCKING_OP_SUPPORT ? true : false); + return SUCCESS; +} + +Status AiCpuBaseTask::DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream) { + bool is_support = false; + if (CheckDeviceSupportBlockingAicpuOpProcess(is_support) != SUCCESS) { + GELOGE(FAILED, "[Call][CheckDeviceSupportBlockingAicpuOpProcess] Call CheckDeviceSupportBlockingAicpuOpProcess failed"); + return FAILED; + } + if (!is_support) { + GELOGD("Device not support blocking aicpu op process."); + return SUCCESS; + } + GELOGI("Distribute queue task begin"); + if (rt_event_ == nullptr) { + REPORT_INNER_ERROR("E19999", "rt_event_ is nullptr"); + GELOGE(FAILED, "[Check][rt_event_] rt_event_ is nullptr"); + return FAILED; + } + auto rt_ret = rtStreamWaitEvent(stream, rt_event_); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtStreamWaitEvent failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + rt_ret = rtEventReset(rt_event_, stream); + if (rt_ret != RT_ERROR_NONE) { + REPORT_CALL_ERROR("E19999", "Call rtEventReset failed, ret:0x%X", rt_ret); + GELOGE(RT_FAILED, "[Call][RtApi] failed, ret:0x%X", rt_ret); + return RT_ERROR_TO_GE_STATUS(rt_ret); + } + return SUCCESS; +} + AiCpuTask::~AiCpuTask() { FreeHbm(args_); FreeHbm(io_addr_); @@ -813,6 +915,14 @@ Status AiCpuTask::LaunchKernel(rtStream_t stream) { GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str()); GELOGD("Done launch kernel successfully. 
task = %s", this->op_type_.c_str());
+
+  if (is_blocking_aicpu_op_) {
+    if (DistributeWaitTaskForAicpuBlockingOp(stream) != SUCCESS) {
+      GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
+      return FAILED;
+    }
+  }
+
   return SUCCESS;
 }

@@ -1089,6 +1199,13 @@ Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
   }
   GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str());
   GELOGD("Invoke rtCpuKernelLaunch succeeded");
+
+  if (is_blocking_aicpu_op_) {
+    if (DistributeWaitTaskForAicpuBlockingOp(stream) != SUCCESS) {
+      GELOGE(FAILED, "[Call][DistributeWaitTaskForAicpuBlockingOp] Call DistributeWaitTaskForAicpuBlockingOp failed");
+      return FAILED;
+    }
+  }
   return SUCCESS;
 }

diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h
index 132672b0..adf51dba 100644
--- a/ge/single_op/task/op_task.h
+++ b/ge/single_op/task/op_task.h
@@ -178,6 +178,10 @@ class AiCpuBaseTask : public OpTask {
                               rtStream_t stream);
   Status UpdateOutputShape(vector<GeTensorDesc> &output_desc);
   Status UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc);
+  // for blocking aicpu op
+  Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
+  Status UpdateEventIdForBlockingAicpuOp();
+  Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);

  protected:
   size_t num_inputs_ = 0;
@@ -186,6 +190,9 @@ class AiCpuBaseTask : public OpTask {
   std::unique_ptr<ge::hybrid::AicpuExtInfoHandler> aicpu_ext_handle_;
   void *ext_info_addr_dev_ = nullptr;
   vector<bool> input_is_const_;
+  // for blocking aicpu op
+  bool is_blocking_aicpu_op_ = false;
+  rtEvent_t rt_event_ = nullptr;
 };

 class AiCpuTask : public AiCpuBaseTask {
diff --git a/tests/depends/runtime/src/runtime_stub.cc b/tests/depends/runtime/src/runtime_stub.cc
index 25d6c2d3..32df7552 100644
--- a/tests/depends/runtime/src/runtime_stub.cc
+++ b/tests/depends/runtime/src/runtime_stub.cc
@@ -16,12 +16,94 @@

 #include
 #include
+#include "runtime_stub.h"
+#include "runtime/rt.h"
+
+#define ADD_STUB_RETURN_VALUE(FUNC, TYPE) std::vector<TYPE> g_Stub_##FUNC##_RETURN
+
+#define GET_STUB_RETURN_VALUE(FUNC, TYPE, DEFAULT) ({ \
+  TYPE result = DEFAULT;                              \
+  if (!g_Stub_##FUNC##_RETURN.empty()) {              \
+    result = g_Stub_##FUNC##_RETURN.back();           \
+    g_Stub_##FUNC##_RETURN.pop_back();                \
+  }                                                   \
+  result;                                             \
+})
+
+#define DEL_STUB_RETURN_VALUE(FUNC, TYPE)            \
+do {                                                 \
+  extern std::vector<TYPE> g_Stub_##FUNC##_RETURN;   \
+  g_Stub_##FUNC##_RETURN.clear();                    \
+} while (0)
+
+
+#define ADD_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME) std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME
+
+#define GET_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME, DEFAULT) ({ \
+  TYPE value;                                                 \
+  if (!g_Stub_##FUNC##_OUT_##NAME.empty()) {                  \
+    value = g_Stub_##FUNC##_OUT_##NAME.back();                \
+    g_Stub_##FUNC##_OUT_##NAME.pop_back();                    \
+  } else {                                                    \
+    value = DEFAULT;                                          \
+  }                                                           \
+  value;                                                      \
+})
+
+#define DEL_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME)        \
+do {                                                     \
+  extern std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME;   \
+  g_Stub_##FUNC##_OUT_##NAME.clear();                    \
+} while (0)

 #ifdef __cplusplus
 extern "C" {
 #endif
 #define EVENT_LENTH 10

+void rtStubTearDown() {
+  DEL_STUB_RETURN_VALUE(rtGetDevice, rtError_t);
+  DEL_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t);
+  DEL_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t);
+  DEL_STUB_RETURN_VALUE(rtEventReset, rtError_t);
+  DEL_STUB_RETURN_VALUE(rtEventCreate, rtError_t);
+  DEL_STUB_RETURN_VALUE(rtGetEventID, rtError_t);
+}
+
+ADD_STUB_RETURN_VALUE(rtGetDevice, rtError_t);
+rtError_t rtGetDevice(int32_t *device) {
+  return GET_STUB_RETURN_VALUE(rtGetDevice, rtError_t, RT_ERROR_NONE);
+}
+
+ADD_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t);
+ADD_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value);
+rtError_t rtGetDeviceCapability(int32_t device, int32_t moduleType, int32_t featureType, int32_t *value) {
+  *value = GET_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT);
+  return GET_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
+}
+
+ADD_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t);
+rtError_t rtStreamWaitEvent(rtStream_t stream, rtEvent_t event) {
+  return GET_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, RT_ERROR_NONE);
+}
+
+ADD_STUB_RETURN_VALUE(rtEventReset, rtError_t);
+rtError_t rtEventReset(rtEvent_t event, rtStream_t stream) {
+  return GET_STUB_RETURN_VALUE(rtEventReset, rtError_t, RT_ERROR_NONE);
+}
+
+ADD_STUB_RETURN_VALUE(rtEventCreate, rtError_t);
+rtError_t rtEventCreate(rtEvent_t *event) {
+  *event = new int[EVENT_LENTH];
+  return GET_STUB_RETURN_VALUE(rtEventCreate, rtError_t, RT_ERROR_NONE);
+}
+
+ADD_STUB_RETURN_VALUE(rtGetEventID, rtError_t);
+rtError_t rtGetEventID(rtEvent_t event, uint32_t *event_id) {
+  *event_id = 0;
+  return GET_STUB_RETURN_VALUE(rtGetEventID, rtError_t, RT_ERROR_NONE);
+}
+
 rtError_t rtCtxSetCurrent(rtContext_t ctx) { return RT_ERROR_NONE; }
 
 rtError_t rtGetStreamId(rtStream_t stream, int32_t *stream_id) {
@@ -42,11 +124,6 @@ rtError_t rtEventGetTimeStamp(uint64_t *time, rtEvent_t event) {
   return RT_ERROR_NONE;
 }
 
-rtError_t rtEventCreate(rtEvent_t *event) {
-  *event = new int[EVENT_LENTH];
-  return RT_ERROR_NONE;
-}
-
 rtError_t rtEventCreateWithFlag(rtEvent_t *event, uint32_t flag) {
   return rtEventCreate(event);
 }
@@ -112,8 +189,6 @@ rtError_t rtMemcpyAsync(void *dst, uint64_t dest_max, const void *src, uint64_t
   return RT_ERROR_NONE;
 }
 
-rtError_t rtStreamWaitEvent(rtStream_t stream, rtEvent_t event) { return RT_ERROR_NONE; }
-
 rtError_t rtSetTSDevice(uint32_t tsId) {
   return RT_ERROR_NONE;
 }
@@ -347,10 +422,6 @@ rtError_t rtStreamSwitchEx(void *ptr, rtCondition_t condition, void *value_ptr,
 rtError_t rtStreamActive(rtStream_t active_stream, rtStream_t stream) { return RT_ERROR_NONE; }
 
-rtError_t rtEventReset(rtEvent_t event, rtStream_t stream) { return RT_ERROR_NONE; }
-
-rtError_t rtGetDevice(int32_t *device) { return RT_ERROR_NONE; }
-
 rtError_t rtDatadumpInfoLoad(const void *dump_info, uint32_t length) { return RT_ERROR_NONE; }
 
 rtError_t rtKernelLaunchWithFlag(const void *stub_func, uint32_t block_dim, void *args, uint32_t args_size,
diff --git a/tests/depends/runtime/src/runtime_stub.h b/tests/depends/runtime/src/runtime_stub.h
new file mode 100644
index 00000000..b693b9ea
--- /dev/null
+++ b/tests/depends/runtime/src/runtime_stub.h
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INC_LLT_RUNTIME_STUB_H
+#define __INC_LLT_RUNTIME_STUB_H
+
+#include <vector>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void rtStubTearDown();
+
+#define RTS_STUB_SETUP() \
+do { \
+  rtStubTearDown(); \
+} while (0)
+
+#define RTS_STUB_TEARDOWN() \
+do { \
+  rtStubTearDown(); \
+} while (0)
+
+#define RTS_STUB_RETURN_VALUE(FUNC, TYPE, VALUE) \
+do { \
+  g_Stub_##FUNC##_RETURN.emplace(g_Stub_##FUNC##_RETURN.begin(), VALUE); \
+} while (0)
+
+#define RTS_STUB_OUTBOUND_VALUE(FUNC, TYPE, NAME, VALUE) \
+do { \
+  g_Stub_##FUNC##_OUT_##NAME.emplace(g_Stub_##FUNC##_OUT_##NAME.begin(), VALUE); \
+} while (0)
+
+
+#define RTS_STUB_RETURN_EXTERN(FUNC, TYPE) extern std::vector<TYPE> g_Stub_##FUNC##_RETURN;
+#define RTS_STUB_OUTBOUND_EXTERN(FUNC, TYPE, NAME) extern std::vector<TYPE> g_Stub_##FUNC##_OUT_##NAME;
+
+RTS_STUB_RETURN_EXTERN(rtGetDevice, rtError_t);
+RTS_STUB_OUTBOUND_EXTERN(rtGetDevice, int32_t, device);
+
+RTS_STUB_RETURN_EXTERN(rtGetDeviceCapability, rtError_t);
+RTS_STUB_OUTBOUND_EXTERN(rtGetDeviceCapability, int32_t, value);
+
+RTS_STUB_RETURN_EXTERN(rtStreamWaitEvent, rtError_t);
+
+RTS_STUB_RETURN_EXTERN(rtEventReset, rtError_t);
+
+RTS_STUB_RETURN_EXTERN(rtEventCreate, rtError_t);
+RTS_STUB_OUTBOUND_EXTERN(rtEventCreate, rtEvent_t, event);
+
+RTS_STUB_RETURN_EXTERN(rtGetEventID, rtError_t);
+RTS_STUB_OUTBOUND_EXTERN(rtGetEventID, uint32_t, event_id);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // __INC_LLT_RUNTIME_STUB_H
diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt
index a0790cf2..f9d9e857 100755
--- a/tests/ut/ge/CMakeLists.txt
+++ b/tests/ut/ge/CMakeLists.txt
@@ -935,6 +935,7 @@ target_link_libraries(ge_single_op PRIVATE
     ascend_protobuf
     json
     c_sec
+    runtime_stub
 )
 
 # ut binary
diff --git a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
index 327dd248..86569789 100644
--- a/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
+++ b/tests/ut/ge/graph/load/kernel_ex_task_info_unittest.cc
@@ -23,15 +23,20 @@
 
 #include "graph/load/model_manager/task_info/kernel_ex_task_info.h"
 #include "cce/aicpu_engine_struct.h"
+#include "tests/depends/runtime/src/runtime_stub.h"
 
 namespace ge {
 extern OpDescPtr CreateOpDesc(string name, string type);
 
 class UtestKernelExTaskInfo : public testing::Test {
  protected:
-  void SetUp() {}
+  void SetUp() {
+    RTS_STUB_SETUP();
+  }
 
-  void TearDown() {}
+  void TearDown() {
+    RTS_STUB_TEARDOWN();
+  }
 };
 
 // test kernel_ex_task_Release
@@ -209,4 +214,136 @@ TEST_F(UtestKernelExTaskInfo, parse_topic_type_failed_2) {
   KernelExTaskInfo kernel_ex_task_info;
   EXPECT_NE(kernel_ex_task_info.InitTaskExtInfo(ext_info, op_desc), SUCCESS);
 }
+
+TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::TaskDef task_def;
+  domi::KernelExDef kernel_ex_def;
+  kernel_ex_def.set_kernel_ext_info(buf, len);
+  kernel_ex_def.set_kernel_ext_info_size(len);
+  domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
+  *kernel_ex_def_tmp = kernel_ex_def;
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
+
+  KernelExTaskInfo kernel_ex_task_info;
+  kernel_ex_task_info.op_desc_ = op_desc;
+  DavinciModel davinci_model(0, nullptr);
+  kernel_ex_task_info.davinci_model_ = &davinci_model;
+  EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
+  EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS);
+  kernel_ex_task_info.op_desc_ = op_desc;
+  EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
+  EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS);
+}
+
+TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op_fail_01) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::TaskDef task_def;
+  domi::KernelExDef kernel_ex_def;
+  kernel_ex_def.set_kernel_ext_info(buf, len);
+  kernel_ex_def.set_kernel_ext_info_size(len);
+  domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
+  *kernel_ex_def_tmp = kernel_ex_def;
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+
+  KernelExTaskInfo kernel_ex_task_info;
+  kernel_ex_task_info.op_desc_ = op_desc;
+  DavinciModel davinci_model(0, nullptr);
+  kernel_ex_task_info.davinci_model_ = &davinci_model;
+  EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS);
+
+  kernel_ex_task_info.is_blocking_aicpu_op_ = true;
+  EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED);
+}
+
+TEST_F(UtestKernelExTaskInfo, blocking_aicpu_op_fail_02) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::TaskDef task_def;
+  domi::KernelExDef kernel_ex_def;
+  kernel_ex_def.set_kernel_ext_info(buf, len);
+  kernel_ex_def.set_kernel_ext_info_size(len);
+  domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex();
+  *kernel_ex_def_tmp = kernel_ex_def;
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
+  KernelExTaskInfo kernel_ex_task_info;
+  kernel_ex_task_info.op_desc_ = op_desc;
+  DavinciModel davinci_model(0, nullptr);
+  kernel_ex_task_info.davinci_model_ = &davinci_model;
+
+  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED);
+
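+  // The stub queues are consumed one value per call: each RTS_STUB_RETURN_VALUE
+  // below arms the named runtime API to fail exactly once (0x78000001 is simply a
+  // nonzero error code), after which the stub falls back to RT_ERROR_NONE. Each
+  // EXPECT therefore exercises a different early-return branch of the
+  // blocking-op setup and distribution paths.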
RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1); + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001); + EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED); + + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS); + RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001); + EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED); + + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS); + RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001); + EXPECT_EQ(kernel_ex_task_info.Distribute(), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(kernel_ex_task_info.InitTaskExtInfo(kernel_ex_def.kernel_ext_info(), op_desc), SUCCESS); + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(kernel_ex_task_info.Distribute(), SUCCESS); +} + } // namespace ge diff --git a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc index 0c8da4b5..45ae7853 100644 --- a/tests/ut/ge/graph/load/kernel_task_info_unittest.cc +++ b/tests/ut/ge/graph/load/kernel_task_info_unittest.cc @@ -22,15 +22,20 @@ #include "graph/load/model_manager/davinci_model.h" #include "graph/load/model_manager/task_info/kernel_task_info.h" #include "graph/load/model_manager/task_info/hccl_task_info.h" +#include "tests/depends/runtime/src/runtime_stub.h" namespace ge { extern OpDescPtr CreateOpDesc(string name, string type); class UtestKernelTaskInfo : public testing::Test { protected: - void SetUp() {} + void SetUp() { + RTS_STUB_SETUP(); + } - void TearDown() {} + void TearDown() { + RTS_STUB_TEARDOWN(); + } }; // test KernelTaskInfo Init. 
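All of these blocking-op tests construct the same kernel ext-info blob by hand: an AicpuExtInfo header tagged FWK_ADPT_EXT_ASYNCWAIT followed by a zeroed AsyncWaitInfo payload. As a reading aid, here is that layout factored into a helper; the helper name is ours, not part of the patch, and the snippet assumes the tests' existing includes (hybrid/node_executor/aicpu/aicpu_ext_info.h and <vector>):

// Sketch only: factors out the ext-info blob the blocking-op tests build inline.
static std::vector<char> BuildAsyncWaitExtInfo() {
  std::vector<char> buf(sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo), 0);
  auto *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf.data());
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;  // tag the record as async-wait info
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);               // payload follows the header directly
  auto *wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf.data() + sizeof(hybrid::AicpuExtInfo));
  wait_info->waitType = 0;  // FWK_ADPT_WAIT_TYPE_NULL; per the AsyncWait struct comments, GE refreshes the wait id later
  wait_info->waitId = 0;
  wait_info->timeOut = 0;
  wait_info->reserved = 0;
  return buf;
}
// Usage: auto ext = BuildAsyncWaitExtInfo();
//        kernel_def.set_kernel_ext_info(ext.data(), ext.size());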
@@ -1240,4 +1245,135 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_super_kernel_info) {
   EXPECT_EQ(kernel_task_info.SKTFinalize(), SUCCESS);
 }
 
+TEST_F(UtestKernelTaskInfo, blocking_aicpu_op) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::TaskDef task_def;
+  domi::KernelDef kernel_def;
+  kernel_def.set_kernel_ext_info(buf, len);
+  kernel_def.set_kernel_ext_info_size(len);
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+  op_desc->SetId(0);
+  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
+  DavinciModel davinci_model(0, nullptr);
+  davinci_model.op_list_.emplace(0, op_desc);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.op_desc_ = op_desc;
+  kernel_task_info.davinci_model_ = &davinci_model;
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+  EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
+  kernel_task_info.op_desc_ = op_desc;
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+  EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
+}
+
+TEST_F(UtestKernelTaskInfo, blocking_aicpu_op_fail_01) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::KernelDef kernel_def;
+  kernel_def.set_kernel_ext_info(buf, len);
+  kernel_def.set_kernel_ext_info_size(len);
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+  op_desc->SetId(0);
+  DavinciModel davinci_model(0, nullptr);
+  davinci_model.op_list_.emplace(0, op_desc);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.davinci_model_ = &davinci_model;
+  kernel_task_info.op_desc_ = op_desc;
+
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+
+  kernel_task_info.is_blocking_aicpu_op_ = true;
+  EXPECT_EQ(kernel_task_info.Distribute(), FAILED);
+}
+
+TEST_F(UtestKernelTaskInfo, blocking_aicpu_op_fail_02) {
+  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
+  vector<char> aicpu_ext_info(len, 0);
+  char *buf = aicpu_ext_info.data();
+  int offset = 0;
+  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
+  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
+  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
+  offset += sizeof(hybrid::AicpuExtInfo);
+  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
+  async_wait_info->waitType = 0;
+  async_wait_info->waitId = 0;
+  async_wait_info->timeOut = 0;
+  async_wait_info->reserved = 0;
+
+  domi::KernelDef kernel_def;
+  kernel_def.set_kernel_ext_info(buf, len);
+  kernel_def.set_kernel_ext_info_size(len);
+
+  const OpDescPtr op_desc = CreateOpDesc("deque", "Deque");
+  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
+  op_desc->SetId(0);
+  DavinciModel davinci_model(0, nullptr);
+  davinci_model.op_list_.emplace(0, op_desc);
+
+  KernelTaskInfo kernel_task_info;
+  kernel_task_info.davinci_model_ = &davinci_model;
+  kernel_task_info.op_desc_ = op_desc;
+
+  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);
+
+  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);
+
+  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);
+
+  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
+  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), FAILED);
+
+  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.Distribute(), FAILED);
+
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.Distribute(), FAILED);
+
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
+  EXPECT_EQ(kernel_task_info.Distribute(), FAILED);
+
+  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
+  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
+  EXPECT_EQ(kernel_task_info.InitAicpuTaskExtInfo(kernel_def.kernel_ext_info()), SUCCESS);
+  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
+  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
+  EXPECT_EQ(kernel_task_info.Distribute(), SUCCESS);
+}
+
 }  // namespace ge
diff --git a/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
index b225949b..034b3f47 100644
--- a/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
@@ -27,7 +27,7 @@
 #include "hybrid/node_executor/aicpu/aicpu_node_executor.h"
 #undef protected
 #undef private
-
+#include "tests/depends/runtime/src/runtime_stub.h"
 using namespace std;
 using namespace testing;
 
@@ -43,8 +43,12 @@ using namespace hybrid;
 
 class UtestAicpuNodeExecutor : public testing::Test {
  protected:
-  void SetUp() {}
-  void TearDown() {}
+  void SetUp() {
+    RTS_STUB_SETUP();
+  }
+  void TearDown() {
+    RTS_STUB_TEARDOWN();
+  }
 };
 
 static NodePtr CreateNode(ComputeGraphPtr graph, const string &name, const string &type, int in_num, int out_num) {
@@ -164,5 +168,222 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_tf_node_task) {
 }
 
 
+TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task) {
+  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
+  GeRootModelPtr ge_root_model = std::make_shared<GeRootModel>(graph);
+  ge_root_model->SetModelName("test_name");
+  HybridModel hybrid_model(ge_root_model);
+
+  NodePtr node =
CreateNode(graph, "deque", FRAMEWORK_OP_TYPE, 1, 1); + ge::AttrUtils::SetBool(node->GetOpDesc(), ATTR_NAME_IS_BLOCKING_OP, true); + std::unique_ptr new_node; + ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS); + NodeItem *node_item = new_node.get(); + node_item->input_start = 0; + node_item->output_start = 0; + node_item->is_dynamic = true; + node_item->shape_inference_type = DEPEND_SHAPE_RANGE; + + GraphItem graph_item; + graph_item.node_items_.emplace_back(node_item); + graph_item.total_inputs_ = 1; + graph_item.total_outputs_ = 1; + + GraphExecutionContext graph_execution_context; + SubgraphContext subgraph_context(&graph_item, &graph_execution_context); + ASSERT_EQ(subgraph_context.Init(), SUCCESS); + graph_execution_context.callback_manager = std::unique_ptr(new CallbackManager()); + + auto node_state = subgraph_context.GetOrCreateNodeState(node_item); + ASSERT_NE(node_state, nullptr); + + uint64_t value_0 = 512; + TensorValue in_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetInput(*node_item, 0, in_tensor0); + + TensorValue out_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetOutput(*node_item, 0, out_tensor0); + + int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo); + vector aicpu_ext_info(len, 0); + char *buf = aicpu_ext_info.data(); + int offset = 0; + hybrid::AicpuExtInfo *ext_info = reinterpret_cast(buf + offset); + ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT; + ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo); + offset += sizeof(hybrid::AicpuExtInfo); + hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast(buf + offset); + async_wait_info->waitType = 0; + async_wait_info->waitId = 0; + async_wait_info->timeOut = 0; + async_wait_info->reserved = 0; + + domi::KernelDef kernel_def; + kernel_def.set_kernel_ext_info(buf, len); + kernel_def.set_kernel_ext_info_size(len); + domi::TaskDef task_def; + + AicpuTaskStruct args; + args.head.length = sizeof(args); + args.head.ioAddrNum = 2; + + kernel_def.set_args(reinterpret_cast(&args), args.head.length); + kernel_def.set_args_size(args.head.length); + domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel(); + *kernel_def_tmp = kernel_def; + + AicpuNodeTask aicpu_node_task(node_item, task_def); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); + + node_item->shape_inference_type = DEPEND_COMPUTE; + domi::KernelExDef kernel_ex_def; + kernel_ex_def.set_kernel_ext_info(buf, len); + kernel_ex_def.set_kernel_ext_info_size(len); + kernel_ex_def.set_args(reinterpret_cast(&args), args.head.length); + kernel_ex_def.set_args_size(args.head.length); + domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex(); + *kernel_ex_def_tmp = kernel_ex_def; + hybrid_model.task_defs_[node] = std::vector({task_def, task_def}); + + AicpuTfNodeTask aicpu_tf_node_task(node_item, task_def); + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS); + ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); +} + +TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task_fail) { + ComputeGraphPtr graph = std::make_shared("test"); + GeRootModelPtr ge_root_model = std::make_shared(graph); + ge_root_model->SetModelName("test_name"); + HybridModel hybrid_model(ge_root_model); + + NodePtr node = CreateNode(graph, "deque", FRAMEWORK_OP_TYPE, 1, 1); + ge::AttrUtils::SetBool(node->GetOpDesc(), ATTR_NAME_IS_BLOCKING_OP, true); + std::unique_ptr new_node; + 
ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS); + NodeItem *node_item = new_node.get(); + node_item->input_start = 0; + node_item->output_start = 0; + node_item->is_dynamic = true; + node_item->shape_inference_type = DEPEND_SHAPE_RANGE; + + GraphItem graph_item; + graph_item.node_items_.emplace_back(node_item); + graph_item.total_inputs_ = 1; + graph_item.total_outputs_ = 1; + + GraphExecutionContext graph_execution_context; + SubgraphContext subgraph_context(&graph_item, &graph_execution_context); + ASSERT_EQ(subgraph_context.Init(), SUCCESS); + graph_execution_context.callback_manager = std::unique_ptr(new CallbackManager()); + + auto node_state = subgraph_context.GetOrCreateNodeState(node_item); + ASSERT_NE(node_state, nullptr); + + uint64_t value_0 = 512; + TensorValue in_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetInput(*node_item, 0, in_tensor0); + + TensorValue out_tensor0(&value_0, sizeof(value_0)); + subgraph_context.SetOutput(*node_item, 0, out_tensor0); + + int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo); + vector aicpu_ext_info(len, 0); + char *buf = aicpu_ext_info.data(); + int offset = 0; + hybrid::AicpuExtInfo *ext_info = reinterpret_cast(buf + offset); + ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT; + ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo); + offset += sizeof(hybrid::AicpuExtInfo); + hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast(buf + offset); + async_wait_info->waitType = 0; + async_wait_info->waitId = 0; + async_wait_info->timeOut = 0; + async_wait_info->reserved = 0; + + domi::KernelDef kernel_def; + kernel_def.set_kernel_ext_info(buf, len); + kernel_def.set_kernel_ext_info_size(len); + domi::TaskDef task_def; + + AicpuTaskStruct args; + args.head.length = sizeof(args); + args.head.ioAddrNum = 2; + + kernel_def.set_args(reinterpret_cast(&args), args.head.length); + kernel_def.set_args_size(args.head.length); + domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel(); + *kernel_def_tmp = kernel_def; + + AicpuNodeTask aicpu_node_task(node_item, task_def); + + RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED); + + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED); + + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 
RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); + + node_item->shape_inference_type = DEPEND_COMPUTE; + domi::KernelExDef kernel_ex_def; + kernel_ex_def.set_kernel_ext_info(buf, len); + kernel_ex_def.set_kernel_ext_info_size(len); + kernel_ex_def.set_args(reinterpret_cast(&args), args.head.length); + kernel_ex_def.set_args_size(args.head.length); + domi::KernelExDef *kernel_ex_def_tmp = task_def.mutable_kernel_ex(); + *kernel_ex_def_tmp = kernel_ex_def; + hybrid_model.task_defs_[node] = std::vector({task_def, task_def}); + + AicpuTfNodeTask aicpu_tf_node_task(node_item, task_def); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1); + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), FAILED); + + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED); + + ASSERT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(aicpu_tf_node_task.Init(hybrid_model), SUCCESS); + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(aicpu_tf_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS); +} } // namespace ge diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc index 8964df74..52091856 100644 --- a/tests/ut/ge/single_op/single_op_task_unittest.cc +++ b/tests/ut/ge/single_op/single_op_task_unittest.cc @@ -19,6 +19,7 @@ #include "graph/load/model_manager/model_utils.h" #include "graph/utils/graph_utils.h" +#include "hybrid/node_executor/aicpu/aicpu_ext_info.h" #include "runtime/rt.h" #define protected public @@ -30,6 +31,7 @@ #include "external/register/op_tiling_registry.h" #undef private #undef protected +#include "tests/depends/runtime/src/runtime_stub.h" using namespace std; using namespace testing; @@ -38,9 +40,13 @@ using namespace optiling; class UtestSingleOpTask : public testing::Test { protected: - void SetUp() {} + void SetUp() { + RTS_STUB_SETUP(); + } - void TearDown() {} + void TearDown() { + RTS_STUB_TEARDOWN(); + } }; TEST_F(UtestSingleOpTask, test_build_kernel_task) { @@ -237,3 +243,124 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) { ASSERT_EQ(ret, PARAM_INVALID); } } + +TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_01) { + int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo); + vector aicpu_ext_info(len, 0); + char *buf = aicpu_ext_info.data(); + int offset = 0; + hybrid::AicpuExtInfo *ext_info = reinterpret_cast(buf + offset); + ext_info->infoType = 
aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT; + ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo); + offset += sizeof(hybrid::AicpuExtInfo); + hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast(buf + offset); + async_wait_info->waitType = 0; + async_wait_info->waitId = 0; + async_wait_info->timeOut = 0; + async_wait_info->reserved = 0; + + domi::KernelDef kernel_def; + kernel_def.set_kernel_ext_info(buf, len); + kernel_def.set_kernel_ext_info_size(len); + + auto op_desc = make_shared("deque", "Deque"); + ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true); + AiCpuCCTask aicpu_task; + aicpu_task.SetOpDesc(op_desc); + rtStream_t stream; + ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE); + + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS); +} + +TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_02) { + int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo); + vector aicpu_ext_info(len, 0); + char *buf = aicpu_ext_info.data(); + int offset = 0; + hybrid::AicpuExtInfo *ext_info = reinterpret_cast(buf + offset); + ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT; + ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo); + offset += sizeof(hybrid::AicpuExtInfo); + hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast(buf + offset); + async_wait_info->waitType = 0; + async_wait_info->waitId = 0; + async_wait_info->timeOut = 0; + async_wait_info->reserved = 0; + + domi::KernelDef kernel_def; + kernel_def.set_kernel_ext_info(buf, len); + kernel_def.set_kernel_ext_info_size(len); + + auto op_desc = make_shared("deque", "Deque"); + ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true); + AiCpuTask aicpu_task; + aicpu_task.SetOpDesc(op_desc); + rtStream_t stream; + ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE); + + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS); +} + +TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_fail) { + int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo); + vector aicpu_ext_info(len, 0); + char *buf = aicpu_ext_info.data(); + int offset = 0; + hybrid::AicpuExtInfo *ext_info = reinterpret_cast(buf + offset); + ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT; + ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo); + offset += sizeof(hybrid::AicpuExtInfo); + hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast(buf + offset); + async_wait_info->waitType = 0; + async_wait_info->waitId = 0; + async_wait_info->timeOut = 0; + async_wait_info->reserved = 0; + + domi::KernelDef kernel_def; + kernel_def.set_kernel_ext_info(buf, len); + kernel_def.set_kernel_ext_info_size(len); + + auto op_desc = make_shared("deque", "Deque"); + ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true); + AiCpuTask aicpu_task; + aicpu_task.SetOpDesc(op_desc); + rtStream_t stream; + ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE); + + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS); + + RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, 
rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1); + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED); + + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED); + + ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001); + ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED); + + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS); + RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE); + RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT); + EXPECT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS); +} diff --git a/third_party/fwkacllib/inc/cce/fwk_adpt_struct.h b/third_party/fwkacllib/inc/cce/fwk_adpt_struct.h index df57c82e..5733d68f 100644 --- a/third_party/fwkacllib/inc/cce/fwk_adpt_struct.h +++ b/third_party/fwkacllib/inc/cce/fwk_adpt_struct.h @@ -62,6 +62,7 @@ enum FWKTaskExtInfoType { FWK_ADPT_EXT_SESSION_INFO, FWK_ADPT_EXT_BITMAP, FWK_ADPT_EXT_TOPIC_TYPE, + FWK_ADPT_EXT_ASYNCWAIT, FWK_ADPT_EXT_INVALID }; @@ -80,6 +81,12 @@ enum FWKExtUpdateAddrType { FWK_ADPT_UPDATE_INPUT_OUTPUT }; +enum FWKExtWaitType { + FWK_ADPT_WAIT_TYPE_NULL = 0, + FWK_ADPT_WAIT_TYPE_EVENT, + FWK_ADPT_WAIT_TYPE_INVALID +}; + #pragma pack(push, 1) // API Parameter Structure struct StrFWKKernel { @@ -133,6 +140,15 @@ struct ResultSummary { uint64_t raw_data_size; // size of raw data }; #pragma pack(pop) + +#pragma pack(push, 1) +struct AsyncWait { + uint8_t waitType; // wait type, FWK_ADPT_WAIT_TYPE_EVENT: event wait + uint32_t waitId; // wait id, GE refresh + uint32_t timeOut; // reserved + uint64_t reserved; +}; +#pragma pack(pop) } // end namespace FWKAdapter } // namespace aicpu diff --git a/third_party/fwkacllib/inc/runtime/config.h b/third_party/fwkacllib/inc/runtime/config.h index c1327c45..a244c793 100644 --- a/third_party/fwkacllib/inc/runtime/config.h +++ b/third_party/fwkacllib/inc/runtime/config.h @@ -52,6 +52,14 @@ typedef enum tagRtAicpuScheType { SCHEDULE_HARDWARE, /* HWTS Schedule */ } rtAicpuScheType; +typedef enum tagRtDeviceCapabilityType { + RT_SCHEDULE_SOFTWARE = 0, // SoftWare Schedule + RT_SCHEDULE_SOFTWARE_OPT, + RT_SCHEDULE_HARDWARE, // HWTS Schedule + RT_AICPU_BLOCKING_OP_NOT_SUPPORT, + RT_AICPU_BLOCKING_OP_SUPPORT, // 1910/1980/1951 ts support AICPU blocking operation +} rtDeviceCapabilityType; + typedef enum tagRtVersion { VER_BEGIN = 0, VER_NA = VER_BEGIN, diff --git a/third_party/fwkacllib/inc/runtime/dev.h b/third_party/fwkacllib/inc/runtime/dev.h index 2cf6712f..18d837eb 100644 --- a/third_party/fwkacllib/inc/runtime/dev.h +++ b/third_party/fwkacllib/inc/runtime/dev.h @@ -65,6 +65,7 @@ typedef enum tagRtFeatureType { typedef enum tagRtDeviceFeatureType { FEATURE_TYPE_SCHE, + FEATURE_TYPE_BLOCKING_OPERATOR, 
FEATURE_TYPE_END, } rtDeviceFeatureType_t; @@ -78,6 +79,17 @@ typedef enum tagMemoryInfo { MEMORY_INFO_RSV } rtMemoryInfo_t; +typedef enum tagRtDeviceModuleType { + RT_MODULE_TYPE_SYSTEM = 0, + RT_MODULE_TYPE_AICPU, + RT_MODULE_TYPE_CCPU, + RT_MODULE_TYPE_DCPU, + RT_MODULE_TYPE_AICORE, + RT_MODULE_TYPE_TSCPU, + RT_MODULE_TYPE_PCIE, + RT_MODULE_TYPE_VECTOR_CORE +} tagRtDeviceModuleType_t; + /** * @ingroup dvrt_dev * @brief get total device number. From 3dfd2119c1317dcd08483d9b91310092c306b5fb Mon Sep 17 00:00:00 2001 From: yanghaoran Date: Wed, 28 Jul 2021 16:04:35 +0800 Subject: [PATCH 225/226] sync code 0728 --- CMakeLists.txt | 3 +- cmake/external_libs/json.cmake | 4 - ge/ge_runtime/CMakeLists.txt | 1 + ge/ge_runtime/task/hccl_task.cc | 16 +- ge/ge_runtime/task/label_goto_task.cc | 56 +- ge/ge_runtime/task/label_goto_task.h | 16 +- ge/ge_runtime/task/label_manager.cc | 119 + ge/ge_runtime/task/label_manager.h | 54 + ge/ge_runtime/task/label_switch_task.cc | 25 +- ge/ge_runtime/task/label_switch_task.h | 6 +- inc/external/acl/acl.h | 82 + inc/external/acl/acl_base.h | 638 ++++ inc/external/acl/acl_mdl.h | 1225 ++++++++ inc/external/acl/acl_op.h | 504 ++++ inc/external/acl/acl_op_compiler.h | 121 + inc/external/acl/acl_prof.h | 329 +++ inc/external/acl/acl_rt.h | 958 ++++++ inc/external/acl/acl_tdt.h | 276 ++ inc/external/acl/error_codes/ge_error_codes.h | 75 + inc/external/acl/error_codes/rt_error_codes.h | 109 + inc/external/acl/ops/acl_cblas.h | 334 +++ inc/external/acl/ops/acl_dvpp.h | 2568 +++++++++++++++++ inc/external/acl/ops/acl_fv.h | 348 +++ inc/external/hccl/hccl.h | 159 + inc/external/hccl/hccl_types.h | 101 + inc/external/runtime/rt_error_codes.h | 109 + inc/framework/ge_runtime/task_info.h | 5 +- metadef | 2 +- scripts/format_source_code.sh | 107 + .../fwkacllib/inc/cce/taskdown_common.hpp | 19 +- .../inc/external/runtime/rt_error_codes.h | 0 third_party/fwkacllib/inc/hccl/base.h | 36 +- third_party/fwkacllib/inc/hccl/hccl_types.h | 101 - third_party/fwkacllib/inc/hccl/hcom.h | 14 + third_party/fwkacllib/inc/mmpa/mmpa_api.h | 1 + .../fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h | 4 + .../fwkacllib/inc/mmpa/sub_inc/mmpa_win.h | 4 + third_party/fwkacllib/inc/ops/aipp.h | 4 +- third_party/fwkacllib/inc/ops/all_ops.h | 3 +- third_party/fwkacllib/inc/ops/array_ops.h | 104 +- third_party/fwkacllib/inc/ops/audio_ops.h | 2 +- .../fwkacllib/inc/ops/avg_pool_1d_ops.h | 58 + third_party/fwkacllib/inc/ops/batch_ops.h | 21 +- third_party/fwkacllib/inc/ops/bitwise_ops.h | 31 +- .../fwkacllib/inc/ops/boosted_trees_ops.h | 2 +- .../inc/ops/candidate_sampling_ops.h | 2 +- third_party/fwkacllib/inc/ops/condtake_ops.h | 2 +- .../fwkacllib/inc/ops/control_flow_ops.h | 12 +- third_party/fwkacllib/inc/ops/correlation.h | 52 + third_party/fwkacllib/inc/ops/ctc_ops.h | 83 +- third_party/fwkacllib/inc/ops/data_flow_ops.h | 89 +- .../inc/ops/elewise_calculation_ops.h | 527 +++- .../fwkacllib/inc/ops/functional_ops.h | 2 +- third_party/fwkacllib/inc/ops/get_data_ops.h | 2 +- third_party/fwkacllib/inc/ops/globalavgpool.h | 49 + third_party/fwkacllib/inc/ops/hcom_ops.h | 135 +- third_party/fwkacllib/inc/ops/hvd_ops.h | 2 +- third_party/fwkacllib/inc/ops/image_ops.h | 653 ++++- third_party/fwkacllib/inc/ops/internal_ops.h | 2 +- third_party/fwkacllib/inc/ops/linalg_ops.h | 138 +- third_party/fwkacllib/inc/ops/list_ops.h | 504 ++++ third_party/fwkacllib/inc/ops/logging_ops.h | 2 +- third_party/fwkacllib/inc/ops/lookup_ops.h | 2 +- third_party/fwkacllib/inc/ops/math_ops.h | 283 +- 
.../inc/ops/matrix_calculation_ops.h | 336 ++- .../fwkacllib/inc/ops/nn_batch_norm_ops.h | 134 +- .../fwkacllib/inc/ops/nn_calculation_ops.h | 448 ++- third_party/fwkacllib/inc/ops/nn_detect_ops.h | 602 +++- third_party/fwkacllib/inc/ops/nn_norm_ops.h | 777 ++++- third_party/fwkacllib/inc/ops/nn_ops.h | 141 +- .../fwkacllib/inc/ops/nn_pooling_ops.h | 488 +++- .../fwkacllib/inc/ops/nn_training_ops.h | 51 +- third_party/fwkacllib/inc/ops/no_op.h | 2 +- .../fwkacllib/inc/ops/nonlinear_fuc_ops.h | 408 ++- .../fwkacllib/inc/ops/npu_loss_scale_ops.h | 2 +- third_party/fwkacllib/inc/ops/outfeed_ops.h | 2 +- third_party/fwkacllib/inc/ops/pad_ops.h | 160 +- third_party/fwkacllib/inc/ops/parsing_ops.h | 242 +- third_party/fwkacllib/inc/ops/quantize_ops.h | 31 +- .../fwkacllib/inc/ops/ragged_array_ops.h | 2 +- .../fwkacllib/inc/ops/ragged_conversion_ops.h | 2 +- .../fwkacllib/inc/ops/ragged_math_ops.h | 2 +- third_party/fwkacllib/inc/ops/random_ops.h | 91 +- third_party/fwkacllib/inc/ops/reduce_ops.h | 279 +- .../fwkacllib/inc/ops/resource_variable_ops.h | 2 +- third_party/fwkacllib/inc/ops/rnn.h | 595 +++- third_party/fwkacllib/inc/ops/rpn_ops.h | 2 +- third_party/fwkacllib/inc/ops/save_ops.h | 2 +- third_party/fwkacllib/inc/ops/sdca_ops.h | 2 +- third_party/fwkacllib/inc/ops/selection_ops.h | 426 ++- third_party/fwkacllib/inc/ops/set_ops.h | 2 +- third_party/fwkacllib/inc/ops/sparse_ops.h | 8 +- third_party/fwkacllib/inc/ops/spectral_ops.h | 98 +- .../fwkacllib/inc/ops/split_combination_ops.h | 26 +- third_party/fwkacllib/inc/ops/state_ops.h | 2 +- .../fwkacllib/inc/ops/stateful_random_ops.h | 2 +- .../fwkacllib/inc/ops/stateless_random_ops.h | 2 +- third_party/fwkacllib/inc/ops/string_ops.h | 382 ++- third_party/fwkacllib/inc/ops/swap_co_ops.h | 2 +- .../inc/ops/target_crop_and_resize.h | 2 +- .../fwkacllib/inc/ops/transformation_ops.h | 271 +- .../fwkacllib/inc/ops/warp_perspective_ops.h | 2 +- third_party/fwkacllib/inc/runtime/event.h | 5 + third_party/fwkacllib/inc/runtime/rt.h | 1 + third_party/fwkacllib/inc/runtime/rt_stars.h | 85 + third_party/fwkacllib/inc/tdt/tsd_client.h | 82 - .../inc/toolchain/adx_datadump_server.h | 22 +- .../fwkacllib/inc/toolchain/prof_acl_api.h | 208 +- .../fwkacllib/inc/toolchain/prof_mgr_core.h | 9 + .../fwkacllib/inc/toolchain/prof_reporter.h | 70 +- third_party/prebuild/aarch64/libalog.so | Bin 223920 -> 225280 bytes .../prebuild/aarch64/liberror_manager.so | Bin 888880 -> 1159216 bytes third_party/prebuild/aarch64/libmmpa.a | Bin 63182 -> 62550 bytes third_party/prebuild/x86_64/libalog.so | Bin 164208 -> 173984 bytes .../prebuild/x86_64/liberror_manager.so | Bin 852544 -> 1168920 bytes third_party/prebuild/x86_64/libmmpa.a | Bin 57270 -> 56998 bytes 116 files changed, 16672 insertions(+), 1133 deletions(-) create mode 100644 ge/ge_runtime/task/label_manager.cc create mode 100644 ge/ge_runtime/task/label_manager.h create mode 100644 inc/external/acl/acl.h create mode 100644 inc/external/acl/acl_base.h create mode 100644 inc/external/acl/acl_mdl.h create mode 100644 inc/external/acl/acl_op.h create mode 100644 inc/external/acl/acl_op_compiler.h create mode 100644 inc/external/acl/acl_prof.h create mode 100644 inc/external/acl/acl_rt.h create mode 100644 inc/external/acl/acl_tdt.h create mode 100644 inc/external/acl/error_codes/ge_error_codes.h create mode 100644 inc/external/acl/error_codes/rt_error_codes.h create mode 100644 inc/external/acl/ops/acl_cblas.h create mode 100644 inc/external/acl/ops/acl_dvpp.h create mode 100644 inc/external/acl/ops/acl_fv.h create 
mode 100644 inc/external/hccl/hccl.h create mode 100644 inc/external/hccl/hccl_types.h create mode 100644 inc/external/runtime/rt_error_codes.h create mode 100755 scripts/format_source_code.sh mode change 100755 => 100644 third_party/fwkacllib/inc/external/runtime/rt_error_codes.h delete mode 100644 third_party/fwkacllib/inc/hccl/hccl_types.h create mode 100644 third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h create mode 100644 third_party/fwkacllib/inc/ops/correlation.h create mode 100644 third_party/fwkacllib/inc/ops/globalavgpool.h create mode 100644 third_party/fwkacllib/inc/ops/list_ops.h create mode 100644 third_party/fwkacllib/inc/runtime/rt_stars.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 60509838..5e58eeba 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,7 +125,6 @@ else () message(STATUS "PLATFORM param is invalid, should be train or inference, you choose nothing!") endif() endif() - set(METADEF_DIR ${CMAKE_CURRENT_LIST_DIR}/metadef) set(PARSER_DIR ${CMAKE_CURRENT_LIST_DIR}/parser) set(GE_DEPEND_DIR ${CMAKE_CURRENT_LIST_DIR}/..) @@ -158,6 +157,7 @@ else () elseif(ENABLE_MS_TESTCASES) include(cmake/external_libs/protobuf_static.cmake) include(cmake/external_libs/protoc.cmake) + include(cmake/external_libs/json.cmake) include(cmake/external_libs/securec.cmake) include(cmake/FindModule.cmake) include(cmake/intf_pub_linux.cmake) @@ -175,5 +175,4 @@ else () endif() add_subdirectory(ge) - endif () diff --git a/cmake/external_libs/json.cmake b/cmake/external_libs/json.cmake index 3c1cd012..04659ebc 100755 --- a/cmake/external_libs/json.cmake +++ b/cmake/external_libs/json.cmake @@ -9,10 +9,6 @@ if (GE_PB_PKG) set(REQ_URL "${GE_PB_PKG}/libs/ge_nlohmann_json/include.zip") set(MD5 "0dc903888211db3a0f170304cd9f3a89") set(JSON_INCLUDE_DIR ${JSON_SRC_DIR}) -#elseif (ENABLE_GITEE) -# set(REQ_URL "https://gitee.com/mirrors/JSON-for-Modern-CPP/repository/archive/v3.6.1.zip") -# set(MD5 "5bda78ce308e6cfcf614dcf1d5ff27a7") -#set(JSON_INCLUDE_DIR "${JSON_SRC_DIR}/include") else() set(REQ_URL "https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip") set(MD5 "0dc903888211db3a0f170304cd9f3a89") diff --git a/ge/ge_runtime/CMakeLists.txt b/ge/ge_runtime/CMakeLists.txt index 3243766f..ffea784b 100644 --- a/ge/ge_runtime/CMakeLists.txt +++ b/ge/ge_runtime/CMakeLists.txt @@ -16,6 +16,7 @@ set(GE_SRC_LIST "task/label_goto_task.cc" "task/label_set_task.cc" "task/label_switch_task.cc" + "task/label_manager.cc" ) add_library(ge_runtime SHARED ${GE_SRC_LIST}) diff --git a/ge/ge_runtime/task/hccl_task.cc b/ge/ge_runtime/task/hccl_task.cc index b1c7158c..bfe0d0f3 100644 --- a/ge/ge_runtime/task/hccl_task.cc +++ b/ge/ge_runtime/task/hccl_task.cc @@ -53,15 +53,7 @@ HcclTask::HcclTask(const ModelContext &model_context, const std::shared_ptrworkspace_size() > 0) { - rtError_t rt_ret = rtMalloc(&workspace_mem_, task_info_->workspace_size(), RT_MEMORYINFO_HBM); - if (rt_ret != RT_ERROR_NONE) { - GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); - return false; - } + workspace_mem_ = task_info_->workspace_addr(); } GELOGI("HcclTaskInfo Distribute Start. 
begin to call function LoadTask in hccl."); diff --git a/ge/ge_runtime/task/label_goto_task.cc b/ge/ge_runtime/task/label_goto_task.cc index 7cb6d556..a3b70971 100644 --- a/ge/ge_runtime/task/label_goto_task.cc +++ b/ge/ge_runtime/task/label_goto_task.cc @@ -16,33 +16,46 @@ #include "ge_runtime/task/label_goto_task.h" #include "ge_runtime/task/task_factory.h" -#include "framework/common/util.h" namespace ge { namespace model_runner { LabelGotoTask::LabelGotoTask(const ModelContext &model_context, const std::shared_ptr &task_info) - : TaskRepeater(model_context, task_info), task_info_(task_info) { + : TaskRepeater(model_context, task_info), + task_info_(task_info), + stream_(nullptr), + index_value_(nullptr) { if (task_info_ == nullptr) { GELOGW("task_info_ is null!"); return; } auto stream_list = model_context.stream_list(); auto label_list = model_context.label_list(); + rt_model_handle_ = model_context.rt_model_handle(); uint32_t stream_id = task_info->stream_id(); - uint32_t label_id = task_info->label_id(); + label_id_ = task_info->label_id(); GELOGI("Stream list size:%zu, stream id:%u.", stream_list.size(), stream_id); - GELOGI("Label list size:%zu, label id:%u.", label_list.size(), label_id); - if (stream_id >= stream_list.size() || label_id >= label_list.size()) { + GELOGI("Label list size:%zu, label id:%u.", label_list.size(), label_id_); + if (stream_id >= stream_list.size() || label_id_ >= label_list.size()) { GELOGW("Stream/Label id invalid."); return; } stream_ = stream_list[stream_id]; - label_ = label_list[label_id]; + label_manager_ = LabelManager::GetInstance(); + if (label_manager_ == nullptr) { + GELOGW("Get label manager instance failed."); + return; + } + label_info_ = label_manager_->GetLabelInfo(rt_model_handle_, {label_id_}, label_list); } LabelGotoTask::~LabelGotoTask() { - GE_FREE_RT_LOG(label_info_); - GE_FREE_RT_LOG(index_value_); + if (index_value_ != nullptr) { + rtError_t rt_ret = rtFree(index_value_); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "rtFree index_value_ failed! 
ret: 0x%X.", rt_ret); + } + index_value_ = nullptr; + } } bool LabelGotoTask::Distribute() { @@ -94,21 +107,34 @@ bool LabelGotoTask::CheckParamValid() { return false; } - if (label_ == nullptr) { - GELOGE(PARAM_INVALID, "label is null!"); + if (label_info_ == nullptr) { + GELOGE(PARAM_INVALID, "label info is null!"); return false; } - if (label_info_ != nullptr) { - GELOGE(PARAM_INVALID, "label_info_ has dirty data."); - return false; + if (index_value_ == nullptr) { + rtError_t rt_ret = rtMalloc(&index_value_, sizeof(uint64_t), RT_MEMORY_HBM); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return false; + } + + uint64_t index = 0; + rt_ret = rtMemcpy(index_value_, sizeof(uint64_t), &index, sizeof(index), RT_MEMCPY_HOST_TO_DEVICE); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); + return false; + } } - if (index_value_ != nullptr) { - GELOGE(PARAM_INVALID, "index_value_ has dirty data."); + void *label_info = label_info_->GetLabelInfo(); + rtError_t rt_ret = rtLabelSwitchByIndex(index_value_, 1, label_info, stream_); + if (rt_ret != RT_ERROR_NONE) { + GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret); return false; } + GELOGI("DistributeTask end."); return true; } diff --git a/ge/ge_runtime/task/label_goto_task.h b/ge/ge_runtime/task/label_goto_task.h index addbb700..e579c683 100644 --- a/ge/ge_runtime/task/label_goto_task.h +++ b/ge/ge_runtime/task/label_goto_task.h @@ -18,7 +18,11 @@ #define GE_GE_RUNTIME_TASK_LABEL_GOTO_TASK_H_ #include +#include +#include +#include #include "ge_runtime/task/task.h" +#include "ge_runtime/task/label_manager.h" namespace ge { namespace model_runner { @@ -31,13 +35,13 @@ class LabelGotoTask : public TaskRepeater { bool Distribute() override; private: - bool CheckParamValid(); - std::shared_ptr task_info_; - void *stream_{nullptr}; - void *label_{nullptr}; - void *label_info_{nullptr}; - void *index_value_{nullptr}; + void *stream_; + std::shared_ptr label_info_; + void *index_value_; + uint32_t label_id_; + rtModel_t rt_model_handle_; + std::shared_ptr label_manager_; }; } // namespace model_runner } // namespace ge diff --git a/ge/ge_runtime/task/label_manager.cc b/ge/ge_runtime/task/label_manager.cc new file mode 100644 index 00000000..a2b0c3aa --- /dev/null +++ b/ge/ge_runtime/task/label_manager.cc @@ -0,0 +1,119 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include "ge_runtime/task/label_manager.h"
+#include <algorithm>
+#include <string>
+#include "runtime/mem.h"
+#include "runtime/rt_model.h"
+#include "common/ge_inner_error_codes.h"
+#include "framework/common/debug/ge_log.h"
+
+namespace ge {
+namespace model_runner {
+std::weak_ptr<LabelManager> LabelManager::instance_;
+std::mutex LabelManager::instance_mutex_;
+
+template <typename T>
+static std::string GetVectorString(const std::vector<T> &vec) {
+  std::string ret;
+  for (size_t i = 0; i < vec.size(); ++i) {
+    if (i != 0) {
+      ret.push_back(',');
+    }
+    ret += std::to_string(vec[i]);
+  }
+  return ret;
+}
+
+LabelGuard::~LabelGuard() {
+  void *label_info = GetLabelInfo();
+  if (label_info != nullptr) {
+    rtError_t rt_ret = rtFree(label_info);
+    if (rt_ret != RT_ERROR_NONE) {
+      GELOGE(RT_FAILED, "rtFree label_info failed! ret: 0x%X.", rt_ret);
+    }
+  }
+}
+
+std::shared_ptr<LabelManager> LabelManager::GetInstance() {
+  std::lock_guard<std::mutex> lock(instance_mutex_);
+  auto instance = instance_.lock();
+  if (instance != nullptr) {
+    return instance;
+  }
+
+  instance = std::make_shared<LabelManager>();
+  instance_ = instance;
+  return instance;
+}
+
+std::shared_ptr<LabelGuard> LabelManager::GetLabelInfo(rtModel_t model, const std::vector<uint32_t> &label_ids,
+                                                       const std::vector<void *> &all_label) {
+  std::lock_guard<std::mutex> lock(model_info_mapping_mutex_);
+  rtError_t rt_ret;
+  auto model_iter = model_info_mapping_.find(model);
+  if (model_iter == model_info_mapping_.end()) {
+    model_info_mapping_.emplace(model, std::map<std::string, std::weak_ptr<LabelGuard>>());
+    model_iter = model_info_mapping_.find(model);
+  }
+
+  std::string label_id_str = GetVectorString(label_ids);
+  auto &label_map = model_iter->second;
+  auto label_iter = label_map.find(label_id_str);
+  if (label_iter != label_map.end()) {
+    auto label_guard = label_iter->second.lock();
+    if (label_guard != nullptr) {
+      GELOGI("model %p find same label id %s.", model, label_id_str.c_str());
+      return label_guard;
+    }
+  }
+
+  GELOGI("Alloc label id %s for model %p.", label_id_str.c_str(), model);
+  void *label_info;
+  std::vector<void *> label_list;
+  bool status = true;
+  std::transform(label_ids.begin(), label_ids.end(), std::back_inserter(label_list),
+                 [&all_label, &status](uint32_t idx) -> void * {
+                   if (idx >= all_label.size()) {
+                     GELOGE(PARAM_INVALID, "Invalid label id %u, all label list size %zu.", idx, all_label.size());
+                     status = false;
+                     return nullptr;
+                   }
+                   return all_label[idx];
+                 });
+  if (!status) {
+    GELOGE(PARAM_INVALID, "Get label info failed.");
+    return nullptr;
+  }
+  uint32_t label_info_size = sizeof(rtLabelDevInfo) * label_list.size();
+  rt_ret = rtMalloc(&label_info, label_info_size, RT_MEMORY_HBM);
+  if (rt_ret != RT_ERROR_NONE) {
+    GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
+    return nullptr;
+  }
+
+  rt_ret = rtLabelListCpy(label_list.data(), label_list.size(), label_info, label_info_size);
+  if (rt_ret != RT_ERROR_NONE) {
+    GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
+    return nullptr;
+  }
+
+  auto label_guard = std::make_shared<LabelGuard>(label_info);
+  label_map.emplace(label_id_str, label_guard);
+  return label_guard;
+}
+}  // namespace model_runner
+}  // namespace ge
diff --git a/ge/ge_runtime/task/label_manager.h b/ge/ge_runtime/task/label_manager.h
new file mode 100644
index 00000000..f2c42c29
--- /dev/null
+++ b/ge/ge_runtime/task/label_manager.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_
+#define GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+namespace ge {
+namespace model_runner {
+class LabelGuard {
+ public:
+  explicit LabelGuard(void *label_info) : label_info_(reinterpret_cast<uintptr_t>(label_info)) {}
+  ~LabelGuard();
+  void *GetLabelInfo() { return reinterpret_cast<void *>(label_info_); }
+
+ private:
+  uintptr_t label_info_;
+};
+
+class LabelManager {
+ public:
+  static std::shared_ptr<LabelManager> GetInstance();
+  std::shared_ptr<LabelGuard> GetLabelInfo(rtModel_t model, const std::vector<uint32_t> &label_ids,
+                                           const std::vector<void *> &all_label);
+
+ private:
+  std::mutex model_info_mapping_mutex_;
+  std::map<rtModel_t, std::map<std::string, std::weak_ptr<LabelGuard>>> model_info_mapping_;
+
+  static std::weak_ptr<LabelManager> instance_;
+  static std::mutex instance_mutex_;
+};
+
+
+}  // namespace model_runner
+}  // namespace ge
+#endif  // GE_GE_RUNTIME_TASK_LABEL_MANAGER_H_
\ No newline at end of file
diff --git a/ge/ge_runtime/task/label_switch_task.cc b/ge/ge_runtime/task/label_switch_task.cc
index 8c795da9..cde278d9 100644
--- a/ge/ge_runtime/task/label_switch_task.cc
+++ b/ge/ge_runtime/task/label_switch_task.cc
@@ -24,14 +24,14 @@ LabelSwitchTask::LabelSwitchTask(const ModelContext &model_context,
     : TaskRepeater<LabelSwitchTaskInfo>(model_context, task_info),
       task_info_(task_info),
       stream_(nullptr),
-      all_label_resource_(),
       label_info_(nullptr) {
   if (task_info_ == nullptr) {
     GELOGW("task_info_ is null!");
     return;
   }
-  all_label_resource_ = model_context.label_list();
+  rt_model_handle_ = model_context.rt_model_handle();
+  auto all_label_resource = model_context.label_list();
   auto stream_list = model_context.stream_list();
   uint32_t stream_id = task_info->stream_id();
   GELOGI("Stream list size:%zu, stream id:%u.", stream_list.size(), stream_id);
@@ -40,18 +40,16 @@ LabelSwitchTask::LabelSwitchTask(const ModelContext &model_context,
     return;
   }
   stream_ = stream_list[stream_id];
-}
-
-LabelSwitchTask::~LabelSwitchTask() {
-  if (label_info_ != nullptr) {
-    rtError_t rt_ret = rtFree(label_info_);
-    if (rt_ret != RT_ERROR_NONE) {
-      GELOGE(RT_FAILED, "rtFree fwkOpBuf failed!
ret: 0x%X.", rt_ret); - } - label_info_ = nullptr; + label_manager_ = LabelManager::GetInstance(); + if (label_manager_ == nullptr) { + GELOGW("Get label manager instance failed."); + return; } + label_info_ = label_manager_->GetLabelInfo(rt_model_handle_, task_info_->label_list(), all_label_resource); } +LabelSwitchTask::~LabelSwitchTask() {} + bool LabelSwitchTask::Distribute() { GELOGI("LabelSwitchTask Distribute start."); if (!CheckParamValid()) { @@ -117,8 +115,8 @@ bool LabelSwitchTask::CheckParamValid() { return false; } - if (label_info_ != nullptr) { - GELOGE(PARAM_INVALID, "label_info_ has dirty data."); + if (label_info_ == nullptr) { + GELOGE(PARAM_INVALID, "CopyLabelList failed, label info is null."); return false; } @@ -126,6 +124,5 @@ bool LabelSwitchTask::CheckParamValid() { } REGISTER_TASK(TaskInfoType::LABEL_SWITCH, LabelSwitchTask, LabelSwitchTaskInfo); - } // namespace model_runner } // namespace ge diff --git a/ge/ge_runtime/task/label_switch_task.h b/ge/ge_runtime/task/label_switch_task.h index 463faa31..cfa6877c 100644 --- a/ge/ge_runtime/task/label_switch_task.h +++ b/ge/ge_runtime/task/label_switch_task.h @@ -19,6 +19,7 @@ #include #include "ge_runtime/task/task.h" +#include "ge_runtime/task/label_manager.h" namespace ge { namespace model_runner { @@ -35,8 +36,9 @@ class LabelSwitchTask : public TaskRepeater { std::shared_ptr task_info_; void *stream_; - std::vector all_label_resource_; - void *label_info_; + rtModel_t rt_model_handle_; + std::shared_ptr label_info_; + std::shared_ptr label_manager_; }; } // namespace model_runner } // namespace ge diff --git a/inc/external/acl/acl.h b/inc/external/acl/acl.h new file mode 100644 index 00000000..8d261201 --- /dev/null +++ b/inc/external/acl/acl.h @@ -0,0 +1,82 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_ACL_H_ +#define INC_EXTERNAL_ACL_ACL_H_ + +#include "acl_rt.h" +#include "acl_op.h" +#include "acl_mdl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Current version is 1.0.0 +#define ACL_MAJOR_VERSION 1 +#define ACL_MINOR_VERSION 0 +#define ACL_PATCH_VERSION 0 + +/** + * @ingroup AscendCL + * @brief acl initialize + * + * @par Restriction + * The aclInit interface can be called only once in a process + * @param configPath [IN] the config path,it can be NULL + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclInit(const char *configPath); + +/** + * @ingroup AscendCL + * @brief acl finalize + * + * @par Restriction + * Need to call aclFinalize before the process exits. + * After calling aclFinalize,the services cannot continue to be used normally. + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclFinalize();
+
+/**
+ * @ingroup AscendCL
+ * @brief query ACL interface version
+ *
+ * @param majorVersion [OUT] ACL interface major version
+ * @param minorVersion [OUT] ACL interface minor version
+ * @param patchVersion [OUT] ACL interface patch version
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtGetVersion(int32_t *majorVersion, int32_t *minorVersion, int32_t *patchVersion);
+
+/**
+ * @ingroup AscendCL
+ * @brief get recent error message
+ *
+ * @retval null for failed
+ * @retval OtherValues success
+ */
+ACL_FUNC_VISIBILITY const char *aclGetRecentErrMsg();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INC_EXTERNAL_ACL_ACL_H_
diff --git a/inc/external/acl/acl_base.h b/inc/external/acl/acl_base.h
new file mode 100644
index 00000000..64d4bd81
--- /dev/null
+++ b/inc/external/acl/acl_base.h
@@ -0,0 +1,638 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INC_EXTERNAL_ACL_ACL_BASE_H_
+#define INC_EXTERNAL_ACL_ACL_BASE_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "error_codes/rt_error_codes.h"
+#include "error_codes/ge_error_codes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+#ifdef FUNC_VISIBILITY
+#define ACL_FUNC_VISIBILITY _declspec(dllexport)
+#else
+#define ACL_FUNC_VISIBILITY
+#endif
+#else
+#ifdef FUNC_VISIBILITY
+#define ACL_FUNC_VISIBILITY __attribute__((visibility("default")))
+#else
+#define ACL_FUNC_VISIBILITY
+#endif
+#endif
+
+#ifdef __GNUC__
+#define ACL_DEPRECATED __attribute__((deprecated))
+#define ACL_DEPRECATED_MESSAGE(message) __attribute__((deprecated(message)))
+#elif defined(_MSC_VER)
+#define ACL_DEPRECATED __declspec(deprecated)
+#define ACL_DEPRECATED_MESSAGE(message) __declspec(deprecated(message))
+#else
+#define ACL_DEPRECATED
+#define ACL_DEPRECATED_MESSAGE(message)
+#endif
+
+typedef void *aclrtStream;
+typedef void *aclrtEvent;
+typedef void *aclrtContext;
+typedef int aclError;
+typedef uint16_t aclFloat16;
+typedef struct aclDataBuffer aclDataBuffer;
+typedef struct aclTensorDesc aclTensorDesc;
+
+static const int ACL_ERROR_NONE = 0;
+static const int ACL_SUCCESS = 0;
+
+static const int ACL_ERROR_INVALID_PARAM = 100000;
+static const int ACL_ERROR_UNINITIALIZE = 100001;
+static const int ACL_ERROR_REPEAT_INITIALIZE = 100002;
+static const int ACL_ERROR_INVALID_FILE = 100003;
+static const int ACL_ERROR_WRITE_FILE = 100004;
+static const int ACL_ERROR_INVALID_FILE_SIZE = 100005;
+static const int ACL_ERROR_PARSE_FILE = 100006;
+static const int ACL_ERROR_FILE_MISSING_ATTR = 100007;
+static const int ACL_ERROR_FILE_ATTR_INVALID = 100008;
+static const int ACL_ERROR_INVALID_DUMP_CONFIG = 100009;
+static const int ACL_ERROR_INVALID_PROFILING_CONFIG = 100010;
+static const int ACL_ERROR_INVALID_MODEL_ID = 100011;
+static const int ACL_ERROR_DESERIALIZE_MODEL = 100012;
+static const int
ACL_ERROR_PARSE_MODEL = 100013; +static const int ACL_ERROR_READ_MODEL_FAILURE = 100014; +static const int ACL_ERROR_MODEL_SIZE_INVALID = 100015; +static const int ACL_ERROR_MODEL_MISSING_ATTR = 100016; +static const int ACL_ERROR_MODEL_INPUT_NOT_MATCH = 100017; +static const int ACL_ERROR_MODEL_OUTPUT_NOT_MATCH = 100018; +static const int ACL_ERROR_MODEL_NOT_DYNAMIC = 100019; +static const int ACL_ERROR_OP_TYPE_NOT_MATCH = 100020; +static const int ACL_ERROR_OP_INPUT_NOT_MATCH = 100021; +static const int ACL_ERROR_OP_OUTPUT_NOT_MATCH = 100022; +static const int ACL_ERROR_OP_ATTR_NOT_MATCH = 100023; +static const int ACL_ERROR_OP_NOT_FOUND = 100024; +static const int ACL_ERROR_OP_LOAD_FAILED = 100025; +static const int ACL_ERROR_UNSUPPORTED_DATA_TYPE = 100026; +static const int ACL_ERROR_FORMAT_NOT_MATCH = 100027; +static const int ACL_ERROR_BIN_SELECTOR_NOT_REGISTERED = 100028; +static const int ACL_ERROR_KERNEL_NOT_FOUND = 100029; +static const int ACL_ERROR_BIN_SELECTOR_ALREADY_REGISTERED = 100030; +static const int ACL_ERROR_KERNEL_ALREADY_REGISTERED = 100031; +static const int ACL_ERROR_INVALID_QUEUE_ID = 100032; +static const int ACL_ERROR_REPEAT_SUBSCRIBE = 100033; +static const int ACL_ERROR_STREAM_NOT_SUBSCRIBE = 100034; +static const int ACL_ERROR_THREAD_NOT_SUBSCRIBE = 100035; +static const int ACL_ERROR_WAIT_CALLBACK_TIMEOUT = 100036; +static const int ACL_ERROR_REPEAT_FINALIZE = 100037; +static const int ACL_ERROR_NOT_STATIC_AIPP = 100038; +static const int ACL_ERROR_COMPILING_STUB_MODE = 100039; +static const int ACL_ERROR_GROUP_NOT_SET = 100040; +static const int ACL_ERROR_GROUP_NOT_CREATE = 100041; +static const int ACL_ERROR_PROF_ALREADY_RUN = 100042; +static const int ACL_ERROR_PROF_NOT_RUN = 100043; +static const int ACL_ERROR_DUMP_ALREADY_RUN = 100044; +static const int ACL_ERROR_DUMP_NOT_RUN = 100045; +static const int ACL_ERROR_PROF_REPEAT_SUBSCRIBE = 148046; +static const int ACL_ERROR_PROF_API_CONFLICT = 148047; +static const int ACL_ERROR_INVALID_MAX_OPQUEUE_NUM_CONFIG = 148048; +static const int ACL_ERROR_INVALID_OPP_PATH = 148049; +static const int ACL_ERROR_OP_UNSUPPORTED_DYNAMIC = 148050; + +static const int ACL_ERROR_BAD_ALLOC = 200000; +static const int ACL_ERROR_API_NOT_SUPPORT = 200001; +static const int ACL_ERROR_INVALID_DEVICE = 200002; +static const int ACL_ERROR_MEMORY_ADDRESS_UNALIGNED = 200003; +static const int ACL_ERROR_RESOURCE_NOT_MATCH = 200004; +static const int ACL_ERROR_INVALID_RESOURCE_HANDLE = 200005; +static const int ACL_ERROR_FEATURE_UNSUPPORTED = 200006; +static const int ACL_ERROR_PROF_MODULES_UNSUPPORTED = 200007; + +static const int ACL_ERROR_STORAGE_OVER_LIMIT = 300000; + +static const int ACL_ERROR_INTERNAL_ERROR = 500000; +static const int ACL_ERROR_FAILURE = 500001; +static const int ACL_ERROR_GE_FAILURE = 500002; +static const int ACL_ERROR_RT_FAILURE = 500003; +static const int ACL_ERROR_DRV_FAILURE = 500004; +static const int ACL_ERROR_PROFILING_FAILURE = 500005; + +#define ACL_TENSOR_SHAPE_RANGE_NUM 2 +#define ACL_UNKNOWN_RANK 0xFFFFFFFFFFFFFFFE + +typedef enum { + ACL_DT_UNDEFINED = -1, + ACL_FLOAT = 0, + ACL_FLOAT16 = 1, + ACL_INT8 = 2, + ACL_INT32 = 3, + ACL_UINT8 = 4, + ACL_INT16 = 6, + ACL_UINT16 = 7, + ACL_UINT32 = 8, + ACL_INT64 = 9, + ACL_UINT64 = 10, + ACL_DOUBLE = 11, + ACL_BOOL = 12, + ACL_STRING = 13, +} aclDataType; + +typedef enum { + ACL_FORMAT_UNDEFINED = -1, + ACL_FORMAT_NCHW = 0, + ACL_FORMAT_NHWC = 1, + ACL_FORMAT_ND = 2, + ACL_FORMAT_NC1HWC0 = 3, + ACL_FORMAT_FRACTAL_Z = 4, + ACL_FORMAT_NC1HWC0_C04 = 12, + 
ACL_FORMAT_NDHWC = 27,
+  ACL_FORMAT_FRACTAL_NZ = 29,
+  ACL_FORMAT_NCDHW = 30,
+  ACL_FORMAT_NDC1HWC0 = 32,
+  ACL_FRACTAL_Z_3D = 33
+} aclFormat;
+
+typedef enum {
+  ACL_DEBUG = 0,
+  ACL_INFO = 1,
+  ACL_WARNING = 2,
+  ACL_ERROR = 3,
+} aclLogLevel;
+
+typedef enum {
+  ACL_MEMTYPE_DEVICE = 0,
+  ACL_MEMTYPE_HOST = 1,
+} aclMemType;
+
+/**
+ * @ingroup AscendCL
+ * @brief Converts data of type aclFloat16 to data of type float
+ *
+ * @param value [IN] Data to be converted
+ *
+ * @retval Transformed data
+ */
+ACL_FUNC_VISIBILITY float aclFloat16ToFloat(aclFloat16 value);
+
+/**
+ * @ingroup AscendCL
+ * @brief Converts data of type float to data of type aclFloat16
+ *
+ * @param value [IN] Data to be converted
+ *
+ * @retval Transformed data
+ */
+ACL_FUNC_VISIBILITY aclFloat16 aclFloatToFloat16(float value);
+
+/**
+ * @ingroup AscendCL
+ * @brief create data of aclDataBuffer
+ *
+ * @param data [IN] pointer to data
+ * @li Need to be managed by the user,
+ * call aclrtMalloc interface to apply for memory,
+ * call aclrtFree interface to release memory
+ *
+ * @param size [IN] size of data in bytes
+ *
+ * @retval pointer to created instance. nullptr if run out of memory
+ *
+ * @see aclrtMalloc | aclrtFree
+ */
+ACL_FUNC_VISIBILITY aclDataBuffer *aclCreateDataBuffer(void *data, size_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy data of aclDataBuffer
+ *
+ * @par Function
+ * Only the aclDataBuffer type data is destroyed here.
+ * The memory of the data passed in when the aclCreateDataBuffer interface
+ * is called to create aclDataBuffer type data must be released by the user
+ *
+ * @param dataBuffer [IN] pointer to the aclDataBuffer
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclCreateDataBuffer
+ */
+ACL_FUNC_VISIBILITY aclError aclDestroyDataBuffer(const aclDataBuffer *dataBuffer);
+
+/**
+ * @ingroup AscendCL
+ * @brief update new data of aclDataBuffer
+ *
+ * @param dataBuffer [OUT] pointer to aclDataBuffer
+ * @li The old data needs to be released by the user, otherwise a memory
+ * leak may occur; call aclGetDataBufferAddr interface to get old data address,
+ * call aclrtFree interface to release memory
+ *
+ * @param data [IN] pointer to new data
+ * @li Need to be managed by the user,
+ * call aclrtMalloc interface to apply for memory,
+ * call aclrtFree interface to release memory
+ *
+ * @param size [IN] size of data in bytes
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
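+ *
+ * @par Example
+ * Editor's sketch (not part of the original header); dev_ptr/new_ptr and the
+ * sizes are assumed to come from earlier aclrtMalloc calls:
+ * @code
+ * aclDataBuffer *buf = aclCreateDataBuffer(dev_ptr, old_size);
+ * void *stale = aclGetDataBufferAddr(buf);          // declared below; keep to free later
+ * if (aclUpdateDataBuffer(buf, new_ptr, new_size) == ACL_SUCCESS) {
+ *     (void)aclrtFree(stale);                       // avoid leaking the old memory
+ * }
+ * @endcode
+ *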
+ * @retval OtherValues Failure + * + * @see aclrtMalloc | aclrtFree | aclGetDataBufferAddr + */ +ACL_FUNC_VISIBILITY aclError aclUpdateDataBuffer(aclDataBuffer *dataBuffer, void *data, size_t size); + +/** + * @ingroup AscendCL + * @brief get data address from aclDataBuffer + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data address + */ +ACL_FUNC_VISIBILITY void *aclGetDataBufferAddr(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get data size of aclDataBuffer + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data size + */ +ACL_DEPRECATED_MESSAGE("aclGetDataBufferSize is deprecated, use aclGetDataBufferSizeV2 instead") +ACL_FUNC_VISIBILITY uint32_t aclGetDataBufferSize(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get data size of aclDataBuffer to replace aclGetDataBufferSize + * + * @param dataBuffer [IN] pointer to the data of aclDataBuffer + * + * @retval data size + */ +ACL_FUNC_VISIBILITY size_t aclGetDataBufferSizeV2(const aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief get size of aclDataType + * + * @param dataType [IN] aclDataType data the size to get + * + * @retval size of the aclDataType + */ +ACL_FUNC_VISIBILITY size_t aclDataTypeSize(aclDataType dataType); + +// interfaces of tensor desc +/** + * @ingroup AscendCL + * @brief create data aclTensorDesc + * + * @param dataType [IN] Data types described by tensor + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * @param format [IN] tensor format + * + * @retval aclTensorDesc pointer. + * @retval nullptr if param is invalid or run out of memory + */ +ACL_FUNC_VISIBILITY aclTensorDesc *aclCreateTensorDesc(aclDataType dataType, int numDims, const int64_t *dims, + aclFormat format); + +/** + * @ingroup AscendCL + * @brief destroy data aclTensorDesc + * + * @param desc [IN] pointer to the data of aclTensorDesc to destroy + */ +ACL_FUNC_VISIBILITY void aclDestroyTensorDesc(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief set tensor shape range for aclTensorDesc + * + * @param desc [OUT] pointer to the data of aclTensorDesc + * @param dimsCount [IN] the number of dimensions of the shape + * @param dimsRange [IN] the range of dimensions of the shape + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorShapeRange(aclTensorDesc *desc, size_t dimsCount, + int64_t dimsRange[][ACL_TENSOR_SHAPE_RANGE_NUM]); + +/** + * @ingroup AscendCL + * @brief get data type specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data type specified by the tensor description. + * @retval ACL_DT_UNDEFINED if description is null + */ +ACL_FUNC_VISIBILITY aclDataType aclGetTensorDescType(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get data format specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data format specified by the tensor description. 
+ * @retval ACL_FORMAT_UNDEFINED if description is null + */ +ACL_FUNC_VISIBILITY aclFormat aclGetTensorDescFormat(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get tensor size specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval data size specified by the tensor description. + * @retval 0 if description is null + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescSize(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get element count specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval element count specified by the tensor description. + * @retval 0 if description is null + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescElementCount(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief get number of dims specified by the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval number of dims specified by the tensor description. + * @retval 0 if description is null + * @retval ACL_UNKNOWN_RANK if the tensor dim is -2 + */ +ACL_FUNC_VISIBILITY size_t aclGetTensorDescNumDims(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * + * @retval dim specified by the tensor description and index. + * @retval -1 if description or index is invalid + */ +ACL_DEPRECATED_MESSAGE("aclGetTensorDescDim is deprecated, use aclGetTensorDescDimV2 instead") +ACL_FUNC_VISIBILITY int64_t aclGetTensorDescDim(const aclTensorDesc *desc, size_t index); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * @param dimSize [OUT] size of the specified dim. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimV2(const aclTensorDesc *desc, size_t index, int64_t *dimSize); + +/** + * @ingroup AscendCL + * @brief Get the range of the specified dim in the tensor description + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * @param index [IN] index of dims, start from 0. + * @param dimRangeNum [IN] number of dimRange. + * @param dimRange [OUT] range of the specified dim. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclGetTensorDescDimRange(const aclTensorDesc *desc, size_t index, size_t dimRangeNum, + int64_t *dimRange); + +/** + * @ingroup AscendCL + * @brief set tensor description name + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param name [IN] tensor description name + */ +ACL_FUNC_VISIBILITY void aclSetTensorDescName(aclTensorDesc *desc, const char *name); + +/** + * @ingroup AscendCL + * @brief get tensor description name + * + * @param desc [IN] pointer to the instance of aclTensorDesc + * + * @retval tensor description name. 
+ * @retval empty string if description is null + */ +ACL_FUNC_VISIBILITY const char *aclGetTensorDescName(aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Convert the format in the source aclTensorDesc according to + * the specified dstFormat to generate a new target aclTensorDesc. + * The format in the source aclTensorDesc remains unchanged. + * + * @param srcDesc [IN] pointer to the source tensor desc + * @param dstFormat [IN] destination format + * @param dstDesc [OUT] pointer to the pointer to the destination tensor desc + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclTransTensorDescFormat(const aclTensorDesc *srcDesc, aclFormat dstFormat, + aclTensorDesc **dstDesc); + +/** + * @ingroup AscendCL + * @brief Set the storage format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_DEPRECATED_MESSAGE("aclSetTensorStorageFormat is deprecated, use aclSetTensorFormat instead") +ACL_FUNC_VISIBILITY aclError aclSetTensorStorageFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the storage shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_DEPRECATED_MESSAGE("aclSetTensorStorageShape is deprecated, use aclSetTensorShape instead") +ACL_FUNC_VISIBILITY aclError aclSetTensorStorageShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief Set the format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief Set the original format specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param format [IN] the storage format + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorOriginFormat(aclTensorDesc *desc, aclFormat format); + +/** + * @ingroup AscendCL + * @brief Set the original shape specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param numDims [IN] the number of dimensions of the shape + * @param dims [IN] the size of the specified dimension + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorOriginShape(aclTensorDesc *desc, int numDims, const int64_t *dims); + +/** + * @ingroup AscendCL + * @brief get op description info + * + * @param desc [IN] pointer to tensor description + * @param index [IN] index of tensor + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY aclTensorDesc *aclGetTensorDescByIndex(aclTensorDesc *desc, size_t index); + +/** + * @ingroup AscendCL + * @brief get address of tensor + * + * @param desc [IN] pointer to tensor description + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY void *aclGetTensorDescAddress(const aclTensorDesc *desc); + +/** + * @ingroup AscendCL + * @brief Set the dynamic input name specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param dynamicInputName [IN] pointer to the dynamic input name + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorDynamicInput(aclTensorDesc *desc, const char *dynamicInputName); + +/** + * @ingroup AscendCL + * @brief Set const data specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param dataBuffer [IN] pointer to the const databuffer + * @param length [IN] the length of const databuffer + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorConst(aclTensorDesc *desc, void *dataBuffer, size_t length); + +/** + * @ingroup AscendCL + * @brief Set tensor memory type specified by the tensor description + * + * @param desc [OUT] pointer to the instance of aclTensorDesc + * @param memType [IN] ACL_MEMTYPE_DEVICE means device, ACL_MEMTYPE_HOST means host + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclSetTensorPlaceMent(aclTensorDesc *desc, aclMemType memType); + +/** + * @ingroup AscendCL + * @brief an interface for users to output APP logs + * + * @param logLevel [IN] the level of current log + * @param func [IN] the function where the log is located + * @param file [IN] the file where the log is located + * @param line [IN] Number of source lines where the log is located + * @param fmt [IN] the format of current log + * @param ... [IN] the value of current log + */ +ACL_FUNC_VISIBILITY void aclAppLog(aclLogLevel logLevel, const char *func, const char *file, uint32_t line, + const char *fmt, ...); + +/** + * @ingroup AscendCL + * @brief get soc name + * + * @retval null for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY const char *aclrtGetSocName(); + +#define ACL_APP_LOG(level, fmt, ...) aclAppLog(level, __FUNCTION__, __FILE__, __LINE__, fmt, ##__VA_ARGS__) + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_BASE_H_ diff --git a/inc/external/acl/acl_mdl.h b/inc/external/acl/acl_mdl.h new file mode 100644 index 00000000..2bf85e29 --- /dev/null +++ b/inc/external/acl/acl_mdl.h @@ -0,0 +1,1225 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_ACL_MODEL_H_ +#define INC_EXTERNAL_ACL_ACL_MODEL_H_ + +#include +#include + +#include "acl_base.h" +#include "acl_rt.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACL_MAX_DIM_CNT 128 +#define ACL_MAX_TENSOR_NAME_LEN 128 +#define ACL_MAX_BATCH_NUM 128 +#define ACL_MAX_HW_NUM 128 +#define ACL_MAX_SHAPE_COUNT 128 +#define ACL_INVALID_NODE_INDEX 0xFFFFFFFF + +#define ACL_MDL_LOAD_FROM_FILE 1 +#define ACL_MDL_LOAD_FROM_FILE_WITH_MEM 2 +#define ACL_MDL_LOAD_FROM_MEM 3 +#define ACL_MDL_LOAD_FROM_MEM_WITH_MEM 4 +#define ACL_MDL_LOAD_FROM_FILE_WITH_Q 5 +#define ACL_MDL_LOAD_FROM_MEM_WITH_Q 6 + +#define ACL_DYNAMIC_TENSOR_NAME "ascend_mbatch_shape_data" +#define ACL_DYNAMIC_AIPP_NAME "ascend_dynamic_aipp_data" +#define ACL_ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES "_datadump_original_op_names" + +typedef struct aclmdlDataset aclmdlDataset; +typedef struct aclmdlDesc aclmdlDesc; +typedef struct aclmdlAIPP aclmdlAIPP; +typedef struct aclAippExtendInfo aclAippExtendInfo; +typedef struct aclmdlConfigHandle aclmdlConfigHandle; + +typedef enum { + ACL_YUV420SP_U8 = 1, + ACL_XRGB8888_U8, + ACL_RGB888_U8, + ACL_YUV400_U8, + ACL_NC1HWC0DI_FP16, + ACL_NC1HWC0DI_S8, + ACL_ARGB8888_U8, + ACL_YUYV_U8, + ACL_YUV422SP_U8, + ACL_AYUV444_U8, + ACL_RAW10, + ACL_RAW12, + ACL_RAW16, + ACL_RAW24, + ACL_AIPP_RESERVED = 0xffff, +} aclAippInputFormat; + +typedef enum { + ACL_MDL_PRIORITY_INT32 = 0, + ACL_MDL_LOAD_TYPE_SIZET, + ACL_MDL_PATH_PTR, /**< pointer to model load path with deep copy */ + ACL_MDL_MEM_ADDR_PTR, /**< pointer to model memory with shallow copy */ + ACL_MDL_MEM_SIZET, + ACL_MDL_WEIGHT_ADDR_PTR, /**< pointer to weight memory of model with shallow copy */ + ACL_MDL_WEIGHT_SIZET, + ACL_MDL_WORKSPACE_ADDR_PTR, /**< pointer to worksapce memory of model with shallow copy */ + ACL_MDL_WORKSPACE_SIZET, + ACL_MDL_INPUTQ_NUM_SIZET, + ACL_MDL_INPUTQ_ADDR_PTR, /**< pointer to inputQ with shallow copy */ + ACL_MDL_OUTPUTQ_NUM_SIZET, + ACL_MDL_OUTPUTQ_ADDR_PTR /**< pointer to outputQ with shallow copy */ +} aclmdlConfigAttr; + +typedef enum { + ACL_DATA_WITHOUT_AIPP = 0, + ACL_DATA_WITH_STATIC_AIPP, + ACL_DATA_WITH_DYNAMIC_AIPP, + ACL_DYNAMIC_AIPP_NODE +} aclmdlInputAippType; + +typedef struct aclmdlIODims { + char name[ACL_MAX_TENSOR_NAME_LEN]; /**< tensor name */ + size_t dimCount; /**< dim array count */ + int64_t dims[ACL_MAX_DIM_CNT]; /**< dim data array */ +} aclmdlIODims; + +typedef struct aclAippDims { + aclmdlIODims srcDims; /**< input dims before model transform */ + size_t srcSize; /**< input size before model transform */ + aclmdlIODims aippOutdims; /**< aipp output dims */ + size_t aippOutSize; /**< aipp output size */ +} aclAippDims; + +typedef struct aclmdlBatch { + size_t batchCount; /**< batch array count */ + uint64_t batch[ACL_MAX_BATCH_NUM]; /**< batch data array */ +} aclmdlBatch; + +typedef struct aclmdlHW { + size_t hwCount; /**< height&width array count */ + uint64_t hw[ACL_MAX_HW_NUM][2]; /**< height&width data array */ +} aclmdlHW; + +typedef struct aclAippInfo { + aclAippInputFormat inputFormat; + int32_t srcImageSizeW; + int32_t 
srcImageSizeH;
+  int8_t cropSwitch;
+  int32_t loadStartPosW;
+  int32_t loadStartPosH;
+  int32_t cropSizeW;
+  int32_t cropSizeH;
+  int8_t resizeSwitch;
+  int32_t resizeOutputW;
+  int32_t resizeOutputH;
+  int8_t paddingSwitch;
+  int32_t leftPaddingSize;
+  int32_t rightPaddingSize;
+  int32_t topPaddingSize;
+  int32_t bottomPaddingSize;
+  int8_t cscSwitch;
+  int8_t rbuvSwapSwitch;
+  int8_t axSwapSwitch;
+  int8_t singleLineMode;
+  int32_t matrixR0C0;
+  int32_t matrixR0C1;
+  int32_t matrixR0C2;
+  int32_t matrixR1C0;
+  int32_t matrixR1C1;
+  int32_t matrixR1C2;
+  int32_t matrixR2C0;
+  int32_t matrixR2C1;
+  int32_t matrixR2C2;
+  int32_t outputBias0;
+  int32_t outputBias1;
+  int32_t outputBias2;
+  int32_t inputBias0;
+  int32_t inputBias1;
+  int32_t inputBias2;
+  int32_t meanChn0;
+  int32_t meanChn1;
+  int32_t meanChn2;
+  int32_t meanChn3;
+  float minChn0;
+  float minChn1;
+  float minChn2;
+  float minChn3;
+  float varReciChn0;
+  float varReciChn1;
+  float varReciChn2;
+  float varReciChn3;
+  aclFormat srcFormat;
+  aclDataType srcDatatype;
+  size_t srcDimNum;
+  size_t shapeCount;
+  aclAippDims outDims[ACL_MAX_SHAPE_COUNT];
+  aclAippExtendInfo *aippExtend; /**< reserved parameters, current version needs to be null */
+} aclAippInfo;
+
+/**
+ * @ingroup AscendCL
+ * @brief Create data of type aclmdlDesc
+ *
+ * @retval the aclmdlDesc pointer
+ */
+ACL_FUNC_VISIBILITY aclmdlDesc *aclmdlCreateDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy data of type aclmdlDesc
+ *
+ * @param modelDesc [IN] Pointer to aclmdlDesc to be destroyed
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlDestroyDesc(aclmdlDesc *modelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get aclmdlDesc data of the model according to the model ID
+ *
+ * @param modelDesc [OUT] aclmdlDesc pointer
+ * @param modelId [IN] model id
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
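+ *
+ * @par Example
+ * Editor's sketch (not part of the original header); modelId is assumed to
+ * come from a prior aclmdlLoadFromFile call:
+ * @code
+ * aclmdlDesc *desc = aclmdlCreateDesc();
+ * if (desc != NULL && aclmdlGetDesc(desc, modelId) == ACL_SUCCESS) {
+ *     size_t inputs = aclmdlGetNumInputs(desc);    // declared below
+ * }
+ * (void)aclmdlDestroyDesc(desc);
+ * @endcode
+ *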
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDesc(aclmdlDesc *modelDesc, uint32_t modelId); + +/** + * @ingroup AscendCL + * @brief Get the number of the inputs of + * the model according to data of aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * + * @retval input size with aclmdlDesc + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetNumInputs(aclmdlDesc *modelDesc); + +/** + * @ingroup AscendCL + * @brief Get the number of the output of + * the model according to data of aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * + * @retval output size with aclmdlDesc + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetNumOutputs(aclmdlDesc *modelDesc); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified input according to + * the data of type aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * @param index [IN] the size of the number of inputs to be obtained, + * the index value starts from 0 + * + * @retval Specify the size of the input + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetInputSizeByIndex(aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief Get the size of the specified output according to + * the data of type aclmdlDesc + * + * @param modelDesc [IN] aclmdlDesc pointer + * @param index [IN] the size of the number of outputs to be obtained, + * the index value starts from 0 + * + * @retval Specify the size of the output + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetOutputSizeByIndex(aclmdlDesc *modelDesc, size_t index); + +/** + * @ingroup AscendCL + * @brief Create data of type aclmdlDataset + * + * @retval the aclmdlDataset pointer + */ +ACL_FUNC_VISIBILITY aclmdlDataset *aclmdlCreateDataset(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlDataset + * + * @param dataset [IN] Pointer to aclmdlDataset to be destroyed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyDataset(const aclmdlDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Add aclDataBuffer to aclmdlDataset + * + * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added + * @param dataBuffer [IN] aclDataBuffer address to be added + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlAddDatasetBuffer(aclmdlDataset *dataset, aclDataBuffer *dataBuffer); + +/** + * @ingroup AscendCL + * @brief Set aclTensorDesc to aclmdlDataset + * + * @param dataset [OUT] aclmdlDataset address of aclDataBuffer to be added + * @param tensorDesc [IN] aclTensorDesc address to be added + * @param index [IN] index of tensorDesc which to be added + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetDatasetTensorDesc(aclmdlDataset *dataset, aclTensorDesc *tensorDesc, + size_t index); + +/** + * @ingroup AscendCL + * @brief Get the number of aclDataBuffer in aclmdlDataset + * + * @param dataset [IN] aclmdlDataset poiter + * + * @retval the number of aclDataBuffer + */ +ACL_FUNC_VISIBILITY size_t aclmdlGetDatasetNumBuffers(const aclmdlDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Get the aclDataBuffer in aclmdlDataset by index + * + * @param dataset [IN] aclmdlDataset poiter + * @param index [IN] the index of aclDataBuffer + * + * @retval Get successfully, return the address of aclDataBuffer + * @retval Failure return NULL + */ +ACL_FUNC_VISIBILITY aclDataBuffer *aclmdlGetDatasetBuffer(const aclmdlDataset *dataset, size_t index); + +/** + * @ingroup AscendCL + * @brief Load offline model data from files + * and manage memory internally by the system + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations + * + * @param modelPath [IN] Storage path for offline model files + * @param modelId [OUT] Model ID generated after + * the system finishes loading the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFile(const char *modelPath, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief Load offline model data from memory and manage the memory of + * model running internally by the system + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations + * + * @param model [IN] Model data stored in memory + * @param modelSize [IN] model data size + * @param modelId [OUT] Model ID generated after + * the system finishes loading the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMem(const void *model, size_t modelSize, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief Load offline model data from a file, + * and the user manages the memory of the model run by itself + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations. + * @param modelPath [IN] Storage path for offline model files + * @param modelId [OUT] Model ID generated after finishes loading the model + * @param workPtr [IN] A pointer to the working memory + * required by the model on the Device,can be null + * @param workSize [IN] The amount of working memory required by the model + * @param weightPtr [IN] Pointer to model weight memory on Device + * @param weightSize [IN] The amount of weight memory required by the model + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithMem(const char *modelPath, uint32_t *modelId, void *workPtr, + size_t workSize, void *weightPtr, size_t weightSize); + +/** + * @ingroup AscendCL + * @brief Load offline model data from memory, + * and the user can manage the memory of model running + * + * @par Function + * After the system finishes loading the model, + * the model ID returned is used as a mark to identify the model + * during subsequent operations + * @param model [IN] Model data stored in memory + * @param modelSize [IN] model data size + * @param modelId [OUT] Model ID generated after finishes loading the model + * @param workPtr [IN] A pointer to the working memory + * required by the model on the Device,can be null + * @param workSize [IN] work memory size + * @param weightPtr [IN] Pointer to model weight memory on Device,can be null + * @param weightSize [IN] The amount of weight memory required by the model + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithMem(const void *model, size_t modelSize, uint32_t *modelId, + void *workPtr, size_t workSize, void *weightPtr, + size_t weightSize); + +/** + * @ingroup AscendCL + * @brief load model from file with async queue + * + * @param modelPath [IN] model path + * @param modelId [OUT] return model id if load success + * @param inputQ [IN] input queue pointer + * @param inputQNum [IN] input queue num + * @param outputQ [IN] output queue pointer + * @param outputQNum [IN] output queue num + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromFileWithQ(const char *modelPath, uint32_t *modelId, const uint32_t *inputQ, + size_t inputQNum, const uint32_t *outputQ, size_t outputQNum); + +/** + * @ingroup AscendCL + * @brief load model from memory with async queue + * + * @param model [IN] model memory which user manages + * @param modelSize [IN] model size + * @param modelId [OUT] return model id if load success + * @param inputQ [IN] input queue pointer + * @param inputQNum [IN] input queue num + * @param outputQ [IN] output queue pointer + * @param outputQNum [IN] output queue num + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadFromMemWithQ(const void *model, size_t modelSize, uint32_t *modelId, + const uint32_t *inputQ, size_t inputQNum, const uint32_t *outputQ, + size_t outputQNum); + +/** + * @ingroup AscendCL + * @brief Execute model synchronous inference until the inference result is returned + * + * @param modelId [IN] ID of the model to perform inference + * @param input [IN] Input data for model inference + * @param output [OUT] Output data for model inference + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlExecute(uint32_t modelId, const aclmdlDataset *input, aclmdlDataset *output); + +/** + * @ingroup AscendCL + * @brief Execute model asynchronous inference until the inference result is returned + * + * @param modelId [IN] ID of the model to perform inference + * @param input [IN] Input data for model inference + * @param output [OUT] Output data for model inference + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlExecuteAsync(uint32_t modelId, const aclmdlDataset *input, aclmdlDataset *output,
+                                                aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief unload model with model id
+ *
+ * @param modelId [IN] model id to be unloaded
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlUnload(uint32_t modelId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get the weight memory size and working memory size
+ * required for model execution according to the model file
+ *
+ * @param fileName [IN] Model path to get memory information
+ * @param workSize [OUT] The amount of working memory for model executed
+ * @param weightSize [OUT] The amount of weight memory for model executed
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlQuerySize(const char *fileName, size_t *workSize, size_t *weightSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Obtain the weights required for
+ * model execution according to the model data in memory
+ *
+ * @par Restriction
+ * The execution and weight memory is Device memory,
+ * and must be allocated and released by the user.
+ * @param model [IN] model memory which user manages
+ * @param modelSize [IN] model data size
+ * @param workSize [OUT] The amount of working memory for model executed
+ * @param weightSize [OUT] The amount of weight memory for model executed
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlQuerySizeFromMem(const void *model, size_t modelSize, size_t *workSize,
+                                                    size_t *weightSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief In dynamic batch scenarios,
+ * it is used to set the number of images processed
+ * at one time during model inference
+ *
+ * @param modelId [IN] model id
+ * @param dataset [IN|OUT] data for model inference
+ * @param index [IN] index of dynamic tensor
+ * @param batchSize [IN] Number of images processed at a time during model inference
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicBatchSize(uint32_t modelId, aclmdlDataset *dataset, size_t index,
+                                                       uint64_t batchSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Sets the H and W of the specified input of the model
+ *
+ * @param modelId [IN] model id
+ * @param dataset [IN|OUT] data for model inference
+ * @param index [IN] index of dynamic tensor
+ * @param height [IN] model height
+ * @param width [IN] model width
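+ *
+ * @par Example
+ * Editor's sketch (not part of the original header); assumes 224x224 matches
+ * one of the H/W gears compiled into the model, and desc/modelId/dataset come
+ * from earlier load and setup calls:
+ * @code
+ * size_t index = 0;
+ * if (aclmdlGetInputIndexByName(desc, ACL_DYNAMIC_TENSOR_NAME, &index) == ACL_SUCCESS) {
+ *     (void)aclmdlSetDynamicHWSize(modelId, dataset, index, 224, 224);
+ * }
+ * @endcode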
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlSetDynamicHWSize(uint32_t modelId, aclmdlDataset *dataset, size_t index,
+                                                    uint64_t height, uint64_t width);
+
+/**
+ * @ingroup AscendCL
+ * @brief Sets the dynamic dims of the specified input of the model
+ *
+ * @param modelId [IN] model id
+ * @param dataset [IN|OUT] data for model inference
+ * @param index [IN] index of dynamic dims
+ * @param dims [IN] value of dynamic dims
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlSetInputDynamicDims(uint32_t modelId, aclmdlDataset *dataset, size_t index,
+                                                       const aclmdlIODims *dims);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input dims info
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] input tensor index
+ * @param dims [OUT] dims info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlGetInputDimsV2
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetInputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input dims info (version 2), especially for static aipp;
+ * it behaves the same as aclmdlGetInputDims when the model has no static aipp
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] input tensor index
+ * @param dims [OUT] dims info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlGetInputDims
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetInputDimsV2(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims);
+
+/**
+ * @ingroup AscendCL
+ * @brief get output dims info
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] output tensor index
+ * @param dims [OUT] dims info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims);
+
+/**
+ * @ingroup AscendCL
+ * @brief get current output dims info
+ *
+ * @par Function
+ * The following use cases are supported:
+ * @li Get current output shape when model is dynamic and
+ * dynamic shape info is set
+ * @li Get max output shape when model is dynamic and
+ * dynamic shape info is not set
+ * @li Get actual output shape when model is static
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] output tensor index
+ * @param dims [OUT] dims info
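+ *
+ * @par Example
+ * Editor's sketch (not part of the original header): reading the current
+ * shape of output 0 after a successful aclmdlExecute call:
+ * @code
+ * aclmdlIODims dims;
+ * if (aclmdlGetCurOutputDims(modelDesc, 0, &dims) == ACL_SUCCESS) {
+ *     for (size_t i = 0; i < dims.dimCount; ++i) {
+ *         // dims.dims[i] is the i-th dimension of output 0
+ *     }
+ * }
+ * @endcode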
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetCurOutputDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims);
+
+/**
+ * @ingroup AscendCL
+ * @brief get attr value by op name
+ *
+ * @param modelDesc [IN] model description
+ * @param opName [IN] op name
+ * @param attr [IN] attr name
+ *
+ * @retval the attr value
+ */
+ACL_FUNC_VISIBILITY const char *aclmdlGetOpAttr(aclmdlDesc *modelDesc, const char *opName, const char *attr);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input name by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] input tensor index
+ *
+ * @retval input tensor name, the same life cycle as modelDesc
+ */
+ACL_FUNC_VISIBILITY const char *aclmdlGetInputNameByIndex(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get output name by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] output tensor index
+ *
+ * @retval output tensor name, the same life cycle as modelDesc
+ */
+ACL_FUNC_VISIBILITY const char *aclmdlGetOutputNameByIndex(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input format by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] input tensor index
+ *
+ * @retval input tensor format
+ */
+ACL_FUNC_VISIBILITY aclFormat aclmdlGetInputFormat(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get output format by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] output tensor index
+ *
+ * @retval output tensor format
+ */
+ACL_FUNC_VISIBILITY aclFormat aclmdlGetOutputFormat(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input data type by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] input tensor index
+ *
+ * @retval input tensor data type
+ */
+ACL_FUNC_VISIBILITY aclDataType aclmdlGetInputDataType(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get output data type by index
+ *
+ * @param modelDesc [IN] model description
+ * @param index [IN] output tensor index
+ *
+ * @retval output tensor data type
+ */
+ACL_FUNC_VISIBILITY aclDataType aclmdlGetOutputDataType(const aclmdlDesc *modelDesc, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input tensor index by name
+ *
+ * @param modelDesc [IN] model description
+ * @param name [IN] input tensor name
+ * @param index [OUT] input tensor index
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetInputIndexByName(const aclmdlDesc *modelDesc, const char *name, size_t *index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get output tensor index by name
+ *
+ * @param modelDesc [IN] model description
+ * @param name [IN] output tensor name
+ * @param index [OUT] output tensor index
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetOutputIndexByName(const aclmdlDesc *modelDesc, const char *name, size_t *index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get dynamic batch info
+ *
+ * @param modelDesc [IN] model description
+ * @param batch [OUT] dynamic batch info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicBatch(const aclmdlDesc *modelDesc, aclmdlBatch *batch); + +/** + * @ingroup AscendCL + * @brief get dynamic height&width info + * + * @param modelDesc [IN] model description + * @param index [IN] input tensor index + * @param hw [OUT] dynamic height&width info + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetDynamicHW(const aclmdlDesc *modelDesc, size_t index, aclmdlHW *hw); + +/** + * @ingroup AscendCL + * @brief get dynamic gear count + * + * @param modelDesc [IN] model description + * @param index [IN] unused, must be -1 + * @param gearCount [OUT] dynamic gear count + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicGearCount(const aclmdlDesc *modelDesc, size_t index, + size_t *gearCount); + +/** + * @ingroup AscendCL + * @brief get dynamic dims info + * + * @param modelDesc [IN] model description + * @param index [IN] unused, must be -1 + * @param dims [OUT] value of dynamic dims + * @param gearCount [IN] dynamic gear count + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlGetInputDynamicDims(const aclmdlDesc *modelDesc, size_t index, aclmdlIODims *dims, + size_t gearCount); + +/** + * @ingroup AscendCL + * @brief Create data of type aclmdlAIPP + * + * @param batchSize [IN] batchsizes of model + * + * @retval the aclmdlAIPP pointer + */ +ACL_FUNC_VISIBILITY aclmdlAIPP *aclmdlCreateAIPP(uint64_t batchSize); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlAIPP + * + * @param aippParmsSet [IN] Pointer for aclmdlAIPP to be destroyed + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyAIPP(const aclmdlAIPP *aippParmsSet); + +/** + * @ingroup AscendCL + * @brief set InputFormat of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param inputFormat [IN] The inputFormat of aipp + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPInputFormat(aclmdlAIPP *aippParmsSet, aclAippInputFormat inputFormat); + +/** + * @ingroup AscendCL + * @brief set cscParms of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param csc_switch [IN] Csc switch + * @param cscMatrixR0C0 [IN] Csc_matrix_r0_c0 + * @param cscMatrixR0C1 [IN] Csc_matrix_r0_c1 + * @param cscMatrixR0C2 [IN] Csc_matrix_r0_c2 + * @param cscMatrixR1C0 [IN] Csc_matrix_r1_c0 + * @param cscMatrixR1C1 [IN] Csc_matrix_r1_c1 + * @param cscMatrixR1C2 [IN] Csc_matrix_r1_c2 + * @param cscMatrixR2C0 [IN] Csc_matrix_r2_c0 + * @param cscMatrixR2C1 [IN] Csc_matrix_r2_c1 + * @param cscMatrixR2C2 [IN] Csc_matrix_r2_c2 + * @param cscOutputBiasR0 [IN] Output Bias for RGB to YUV, element of row 0, unsigned number + * @param cscOutputBiasR1 [IN] Output Bias for RGB to YUV, element of row 1, unsigned number + * @param cscOutputBiasR2 [IN] Output Bias for RGB to YUV, element of row 2, unsigned number + * @param cscInputBiasR0 [IN] Input Bias for YUV to RGB, element of row 0, unsigned number + * @param cscInputBiasR1 [IN] Input Bias for YUV to RGB, element of row 1, unsigned number + * @param cscInputBiasR2 [IN] Input Bias for YUV to RGB, element of row 2, unsigned number + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCscParams(aclmdlAIPP *aippParmsSet, int8_t csc_switch, int16_t cscMatrixR0C0, + int16_t cscMatrixR0C1, int16_t cscMatrixR0C2, int16_t cscMatrixR1C0, + int16_t cscMatrixR1C1, int16_t cscMatrixR1C2, int16_t cscMatrixR2C0, + int16_t cscMatrixR2C1, int16_t cscMatrixR2C2, + uint8_t cscOutputBiasR0, uint8_t cscOutputBiasR1, + uint8_t cscOutputBiasR2, uint8_t cscInputBiasR0, + uint8_t cscInputBiasR1, uint8_t cscInputBiasR2); + +/** + * @ingroup AscendCL + * @brief set rb/ub swap switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param rbuvSwapSwitch [IN] rb/ub swap switch + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPRbuvSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t rbuvSwapSwitch); + +/** + * @ingroup AscendCL + * @brief set RGBA->ARGB, YUVA->AYUV swap switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param axSwapSwitch [IN] RGBA->ARGB, YUVA->AYUV swap switch + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPAxSwapSwitch(aclmdlAIPP *aippParmsSet, int8_t axSwapSwitch); + +/** + * @ingroup AscendCL + * @brief set source image of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param srcImageSizeW [IN] Source image width + * @param srcImageSizeH [IN] Source image height + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPSrcImageSize(aclmdlAIPP *aippParmsSet, int32_t srcImageSizeW, + int32_t srcImageSizeH); + +/** + * @ingroup AscendCL + * @brief set resize switch of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param scfSwitch [IN] Resize switch + * @param scfInputSizeW [IN] Input width of scf + * @param scfInputSizeH [IN] Input height of scf + * @param scfOutputSizeW [IN] Output width of scf + * @param scfOutputSizeH [IN] Output height of scf + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPScfParams(aclmdlAIPP *aippParmsSet, int8_t scfSwitch, int32_t scfInputSizeW, + int32_t scfInputSizeH, int32_t scfOutputSizeW, + int32_t scfOutputSizeH, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set cropParams of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param cropSwitch [IN] Crop switch + * @param cropStartPosW [IN] The start horizontal position of cropping + * @param cropStartPosH [IN] The start vertical position of cropping + * @param cropSizeW [IN] Crop width + * @param cropSizeH [IN] Crop height + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPCropParams(aclmdlAIPP *aippParmsSet, int8_t cropSwitch, int32_t cropStartPosW, + int32_t cropStartPosH, int32_t cropSizeW, int32_t cropSizeH, + uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set paddingParams of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param paddingSwitch [IN] Padding switch + * @param paddingSizeTop [IN] Top padding size + * @param paddingSizeBottom [IN] Bottom padding size + * @param paddingSizeLeft [IN] Left padding size + * @param paddingSizeRight [IN] Right padding size + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPaddingParams(aclmdlAIPP *aippParmsSet, int8_t paddingSwitch, + int32_t paddingSizeTop, int32_t paddingSizeBottom, + int32_t paddingSizeLeft, int32_t paddingSizeRight, + uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set DtcPixelMean of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelMeanChn0 [IN] Mean value of channel 0 + * @param dtcPixelMeanChn1 [IN] Mean value of channel 1 + * @param dtcPixelMeanChn2 [IN] Mean value of channel 2 + * @param dtcPixelMeanChn3 [IN] Mean value of channel 3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMean(aclmdlAIPP *aippParmsSet, int16_t dtcPixelMeanChn0, + int16_t dtcPixelMeanChn1, int16_t dtcPixelMeanChn2, + int16_t dtcPixelMeanChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set DtcPixelMin of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelMinChn0 [IN] Min value of channel 0 + * @param dtcPixelMinChn1 [IN] Min value of channel 1 + * @param dtcPixelMinChn2 [IN] Min value of channel 2 + * @param dtcPixelMinChn3 [IN] Min value of channel 3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPDtcPixelMin(aclmdlAIPP *aippParmsSet, float dtcPixelMinChn0, + float dtcPixelMinChn1, float dtcPixelMinChn2, + float dtcPixelMinChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set PixelVarReci of type aclmdlAIPP + * + * @param aippParmsSet [OUT] Pointer for aclmdlAIPP + * @param dtcPixelVarReciChn0 [IN] sfr_dtc_pixel_variance_reci_ch0 + * @param dtcPixelVarReciChn1 [IN] sfr_dtc_pixel_variance_reci_ch1 + * @param dtcPixelVarReciChn2 [IN] sfr_dtc_pixel_variance_reci_ch2 + * @param dtcPixelVarReciChn3 [IN] sfr_dtc_pixel_variance_reci_ch3 + * @param batchIndex [IN] Batch parameter index + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPPixelVarReci(aclmdlAIPP *aippParmsSet, float dtcPixelVarReciChn0, + float dtcPixelVarReciChn1, float dtcPixelVarReciChn2, + float dtcPixelVarReciChn3, uint64_t batchIndex); + +/** + * @ingroup AscendCL + * @brief set aipp parameters to model + * + * @param modelId [IN] model id + * @param dataset [IN] Pointer of dataset + * @param index [IN] index of input for aipp data(ACL_DYNAMIC_AIPP_NODE) + * @param aippParmsSet [IN] Pointer for aclmdlAIPP + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem | + * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetInputAIPP(uint32_t modelId, aclmdlDataset *dataset, size_t index, + const aclmdlAIPP *aippParmsSet); + +/** + * @ingroup AscendCL + * @brief set aipp parameters to model + * + * @param modelId [IN] model id + * @param dataset [IN] Pointer of dataset + * @param index [IN] index of input for data which linked dynamic aipp(ACL_DATA_WITH_DYNAMIC_AIPP) + * @param aippParmsSet [IN] Pointer for aclmdlAIPP + * + * @retval ACL_SUCCESS The function is successfully executed. 
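+ *
+ * For orientation, a hedged usage sketch (illustrative only, not part of the
+ * original header; it assumes the aclmdlCreateAIPP/aclmdlDestroyAIPP pair
+ * referenced above, a batch count of 1 and hypothetical sizes, and omits
+ * error handling):
+ *   aclmdlAIPP *aipp = aclmdlCreateAIPP(1);
+ *   aclmdlSetAIPPSrcImageSize(aipp, 224, 224);
+ *   aclmdlSetInputAIPP(modelId, dataset, aippIndex, aipp);
+ *   aclmdlDestroyAIPP(aipp);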
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlSetAIPPByInputIndex(uint32_t modelId, aclmdlDataset *dataset, size_t index,
+                                                       const aclmdlAIPP *aippParmsSet);
+
+/**
+ * @ingroup AscendCL
+ * @brief get input aipp type
+ *
+ * @param modelId [IN] model id
+ * @param index [IN] index of input
+ * @param type [OUT] aipp type for input. Refer to aclmdlInputAippType(enum)
+ * @param dynamicAttachedDataIndex [OUT] index for dynamic attached data(ACL_DYNAMIC_AIPP_NODE)
+ *        valid when type is ACL_DATA_WITH_DYNAMIC_AIPP, invalid value is ACL_INVALID_NODE_INDEX
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName | aclmdlCreateAIPP
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetAippType(uint32_t modelId, size_t index, aclmdlInputAippType *type,
+                                               size_t *dynamicAttachedDataIndex);
+
+/**
+ * @ingroup AscendCL
+ * @brief get static aipp parameters from model
+ *
+ * @param modelId [IN] model id
+ * @param index [IN] index of tensor
+ * @param aippinfo [OUT] Pointer for static aipp info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval ACL_ERROR_MODEL_AIPP_NOT_EXIST The tensor of index is not configured with aipp
+ * @retval OtherValues Failure
+ *
+ * @see aclmdlLoadFromFile | aclmdlLoadFromMem | aclmdlLoadFromFileWithMem |
+ * aclmdlLoadFromMemWithMem | aclmdlGetInputIndexByName
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlGetFirstAippInfo(uint32_t modelId, size_t index, aclAippInfo *aippinfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief get op description info
+ *
+ * @param deviceId [IN] device id
+ * @param streamId [IN] stream id
+ * @param taskId [IN] task id
+ * @param opName [OUT] pointer to op name
+ * @param opNameLen [IN] the length of op name
+ * @param inputDesc [OUT] pointer to input description
+ * @param numInputs [OUT] the number of input tensors
+ * @param outputDesc [OUT] pointer to output description
+ * @param numOutputs [OUT] the number of output tensors
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlCreateAndGetOpDesc(uint32_t deviceId, uint32_t streamId, uint32_t taskId,
+                                                      char *opName, size_t opNameLen, aclTensorDesc **inputDesc,
+                                                      size_t *numInputs, aclTensorDesc **outputDesc,
+                                                      size_t *numOutputs);
+
+/**
+ * @ingroup AscendCL
+ * @brief init dump
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlInitDump();
+
+/**
+ * @ingroup AscendCL
+ * @brief set param of dump
+ *
+ * @param dumpCfgPath [IN] the path of dump config
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclmdlSetDump(const char *dumpCfgPath);
+
+/**
+ * @ingroup AscendCL
+ * @brief finalize dump.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
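+ *
+ * The dump interfaces above are typically used as a bracket around inference;
+ * a hedged sketch (the config path is hypothetical, error handling omitted):
+ *   aclmdlInitDump();
+ *   aclmdlSetDump("/home/user/acl_dump.json");
+ *   // ... load and execute the model ...
+ *   aclmdlFinalizeDump();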
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlFinalizeDump(); + +/** + * @ingroup AscendCL + * @brief load model with config + * + * @param handle [IN] pointer to model config handle + * @param modelId [OUT] pointer to model id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlLoadWithConfig(const aclmdlConfigHandle *handle, uint32_t *modelId); + +/** + * @ingroup AscendCL + * @brief create model config handle of type aclmdlConfigHandle + * + * @retval the aclmdlConfigHandle pointer + * + * @see aclmdlDestroyConfigHandle + */ +ACL_FUNC_VISIBILITY aclmdlConfigHandle *aclmdlCreateConfigHandle(); + +/** + * @ingroup AscendCL + * @brief destroy data of type aclmdlConfigHandle + * + * @param handle [IN] pointer to model config handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclmdlCreateConfigHandle + */ +ACL_FUNC_VISIBILITY aclError aclmdlDestroyConfigHandle(aclmdlConfigHandle *handle); + +/** + * @ingroup AscendCL + * @brief set config for model load + * + * @param handle [OUT] pointer to model config handle + * @param attr [IN] config attr in model config handle to be set + * @param attrValue [IN] pointer to model config value + * @param valueSize [IN] memory size of attrValue + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclmdlSetConfigOpt(aclmdlConfigHandle *handle, aclmdlConfigAttr attr, + const void *attrValue, size_t valueSize); + +/** + * @ingroup AscendCL + * @brief get real tensor name from modelDesc + * + * @param modelDesc [IN] pointer to modelDesc + * @param name [IN] tensor name + * + * @retval the pointer of real tensor name + * @retval Failure return NULL + */ +ACL_FUNC_VISIBILITY const char *aclmdlGetTensorRealName(const aclmdlDesc *modelDesc, const char *name); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_MODEL_H_ diff --git a/inc/external/acl/acl_op.h b/inc/external/acl/acl_op.h new file mode 100644 index 00000000..d2e59bfb --- /dev/null +++ b/inc/external/acl/acl_op.h @@ -0,0 +1,504 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef INC_EXTERNAL_ACL_ACL_OP_H_
+#define INC_EXTERNAL_ACL_ACL_OP_H_
+
+#include "acl_base.h"
+#include "acl_rt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct aclopHandle aclopHandle;
+typedef struct aclopAttr aclopAttr;
+typedef struct aclopKernelDesc aclopKernelDesc;
+
+typedef void (*aclDataDeallocator)(void *data, size_t length);
+
+static const int ACL_COMPILE_FLAG_BIN_SELECTOR = 1;
+
+typedef enum aclEngineType {
+  ACL_ENGINE_SYS,
+  ACL_ENGINE_AICORE,
+  ACL_ENGINE_VECTOR,
+} aclopEngineType;
+
+/**
+ * @ingroup AscendCL
+ * @brief Set base directory that contains single op models
+ *
+ * @par Restriction
+ * The aclopSetModelDir interface can be called only once in a process.
+ * @param modelDir [IN] path of the directory
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetModelDir(const char *modelDir);
+
+/**
+ * @ingroup AscendCL
+ * @brief load single op models from memory
+ *
+ * @par Restriction
+ * The aclopLoad interface can be called more than once in a process.
+ * @param model [IN] address of single op models
+ * @param modelSize [IN] size of single op models
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopLoad(const void *model, size_t modelSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief create data of type aclopAttr
+ *
+ * @retval pointer to created instance.
+ * @retval nullptr if out of memory
+ */
+ACL_FUNC_VISIBILITY aclopAttr *aclopCreateAttr();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy data of type aclopAttr
+ *
+ * @param attr [IN] pointer to the instance of aclopAttr
+ */
+ACL_FUNC_VISIBILITY void aclopDestroyAttr(const aclopAttr *attr);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is bool
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param attrValue [IN] attribute value
+ *                       false if attrValue is 0, true otherwise.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrBool(aclopAttr *attr, const char *attrName, uint8_t attrValue);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is int64_t
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param attrValue [IN] attribute value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrInt(aclopAttr *attr, const char *attrName, int64_t attrValue);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is float
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param attrValue [IN] attribute value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrFloat(aclopAttr *attr, const char *attrName, float attrValue);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is string
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param attrValue [IN] attribute value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
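+ *
+ * A hedged sketch of building an attribute set with the setters above
+ * (attribute names and values are hypothetical, error handling omitted):
+ *   aclopAttr *attr = aclopCreateAttr();
+ *   aclopSetAttrBool(attr, "transpose_a", 0);
+ *   aclopSetAttrString(attr, "padding", "SAME");
+ *   // ... pass attr to an execute/compile interface ...
+ *   aclopDestroyAttr(attr);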
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrString(aclopAttr *attr, const char *attrName, const char *attrValue);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is list of bools
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numValues [IN] number of values
+ * @param values [IN] pointer to values; each value is false if 0, true otherwise.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListBool(aclopAttr *attr, const char *attrName, int numValues,
+                                                  const uint8_t *values);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is list of ints
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numValues [IN] number of values
+ * @param values [IN] pointer to values
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListInt(aclopAttr *attr, const char *attrName, int numValues,
+                                                 const int64_t *values);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is list of floats
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numValues [IN] number of values
+ * @param values [IN] pointer to values
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListFloat(aclopAttr *attr, const char *attrName, int numValues,
+                                                   const float *values);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is list of strings
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numValues [IN] number of values
+ * @param values [IN] pointer to values
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListString(aclopAttr *attr, const char *attrName, int numValues,
+                                                    const char **values);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. The type of the attribute is list of list of ints
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numLists [IN] number of lists
+ * @param numValues [IN] pointer to number of values of each list
+ * @param values [IN] pointer to values
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListListInt(aclopAttr *attr, const char *attrName, int numLists,
+                                                     const int *numValues, const int64_t *const values[]);
+
+/**
+ * @ingroup AscendCL
+ * @brief Load and execute the specified operator asynchronously
+ *
+ * @par Restriction
+ * @li The input and output organization of each operator is different,
+ * and the application needs to organize the operator strictly
+ * according to the operator input and output parameters when calling.
+ * @li When the user calls aclopExecute,
+ * the ACL finds the corresponding task according to the optype,
+ * the description of the input tensor,
+ * the description of the output tensor, and attr, and issues the execution.
+ *
+ * @param opType [IN] type of op
+ * @param numInputs [IN] number of inputs
+ * @param inputDesc [IN] pointer to array of input tensor descriptions
+ * @param inputs [IN] pointer to array of input buffers
+ * @param numOutputs [IN] number of outputs
+ * @param outputDesc [IN] pointer to array of output tensor descriptions
+ * @param outputs [OUT] pointer to array of output buffers
+ * @param attr [IN] pointer to instance of aclopAttr.
+ *                  may pass nullptr if the op has no attribute
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_DEPRECATED_MESSAGE("aclopExecute is deprecated, use aclopExecuteV2 instead")
+ACL_FUNC_VISIBILITY aclError aclopExecute(const char *opType, int numInputs, const aclTensorDesc *const inputDesc[],
+                                          const aclDataBuffer *const inputs[], int numOutputs,
+                                          const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[],
+                                          const aclopAttr *attr, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Load and execute the specified operator
+ *        The difference with aclopExecute is that aclopExecuteV2 will refresh outputDesc
+ *
+ * @par Restriction
+ * @li The input and output organization of each operator is different,
+ * and the application needs to organize the operator strictly
+ * according to the operator input and output parameters when calling.
+ * @li When the user calls aclopExecuteV2,
+ * the ACL finds the corresponding task according to the optype,
+ * the description of the input tensor,
+ * the description of the output tensor, and attr, and issues the execution.
+ *
+ * @param opType [IN] type of op
+ * @param numInputs [IN] number of inputs
+ * @param inputDesc [IN] pointer to array of input tensor descriptions
+ * @param inputs [IN] pointer to array of input buffers
+ * @param numOutputs [IN] number of outputs
+ * @param outputDesc [IN|OUT] pointer to array of output tensor descriptions
+ * @param outputs [OUT] pointer to array of output buffers
+ * @param attr [IN] pointer to instance of aclopAttr.
+ *                  may pass nullptr if the op has no attribute
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopExecuteV2(const char *opType, int numInputs, aclTensorDesc *inputDesc[],
+                                            aclDataBuffer *inputs[], int numOutputs, aclTensorDesc *outputDesc[],
+                                            aclDataBuffer *outputs[], aclopAttr *attr, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create an instance of aclopHandle.
+ *
+ * @param opType [IN] type of op
+ * @param numInputs [IN] number of inputs
+ * @param inputDesc [IN] pointer to array of input tensor descriptions
+ * @param numOutputs [IN] number of outputs
+ * @param outputDesc [IN] pointer to array of output tensor descriptions
+ * @param opAttr [IN] pointer to instance of aclopAttr.
+ *                    may pass nullptr if the op has no attribute
+ * @param handle [OUT] pointer to the pointer to the handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
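+ *
+ * A hedged sketch of handle reuse (descriptors, buffers, attr and stream are
+ * assumed to exist already; error handling omitted):
+ *   aclopHandle *handle = NULL;
+ *   aclopCreateHandle("Add", 2, inputDesc, 1, outputDesc, attr, &handle);
+ *   aclopExecWithHandle(handle, 2, inputs, 1, outputs, stream);
+ *   aclopDestroyHandle(handle);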
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopCreateHandle(const char *opType, int numInputs,
+                                               const aclTensorDesc *const inputDesc[], int numOutputs,
+                                               const aclTensorDesc *const outputDesc[], const aclopAttr *opAttr,
+                                               aclopHandle **handle);
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy aclopHandle instance
+ *
+ * @param handle [IN] pointer to the instance of aclopHandle
+ */
+ACL_FUNC_VISIBILITY void aclopDestroyHandle(aclopHandle *handle);
+
+/**
+ * @ingroup AscendCL
+ * @brief execute an op with the handle.
+ *        can save op model matching cost compared with aclopExecute
+ *
+ * @param handle [IN] pointer to the instance of aclopHandle.
+ *                    The aclopCreateHandle interface has been called
+ *                    in advance to create aclopHandle type data.
+ * @param numInputs [IN] number of inputs
+ * @param inputs [IN] pointer to array of input buffers.
+ *                    The aclCreateDataBuffer interface has been called
+ *                    in advance to create aclDataBuffer type data.
+ * @param numOutputs [IN] number of outputs
+ * @param outputs [OUT] pointer to array of output buffers
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclopCreateHandle | aclCreateDataBuffer
+ */
+ACL_FUNC_VISIBILITY aclError aclopExecWithHandle(aclopHandle *handle, int numInputs,
+                                                 const aclDataBuffer *const inputs[], int numOutputs,
+                                                 aclDataBuffer *const outputs[], aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief cast data type
+ *
+ * @param srcDesc [IN] source tensor desc
+ * @param srcBuffer [IN] source tensor buffer
+ * @param dstDesc [IN] destination tensor desc
+ * @param dstBuffer [OUT] destination tensor buffer
+ * @param truncate [IN] do not truncate if value is 0, truncate otherwise
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopCast(const aclTensorDesc *srcDesc, const aclDataBuffer *srcBuffer,
+                                       const aclTensorDesc *dstDesc, aclDataBuffer *dstBuffer, uint8_t truncate,
+                                       aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create a handle for casting datatype
+ *
+ * @param srcDesc [IN] source tensor desc
+ * @param dstDesc [IN] destination tensor desc
+ * @param truncate [IN] do not truncate if value is 0, truncate otherwise
+ * @param handle [OUT] pointer to the pointer to the handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopCreateHandleForCast(aclTensorDesc *srcDesc, aclTensorDesc *dstDesc, uint8_t truncate,
+                                                      aclopHandle **handle);
+
+/**
+ * @ingroup AscendCL
+ * @brief create kernel
+ *
+ * @param opType [IN] op type
+ * @param kernelId [IN] kernel id
+ * @param kernelName [IN] kernel name
+ * @param binData [IN] kernel bin data
+ * @param binSize [IN] kernel bin size
+ * @param enginetype [IN] engine type
+ * @param deallocator [IN] callback function for deallocating bin data,
+ *                         null if bin data to be deallocated by caller
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see aclopCompile + */ +ACL_FUNC_VISIBILITY aclError aclopCreateKernel(const char *opType, const char *kernelId, const char *kernelName, + void *binData, int binSize, aclopEngineType enginetype, + aclDataDeallocator deallocator); + +/** + * @ingroup AscendCL + * @brief create kernel + * + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param opAttr [IN] pointer to instance of aclopAttr + * @param aclopKernelDesc [IN] pointer to instance of aclopKernelDesc + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +typedef aclError (*aclopCompileFunc)(int numInputs, const aclTensorDesc *const inputDesc[], int numOutputs, + const aclTensorDesc *const outputDesc[], const aclopAttr *opAttr, + aclopKernelDesc *aclopKernelDesc); + +/** + * @ingroup AscendCL + * @brief register compile function + * + * @param opType [IN] op type + * @param func [IN] compile function + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclopUnregisterCompileFunc + */ +ACL_FUNC_VISIBILITY aclError aclopRegisterCompileFunc(const char *opType, aclopCompileFunc func); + +/** + * @ingroup AscendCL + * @brief unregister compile function + * + * @param opType [IN] op type + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopUnregisterCompileFunc(const char *opType); + +/** + * @ingroup AscendCL + * @brief set kernel args + * + * @param kernelDesc [IN] pointer to instance of aclopKernelDesc + * @param kernelId [IN] kernel id + * @param blockDim [IN] block dim + * @param args [IN] args + * @param argSize [IN] size in bytes of args + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetKernelArgs(aclopKernelDesc *kernelDesc, const char *kernelId, uint32_t blockDim, + const void *args, uint32_t argSize); + +/** + * @ingroup AscendCL + * @brief set workspace sizes + * + * @param kernelDesc [IN] pointer to instance of aclopKernelDesc + * @param numWorkspaces [IN] number of workspaces + * @param workspaceSizes [IN] pointer to array of sizes of workspaces + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopSetKernelWorkspaceSizes(aclopKernelDesc *kernelDesc, int numWorkspaces, + size_t *workspaceSizes); + +/** + * @ingroup AscendCL + * @brief compile op with dynamic shape + * + * @param opType [IN] op type + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopUpdateParams(const char *opType, int numInputs, + const aclTensorDesc *const inputDesc[], int numOutputs, + const aclTensorDesc *const outputDesc[], const aclopAttr *attr); + +/** + * @ingroup AscendCL + * @brief inferShape the specified operator synchronously + * + * @param opType [IN] type of op + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param inputs [IN] pointer to array of input buffers + * @param numOutputs [IN] number of outputs + * @param outputDesc [OUT] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclopInferShape(const char *opType, int numInputs, aclTensorDesc *inputDesc[], + aclDataBuffer *inputs[], int numOutputs, aclTensorDesc *outputDesc[], + aclopAttr *attr); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_OP_H_ diff --git a/inc/external/acl/acl_op_compiler.h b/inc/external/acl/acl_op_compiler.h new file mode 100644 index 00000000..d9d1b3da --- /dev/null +++ b/inc/external/acl/acl_op_compiler.h @@ -0,0 +1,121 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ +#define INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_ + +#include "acl_base.h" +#include "acl_op.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum aclCompileType { ACL_COMPILE_SYS, ACL_COMPILE_UNREGISTERED } aclopCompileType; + +typedef enum { + ACL_PRECISION_MODE, + ACL_AICORE_NUM, + ACL_AUTO_TUNE_MODE, + ACL_OP_SELECT_IMPL_MODE, + ACL_OPTYPELIST_FOR_IMPLMODE, + ACL_OP_DEBUG_LEVEL, + ACL_DEBUG_DIR, + ACL_OP_COMPILER_CACHE_MODE, + ACL_OP_COMPILER_CACHE_DIR, + ACL_OP_PERFORMANCE_MODE +} aclCompileOpt; + +typedef enum aclCompileFlag { ACL_OP_COMPILE_DEFAULT, ACL_OP_COMPILE_FUZZ } aclOpCompileFlag; + +/** + * @ingroup AscendCL + * @brief compile op + * + * @param opType [IN] op type + * @param numInputs [IN] number of inputs + * @param inputDesc [IN] pointer to array of input tensor descriptions + * @param numOutputs [IN] number of outputs + * @param outputDesc [IN] pointer to array of output tensor descriptions + * @param attr [IN] pointer to instance of aclopAttr. + * may pass nullptr if the op has no attribute + * @param engineType [IN] engine type + * @param compileFlag [IN] compile flag + * @param opPath [IN] path of op + * + * @retval ACL_SUCCESS The function is successfully executed. 
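+ *
+ * A hedged sketch (op type and descriptors are hypothetical; a null opPath
+ * is assumed to be acceptable here, error handling omitted):
+ *   aclopCompile("MyOp", 1, inputDesc, 1, outputDesc, NULL,
+ *                ACL_ENGINE_AICORE, ACL_COMPILE_SYS, NULL);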
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopCompile(const char *opType, int numInputs, const aclTensorDesc *const inputDesc[],
+                                          int numOutputs, const aclTensorDesc *const outputDesc[],
+                                          const aclopAttr *attr, aclopEngineType engineType,
+                                          aclopCompileType compileFlag, const char *opPath);
+
+/**
+ * @ingroup AscendCL
+ * @brief compile and execute op
+ *
+ * @param opType [IN] op type
+ * @param numInputs [IN] number of inputs
+ * @param inputDesc [IN] pointer to array of input tensor descriptions
+ * @param inputs [IN] pointer to array of input buffers
+ * @param numOutputs [IN] number of outputs
+ * @param outputDesc [IN] pointer to array of output tensor descriptions
+ * @param outputs [OUT] pointer to array of output buffers
+ * @param attr [IN] pointer to instance of aclopAttr.
+ *                  may pass nullptr if the op has no attribute
+ * @param engineType [IN] engine type
+ * @param compileFlag [IN] compile flag
+ * @param opPath [IN] path of op
+ * @param stream [IN] stream handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopCompileAndExecute(
+    const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], const aclDataBuffer *const inputs[],
+    int numOutputs, const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], const aclopAttr *attr,
+    aclopEngineType engineType, aclopCompileType compileFlag, const char *opPath, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief set compile option
+ *
+ * @param opt [IN] compile option
+ * @param value [IN] pointer for the option value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclSetCompileopt(aclCompileOpt opt, const char *value);
+
+/**
+ * @ingroup AscendCL
+ * @brief set compile flag
+ *
+ * @param flag [IN] compile flag, ACL_OP_COMPILE_DEFAULT means compile with default mode
+ *                  ACL_OP_COMPILE_FUZZ means compile with fuzz mode
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetCompileFlag(aclOpCompileFlag flag);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INC_EXTERNAL_ACL_ACL_OP_COMPILER_H_
diff --git a/inc/external/acl/acl_prof.h b/inc/external/acl/acl_prof.h
new file mode 100644
index 00000000..3784d8c6
--- /dev/null
+++ b/inc/external/acl/acl_prof.h
@@ -0,0 +1,329 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INC_EXTERNAL_ACL_PROF_H_
+#define INC_EXTERNAL_ACL_PROF_H_
+
+#include "acl_base.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ACL_PROF_ACL_API 0x0001
+#define ACL_PROF_TASK_TIME 0x0002
+#define ACL_PROF_AICORE_METRICS 0x0004
+#define ACL_PROF_AICPU 0x0008
+
+/**
+ * @deprecated please use aclprofGetOpTypeLen and aclprofGetOpNameLen instead
+ */
+#define ACL_PROF_MAX_OP_NAME_LEN 257
+#define ACL_PROF_MAX_OP_TYPE_LEN 65
+
+typedef enum {
+  ACL_AICORE_ARITHMETIC_UTILIZATION = 0,
+  ACL_AICORE_PIPE_UTILIZATION = 1,
+  ACL_AICORE_MEMORY_BANDWIDTH = 2,
+  ACL_AICORE_L0B_AND_WIDTH = 3,
+  ACL_AICORE_RESOURCE_CONFLICT_RATIO = 4,
+  ACL_AICORE_NONE = 0xFF
+} aclprofAicoreMetrics;
+
+typedef struct aclprofConfig aclprofConfig;
+typedef struct aclprofStopConfig aclprofStopConfig;
+typedef struct aclprofAicoreEvents aclprofAicoreEvents;
+typedef struct aclprofSubscribeConfig aclprofSubscribeConfig;
+
+/**
+ * @ingroup AscendCL
+ * @brief profiling initialize
+ *
+ * @param profilerResultPath [IN] path of profiling result
+ * @param length [IN] length of profilerResultPath
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofFinalize
+ */
+ACL_FUNC_VISIBILITY aclError aclprofInit(const char *profilerResultPath, size_t length);
+
+/**
+ * @ingroup AscendCL
+ * @brief profiling finalize
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofInit
+ */
+ACL_FUNC_VISIBILITY aclError aclprofFinalize();
+
+/**
+ * @ingroup AscendCL
+ * @brief Start profiling modules by profilerConfig
+ *
+ * @param profilerConfig [IN] config of profiling
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofStop
+ */
+ACL_FUNC_VISIBILITY aclError aclprofStart(const aclprofConfig *profilerConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create data of type aclprofConfig
+ *
+ * @param deviceIdList [IN] list of device id
+ * @param deviceNums [IN] number of devices
+ * @param aicoreMetrics [IN] type of aicore metrics
+ * @param aicoreEvents [IN] pointer to aicore events, only support NULL now
+ * @param dataTypeConfig [IN] config modules need profiling
+ *
+ * @retval the aclprofConfig pointer
+ *
+ * @see aclprofDestroyConfig
+ */
+ACL_FUNC_VISIBILITY aclprofConfig *aclprofCreateConfig(uint32_t *deviceIdList, uint32_t deviceNums,
+                                                       aclprofAicoreMetrics aicoreMetrics,
+                                                       aclprofAicoreEvents *aicoreEvents, uint64_t dataTypeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy data of type aclprofConfig
+ *
+ * @param profilerConfig [IN] config of profiling
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofCreateConfig
+ */
+ACL_FUNC_VISIBILITY aclError aclprofDestroyConfig(const aclprofConfig *profilerConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief stop profiling modules by stopProfilingConfig
+ *
+ * @param profilerConfig [IN] pointer to stop config of profiling
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofStart
+ */
+ACL_FUNC_VISIBILITY aclError aclprofStop(const aclprofConfig *profilerConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief subscribe profiling data of model
+ *
+ * @param modelId [IN] the model id subscribed
+ * @param profSubscribeConfig [IN] pointer to config of model subscribe
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
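+ *
+ * The overall profiling lifecycle, as a hedged sketch (result path and
+ * device list are hypothetical, error handling omitted):
+ *   uint32_t devices[1] = {0};
+ *   aclprofInit("/home/user/prof", strlen("/home/user/prof"));
+ *   aclprofConfig *cfg = aclprofCreateConfig(devices, 1, ACL_AICORE_NONE, NULL,
+ *                                            ACL_PROF_ACL_API | ACL_PROF_TASK_TIME);
+ *   aclprofStart(cfg);
+ *   // ... run the workload to be profiled ...
+ *   aclprofStop(cfg);
+ *   aclprofDestroyConfig(cfg);
+ *   aclprofFinalize();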
+ * @retval OtherValues Failure
+ *
+ * @see aclprofModelUnSubscribe
+ */
+ACL_FUNC_VISIBILITY aclError aclprofModelSubscribe(uint32_t modelId, const aclprofSubscribeConfig *profSubscribeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief unsubscribe profiling data of model
+ *
+ * @param modelId [IN] the model id unsubscribed
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofModelSubscribe
+ */
+ACL_FUNC_VISIBILITY aclError aclprofModelUnSubscribe(uint32_t modelId);
+
+/**
+ * @ingroup AscendCL
+ * @brief create subscribe config
+ *
+ * @param timeInfoSwitch [IN] switch whether get time info from model
+ * @param aicoreMetrics [IN] aicore metrics
+ * @param fd [IN] pointer to write pipe
+ *
+ * @retval the aclprofSubscribeConfig pointer
+ *
+ * @see aclprofDestroySubscribeConfig
+ */
+ACL_FUNC_VISIBILITY aclprofSubscribeConfig *aclprofCreateSubscribeConfig(int8_t timeInfoSwitch,
+                                                                         aclprofAicoreMetrics aicoreMetrics, void *fd);
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy subscribe config
+ *
+ * @param profSubscribeConfig [IN] subscribe config
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclprofCreateSubscribeConfig
+ */
+ACL_FUNC_VISIBILITY aclError aclprofDestroySubscribeConfig(const aclprofSubscribeConfig *profSubscribeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief get size of op description
+ *
+ * @param opDescSize [OUT] size of op desc
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpDescSize(size_t *opDescSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief get op number from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param opNumber [OUT] op number of subscription data
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpNum(const void *opInfo, size_t opInfoLen, uint32_t *opNumber);
+
+/**
+ * @ingroup AscendCL
+ * @brief get length of op type from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ * @param opTypeLen [OUT] actual length of op type string
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpTypeLen(const void *opInfo, size_t opInfoLen, uint32_t index,
+                                                 size_t *opTypeLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief get op type from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ * @param opType [OUT] obtained op type string
+ * @param opTypeLen [IN] obtained length of op type string
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
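+ *
+ * A hedged sketch of walking subscription data read from the pipe (opInfo,
+ * opInfoLen and typeBuf are assumed to exist; error handling omitted):
+ *   uint32_t opNum = 0;
+ *   aclprofGetOpNum(opInfo, opInfoLen, &opNum);
+ *   for (uint32_t i = 0; i < opNum; ++i) {
+ *     size_t typeLen = 0;
+ *     aclprofGetOpTypeLen(opInfo, opInfoLen, i, &typeLen);
+ *     aclprofGetOpType(opInfo, opInfoLen, i, typeBuf, typeLen);
+ *   }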
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpType(const void *opInfo, size_t opInfoLen, uint32_t index, char *opType,
+                                              size_t opTypeLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief get length of op name from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ * @param opNameLen [OUT] actual length of op name string
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpNameLen(const void *opInfo, size_t opInfoLen, uint32_t index,
+                                                 size_t *opNameLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief get op name from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ * @param opName [OUT] obtained op name string
+ * @param opNameLen [IN] obtained length of op name string
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetOpName(const void *opInfo, size_t opInfoLen, uint32_t index, char *opName,
+                                              size_t opNameLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief get start time of specified op from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ *
+ * @retval start time(us) of specified op with timestamp
+ * @retval 0 for failed
+ */
+ACL_FUNC_VISIBILITY uint64_t aclprofGetOpStart(const void *opInfo, size_t opInfoLen, uint32_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get end time of specified op from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ *
+ * @retval end time(us) of specified op with timestamp
+ * @retval 0 for failed
+ */
+ACL_FUNC_VISIBILITY uint64_t aclprofGetOpEnd(const void *opInfo, size_t opInfoLen, uint32_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get execution time of specified op from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ *
+ * @retval execution time(us) of specified op with timestamp
+ * @retval 0 for failed
+ */
+ACL_FUNC_VISIBILITY uint64_t aclprofGetOpDuration(const void *opInfo, size_t opInfoLen, uint32_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief get model id from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of op array in opInfo
+ *
+ * @retval model id of subscription data
+ * @retval 0 for failed
+ */
+ACL_FUNC_VISIBILITY size_t aclprofGetModelId(const void *opInfo, size_t opInfoLen, uint32_t index);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INC_EXTERNAL_ACL_PROF_H_
diff --git a/inc/external/acl/acl_rt.h b/inc/external/acl/acl_rt.h
new file mode 100644
index 00000000..5ee70724
--- /dev/null
+++ b/inc/external/acl/acl_rt.h
@@ -0,0 +1,958 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INC_EXTERNAL_ACL_ACL_RT_H_
+#define INC_EXTERNAL_ACL_ACL_RT_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "acl_base.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ACL_EVENT_TIME_LINE 0x00000008u
+
+typedef enum aclrtRunMode {
+  ACL_DEVICE,
+  ACL_HOST,
+} aclrtRunMode;
+
+typedef enum aclrtTsId {
+  ACL_TS_ID_AICORE = 0,
+  ACL_TS_ID_AIVECTOR = 1,
+  ACL_TS_ID_RESERVED = 2,
+} aclrtTsId;
+
+typedef enum aclrtEventStatus {
+  ACL_EVENT_STATUS_COMPLETE = 0,
+  ACL_EVENT_STATUS_NOT_READY = 1,
+  ACL_EVENT_STATUS_RESERVED = 2,
+} aclrtEventStatus;
+
+typedef enum aclrtCallbackBlockType {
+  ACL_CALLBACK_NO_BLOCK,
+  ACL_CALLBACK_BLOCK,
+} aclrtCallbackBlockType;
+
+typedef enum aclrtMemcpyKind {
+  ACL_MEMCPY_HOST_TO_HOST,
+  ACL_MEMCPY_HOST_TO_DEVICE,
+  ACL_MEMCPY_DEVICE_TO_HOST,
+  ACL_MEMCPY_DEVICE_TO_DEVICE,
+} aclrtMemcpyKind;
+
+typedef enum aclrtMemMallocPolicy {
+  ACL_MEM_MALLOC_HUGE_FIRST,
+  ACL_MEM_MALLOC_HUGE_ONLY,
+  ACL_MEM_MALLOC_NORMAL_ONLY,
+  ACL_MEM_MALLOC_HUGE_FIRST_P2P,
+  ACL_MEM_MALLOC_HUGE_ONLY_P2P,
+  ACL_MEM_MALLOC_NORMAL_ONLY_P2P,
+} aclrtMemMallocPolicy;
+
+typedef enum aclrtMemAttr {
+  ACL_DDR_MEM,
+  ACL_HBM_MEM,
+  ACL_DDR_MEM_HUGE,
+  ACL_DDR_MEM_NORMAL,
+  ACL_HBM_MEM_HUGE,
+  ACL_HBM_MEM_NORMAL,
+  ACL_DDR_MEM_P2P_HUGE,
+  ACL_DDR_MEM_P2P_NORMAL,
+  ACL_HBM_MEM_P2P_HUGE,
+  ACL_HBM_MEM_P2P_NORMAL,
+} aclrtMemAttr;
+
+typedef enum aclrtGroupAttr {
+  ACL_GROUP_AICORE_INT,
+  ACL_GROUP_AIV_INT,
+  ACL_GROUP_AIC_INT,
+  ACL_GROUP_SDMANUM_INT,
+  ACL_GROUP_ASQNUM_INT,
+  ACL_GROUP_GROUPID_INT
+} aclrtGroupAttr;
+
+typedef struct tagRtGroupInfo aclrtGroupInfo;
+
+typedef struct rtExceptionInfo aclrtExceptionInfo;
+
+typedef void (*aclrtCallback)(void *userData);
+
+typedef void (*aclrtExceptionInfoCallback)(aclrtExceptionInfo *exceptionInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set a callback function to handle exception information
+ *
+ * @param callback [IN] callback function to handle exception information
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSetExceptionInfoCallback(aclrtExceptionInfoCallback callback);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get task id from exception information
+ *
+ * @param info [IN] pointer of exception information
+ *
+ * @retval The task id from exception information
+ * @retval 0xFFFFFFFF if info is null
+ */
+ACL_FUNC_VISIBILITY uint32_t aclrtGetTaskIdFromExceptionInfo(const aclrtExceptionInfo *info);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream id from exception information
+ *
+ * @param info [IN] pointer of exception information
+ *
+ * @retval The stream id from exception information
+ * @retval 0xFFFFFFFF if info is null
+ */
+ACL_FUNC_VISIBILITY uint32_t aclrtGetStreamIdFromExceptionInfo(const aclrtExceptionInfo *info);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get thread id from exception information
+ *
+ * @param info [IN] pointer of exception information
+ *
+ * @retval The thread id of the failed task
+ * @retval 0xFFFFFFFF if info is null
+ */
+ACL_FUNC_VISIBILITY uint32_t aclrtGetThreadIdFromExceptionInfo(const aclrtExceptionInfo *info);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get device id from exception information
+ *
+ * @param info [IN] pointer of exception information
+ *
+ * @retval The device id of the failed task
+ * @retval 0xFFFFFFFF if info is null
+ */
+ACL_FUNC_VISIBILITY uint32_t aclrtGetDeviceIdFromExceptionInfo(const aclrtExceptionInfo *info);
+
+/**
+ * @ingroup AscendCL
+ * @brief Specify the thread that handles the callback functions on the Stream
+ *
+ * @param threadId [IN] thread ID
+ * @param stream [IN] stream handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSubscribeReport(uint64_t threadId, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Add a callback function to be executed on the host
+ *        to the task queue of the Stream
+ *
+ * @param fn [IN] Specify the callback function to be added
+ *                The function prototype of the callback function is:
+ *                typedef void (*aclrtCallback)(void *userData);
+ * @param userData [IN] User data to be passed to the callback function
+ * @param blockType [IN] callback block type
+ * @param stream [IN] stream handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtLaunchCallback(aclrtCallback fn, void *userData, aclrtCallbackBlockType blockType,
+                                                 aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief After waiting for a specified time, trigger callback processing
+ *
+ * @par Function
+ * The thread processing callback specified by
+ * the aclrtSubscribeReport interface
+ *
+ * @param timeout [IN] timeout value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtSubscribeReport
+ */
+ACL_FUNC_VISIBILITY aclError aclrtProcessReport(int32_t timeout);
+
+/**
+ * @ingroup AscendCL
+ * @brief Cancel thread registration,
+ *        the callback function on the specified Stream
+ *        is no longer processed by the specified thread
+ *
+ * @param threadId [IN] thread ID
+ * @param stream [IN] stream handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
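+ *
+ * The subscribe/launch/process pattern, as a hedged sketch (thread creation
+ * and the MyCallback function are hypothetical, error handling omitted):
+ *   aclrtSubscribeReport(threadId, stream);
+ *   aclrtLaunchCallback(MyCallback, userData, ACL_CALLBACK_BLOCK, stream);
+ *   // in the subscribed thread:
+ *   while (running) { (void)aclrtProcessReport(1000); }
+ *   aclrtUnSubscribeReport(threadId, stream);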
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtUnSubscribeReport(uint64_t threadId, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create a context and associate it with the calling thread
+ *
+ * @par Function
+ * The following use cases are supported:
+ * @li If you don't call the aclrtCreateContext interface
+ * to explicitly create the context,
+ * the system will use the default context, which is implicitly created
+ * when the aclrtSetDevice interface is called.
+ * @li If multiple contexts are created in a process
+ * (there is no limit on the number of contexts),
+ * the current thread can only use one of them at the same time.
+ * It is recommended to explicitly specify the context of the current thread
+ * through the aclrtSetCurrentContext interface to increase
+ * the maintainability of the program.
+ *
+ * @param context [OUT] pointer to the created context
+ * @param deviceId [IN] device to create context on
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtSetDevice | aclrtSetCurrentContext
+ */
+ACL_FUNC_VISIBILITY aclError aclrtCreateContext(aclrtContext *context, int32_t deviceId);
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy context instance
+ *
+ * @par Function
+ * Can only destroy context created through aclrtCreateContext interface
+ *
+ * @param context [IN] the context to destroy
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtCreateContext
+ */
+ACL_FUNC_VISIBILITY aclError aclrtDestroyContext(aclrtContext context);
+
+/**
+ * @ingroup AscendCL
+ * @brief set the context of the thread
+ *
+ * @par Function
+ * The following scenarios are supported:
+ * @li If the aclrtCreateContext interface is called in a thread to explicitly
+ * create a Context (for example: ctx1), the thread's Context can be specified
+ * without calling the aclrtSetCurrentContext interface.
+ * The system uses ctx1 as the context of thread1 by default.
+ * @li If the aclrtCreateContext interface is not explicitly created,
+ * the system uses the default context as the context of the thread.
+ * At this time, the aclrtDestroyContext interface cannot be used to release
+ * the default context.
+ * @li If the aclrtSetCurrentContext interface is called multiple times to
+ * set the thread's Context, the last one prevails.
+ *
+ * @par Restriction
+ * @li If the device corresponding to the context set for the thread
+ * has been reset, you cannot set the context as the context of the thread,
+ * otherwise a business exception will result.
+ * @li It is recommended to use the context created in a thread.
+ * If the aclrtCreateContext interface is called in thread A to create a context,
+ * and the context is used in thread B,
+ * the user must guarantee the execution order of tasks in the same stream
+ * under the same context in two threads.
+ *
+ * @param context [IN] the current context of the thread
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtCreateContext | aclrtDestroyContext
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSetCurrentContext(aclrtContext context);
+
+/**
+ * @ingroup AscendCL
+ * @brief get the context of the thread
+ *
+ * @par Function
+ * If the user calls the aclrtSetCurrentContext interface
+ * multiple times to set the context of the current thread,
+ * then the last set context is obtained
+ *
+ * @param context [OUT] the current context of the thread
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtSetCurrentContext
+ */
+ACL_FUNC_VISIBILITY aclError aclrtGetCurrentContext(aclrtContext *context);
+
+/**
+ * @ingroup AscendCL
+ * @brief Specify the device to use for the operation
+ *        implicitly create the default context and the default stream
+ *
+ * @par Function
+ * The following use cases are supported:
+ * @li Device can be specified in the process or thread.
+ * If you call the aclrtSetDevice interface multiple
+ * times to specify the same device,
+ * you only need to call the aclrtResetDevice interface to reset the device.
+ * @li The same device can be specified for operation
+ * in different processes or threads.
+ * @li Device is specified in a process,
+ * and multiple threads in the process can share this device to explicitly
+ * create a Context (aclrtCreateContext interface).
+ * @li In multi-device scenarios, you can switch to other devices
+ * through the aclrtSetDevice interface in the process.
+ *
+ * @param deviceId [IN] the device id
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtResetDevice | aclrtCreateContext
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSetDevice(int32_t deviceId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Reset the current operating Device and free resources on the device,
+ *        including the default context, the default stream,
+ *        and all streams created under the default context,
+ *        and synchronizes the interface.
+ *        If the task under the default context or stream has not been completed,
+ *        the system will wait for the task to complete before releasing it.
+ *
+ * @par Restriction
+ * @li Release the Context, Stream, and Event that are explicitly created
+ * on the device to be reset before resetting the device,
+ * otherwise business abnormalities may be caused.
+ * @li Recommended interface calling sequence:
+ * call aclrtDestroyEvent interface to release Event or
+ * call aclrtDestroyStream interface to release explicitly created Stream->
+ * call aclrtDestroyContext to release explicitly created Context->
+ * call aclrtResetDevice interface
+ *
+ * @param deviceId [IN] the device id
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtResetDevice(int32_t deviceId);
+
+/**
+ * @ingroup AscendCL
+ * @brief get target device of current thread
+ *
+ * @param deviceId [OUT] the device id
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtGetDevice(int32_t *deviceId);
+
+/**
+ * @ingroup AscendCL
+ * @brief get the run mode of the current application (host side or device side)
+ *
+ * @param runMode [OUT] the run mode
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
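+ *
+ * A hedged init/teardown sketch spanning the device and context interfaces
+ * above (device 0 is hypothetical, error handling omitted):
+ *   aclrtSetDevice(0);
+ *   aclrtContext ctx = NULL;
+ *   aclrtCreateContext(&ctx, 0);
+ *   aclrtSetCurrentContext(ctx);
+ *   // ... submit work ...
+ *   aclrtDestroyContext(ctx);
+ *   aclrtResetDevice(0);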
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetRunMode(aclrtRunMode *runMode); + +/** + * @ingroup AscendCL + * @brief Wait for compute device to finish + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSynchronizeDevice(void); + +/** + * @ingroup AscendCL + * @brief Set Scheduling TS + * + * @param tsId [IN] the ts id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtSetTsDevice(aclrtTsId tsId); + +/** + * @ingroup AscendCL + * @brief get total device number. + * + * @param count [OUT] the device number + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtGetDeviceCount(uint32_t *count); + +/** + * @ingroup AscendCL + * @brief create event instance + * + * @param event [OUT] created event + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateEvent(aclrtEvent *event); + +/** + * @ingroup AscendCL + * @brief create event instance with flag + * + * @param event [OUT] created event + * @param flag [IN] event flag + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtCreateEventWithFlag(aclrtEvent *event, uint32_t flag); + +/** + * @ingroup AscendCL + * @brief destroy event instance + * + * @par Function + * Only events created through the aclrtCreateEvent interface can be + * destroyed, synchronous interfaces. When destroying an event, + * the user must ensure that the tasks involved in the aclrtSynchronizeEvent + * interface or the aclrtStreamWaitEvent interface are completed before + * they are destroyed. + * + * @param event [IN] event to destroy + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateEvent | aclrtSynchronizeEvent | aclrtStreamWaitEvent + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyEvent(aclrtEvent event); + +/** + * @ingroup AscendCL + * @brief Record an Event in the Stream + * + * @param event [IN] event to record + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtRecordEvent(aclrtEvent event, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Reset an event + * + * @par Function + * Users need to make sure to wait for the tasks in the Stream + * to complete before resetting the Event + * + * @param event [IN] event to reset + * @param stream [IN] stream handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtResetEvent(aclrtEvent event, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Queries an event's status + * + * @param event [IN] event to query + * @param status [OUT] event status + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtQueryEvent(aclrtEvent event, aclrtEventStatus *status); + +/** + * @ingroup AscendCL + * @brief Block Host Running, wait event to be complete + * + * @param event [IN] event to wait + * + * @retval ACL_SUCCESS The function is successfully executed. 
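+ *
+ * A hedged timing sketch combining the event interfaces above with
+ * aclrtEventElapsedTime declared below (stream is assumed to exist,
+ * error handling omitted):
+ *   aclrtEvent start = NULL;
+ *   aclrtEvent end = NULL;
+ *   aclrtCreateEvent(&start);
+ *   aclrtCreateEvent(&end);
+ *   aclrtRecordEvent(start, stream);
+ *   // ... enqueue work on stream ...
+ *   aclrtRecordEvent(end, stream);
+ *   aclrtSynchronizeEvent(end);
+ *   float ms = 0.0f;
+ *   aclrtEventElapsedTime(&ms, start, end);
+ *   aclrtDestroyEvent(start);
+ *   aclrtDestroyEvent(end);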
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSynchronizeEvent(aclrtEvent event);
+
+/**
+ * @ingroup AscendCL
+ * @brief computes the elapsed time between events.
+ *
+ * @param ms [OUT] time between start and end in ms
+ * @param start [IN] starting event
+ * @param end [IN] ending event
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtCreateEvent | aclrtRecordEvent | aclrtSynchronizeStream
+ */
+ACL_FUNC_VISIBILITY aclError aclrtEventElapsedTime(float *ms, aclrtEvent start, aclrtEvent end);
+
+/**
+ * @ingroup AscendCL
+ * @brief alloc memory on device
+ *
+ * @par Function
+ * allocate size bytes of linear memory on the device
+ * and return the allocated memory through *devPtr
+ *
+ * @par Restriction
+ * @li The memory requested by the aclrtMalloc interface needs to be released
+ * through the aclrtFree interface.
+ * @li Before calling the media data processing interfaces,
+ * if you need to allocate device memory to store input or output data,
+ * you need to call acldvppMalloc to allocate that memory.
+ *
+ * @param devPtr [OUT] pointer to pointer to allocated memory on device
+ * @param size [IN] alloc memory size
+ * @param policy [IN] memory alloc policy
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtFree | acldvppMalloc | aclrtMallocCached
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMalloc(void **devPtr, size_t size, aclrtMemMallocPolicy policy);
+
+/**
+ * @ingroup AscendCL
+ * @brief allocate memory on device with cache
+ *
+ * @par Function
+ * allocate size bytes of linear memory on the device
+ * and return the allocated memory through *devPtr
+ *
+ * @par Restriction
+ * @li The memory requested by the aclrtMallocCached interface needs to be released
+ * through the aclrtFree interface.
+ *
+ * @param devPtr [OUT] pointer to pointer to allocated memory on device
+ * @param size [IN] alloc memory size
+ * @param policy [IN] memory alloc policy
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtFree | aclrtMalloc
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMallocCached(void **devPtr, size_t size, aclrtMemMallocPolicy policy);
+
+/**
+ * @ingroup AscendCL
+ * @brief flush cache data to ddr
+ *
+ * @param devPtr [IN] pointer to the data to be flushed to ddr
+ * @param size [IN] flush size
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMemFlush(void *devPtr, size_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief invalidate cache data
+ *
+ * @param devPtr [IN] pointer to the cache data to be invalidated
+ * @param size [IN] invalidate size
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMemInvalidate(void *devPtr, size_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief free device memory
+ *
+ * @par Function
+ * can only free memory allocated through the aclrtMalloc interface
+ *
+ * @param devPtr [IN] Pointer to memory to be freed
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtMalloc
+ */
+ACL_FUNC_VISIBILITY aclError aclrtFree(void *devPtr);
+
+/**
+ * @ingroup AscendCL
+ * @brief alloc memory on host
+ *
+ * @par Restriction
+ * @li The requested memory cannot be used in the Device
+ * and needs to be explicitly copied to the Device.
+ * @li The memory requested by the aclrtMallocHost interface + * needs to be released through the aclrtFreeHost interface. + * + * @param hostPtr [OUT] pointer to pointer to allocated memory on the host + * @param size [IN] alloc memory size + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtFreeHost + */ +ACL_FUNC_VISIBILITY aclError aclrtMallocHost(void **hostPtr, size_t size); + +/** + * @ingroup AscendCL + * @brief free host memory + * + * @par Function + * can only free memory allocated through the aclrtMallocHost interface + * + * @param hostPtr [IN] free memory pointer + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtMallocHost + */ +ACL_FUNC_VISIBILITY aclError aclrtFreeHost(void *hostPtr); + +/** + * @ingroup AscendCL + * @brief synchronous memory replication between host and device + * + * @param dst [IN] destination address pointer + * @param destMax [IN] Max length of the destination address memory + * @param src [IN] source address pointer + * @param count [IN] the length of byte to copy + * @param kind [IN] memcpy type + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemcpy(void *dst, size_t destMax, const void *src, size_t count, + aclrtMemcpyKind kind); + +/** + * @ingroup AscendCL + * @brief Initialize memory and set contents of memory to specified value + * + * @par Function + * The memory to be initialized is on the Host or device side, + * and the system determines whether + * it is host or device according to the address + * + * @param devPtr [IN] Starting address of memory + * @param maxCount [IN] Max length of destination address memory + * @param value [IN] Set value + * @param count [IN] The length of memory + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclrtMemset(void *devPtr, size_t maxCount, int32_t value, size_t count); + +/** + * @ingroup AscendCL + * @brief Asynchronous memory replication between Host and Device + * + * @par Function + * After calling this interface, + * be sure to call the aclrtSynchronizeStream interface to ensure that + * the task of memory replication has been completed + * + * @par Restriction + * @li For on-chip Device-to-Device memory copy, + * both the source and destination addresses must be 64-byte aligned + * + * @param dst [IN] destination address pointer + * @param destMax [IN] Max length of destination address memory + * @param src [IN] source address pointer + * @param count [IN] the number of byte to copy + * @param kind [IN] memcpy type + * @param stream [IN] asynchronized task stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
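+ *
+ * @par Example
+ * Host-to-device copy sketch using the synchronous interfaces above
+ * (illustrative only; error handling omitted):
+ * @code
+ * size_t size = 1024;
+ * void *hostBuf = NULL;
+ * void *devBuf = NULL;
+ * (void)aclrtMallocHost(&hostBuf, size);
+ * (void)aclrtMalloc(&devBuf, size, ACL_MEM_MALLOC_HUGE_FIRST);
+ * (void)aclrtMemcpy(devBuf, size, hostBuf, size, ACL_MEMCPY_HOST_TO_DEVICE);
+ * (void)aclrtFree(devBuf);
+ * (void)aclrtFreeHost(hostBuf);
+ * @endcode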
+ * @retval OtherValues Failure
+ *
+ * @see aclrtSynchronizeStream
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMemcpyAsync(void *dst, size_t destMax, const void *src, size_t count,
+                                              aclrtMemcpyKind kind, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Asynchronously initialize memory
+ * and set the contents of memory to the specified value
+ *
+ * @par Function
+ * The memory to be initialized is on the Host or device side,
+ * and the system determines whether
+ * it is host or device according to the address
+ *
+ * @param devPtr [IN] destination address pointer
+ * @param maxCount [IN] Max length of destination address memory
+ * @param value [IN] set value
+ * @param count [IN] the number of bytes to set
+ * @param stream [IN] asynchronized task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtSynchronizeStream
+ */
+ACL_FUNC_VISIBILITY aclError aclrtMemsetAsync(void *devPtr, size_t maxCount, int32_t value, size_t count,
+                                              aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create stream instance
+ *
+ * @param stream [OUT] the created stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtCreateStream(aclrtStream *stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy stream instance
+ *
+ * @par Function
+ * Can only destroy streams created through the aclrtCreateStream interface
+ *
+ * @par Restriction
+ * Before calling the aclrtDestroyStream interface to destroy
+ * the specified Stream, you need to call the aclrtSynchronizeStream interface
+ * to ensure that the tasks in the Stream have been completed.
+ *
+ * @param stream [IN] the stream to destroy
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtCreateStream | aclrtSynchronizeStream
+ */
+ACL_FUNC_VISIBILITY aclError aclrtDestroyStream(aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief block the host until all tasks
+ * in the specified stream have completed
+ *
+ * @param stream [IN] the stream to wait
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSynchronizeStream(aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Blocks the operation of the specified Stream until
+ * the specified Event is completed.
+ * Supports multiple streams waiting for the same event.
+ *
+ * @param stream [IN] the wait stream. If using the default Stream, set NULL
+ * @param event [IN] the event to wait
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtStreamWaitEvent(aclrtStream stream, aclrtEvent event);
+
+/**
+ * @ingroup AscendCL
+ * @brief set group
+ *
+ * @par Function
+ * set the task to the corresponding group
+ *
+ * @param groupId [IN] group id
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclrtGetGroupCount | aclrtGetAllGroupInfo | aclrtGetGroupInfoDetail
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSetGroup(int32_t groupId);
+
+/**
+ * @ingroup AscendCL
+ * @brief get the number of groups
+ *
+ * @par Function
+ * get the number of groups. If the number of groups is zero,
+ * it means that groups are not supported or no group has been created.
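+ *
+ * @par Example
+ * Stream lifecycle sketch for the stream interfaces above (illustrative
+ * only; hostBuf, devBuf, and size are assumed to be set up already):
+ * @code
+ * aclrtStream stream = NULL;
+ * (void)aclrtCreateStream(&stream);
+ * (void)aclrtMemcpyAsync(devBuf, size, hostBuf, size,
+ *                        ACL_MEMCPY_HOST_TO_DEVICE, stream);
+ * (void)aclrtSynchronizeStream(stream);  // wait for the async copy to finish
+ * (void)aclrtDestroyStream(stream);
+ * @endcode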
+ * + * @param count [OUT] the number of group + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + */ +ACL_FUNC_VISIBILITY aclError aclrtGetGroupCount(uint32_t *count); + +/** + * @ingroup AscendCL + * @brief create group information + * + * @retval null for failed. + * @retval OtherValues success. + * + * @see aclrtDestroyGroupInfo + */ +ACL_FUNC_VISIBILITY aclrtGroupInfo *aclrtCreateGroupInfo(); + +/** + * @ingroup AscendCL + * @brief destroy group information + * + * @param groupInfo [IN] pointer to group information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtCreateGroupInfo + */ +ACL_FUNC_VISIBILITY aclError aclrtDestroyGroupInfo(aclrtGroupInfo *groupInfo); + +/** + * @ingroup AscendCL + * @brief get all group information + * + * @param groupInfo [OUT] pointer to group information + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtGetGroupCount + */ +ACL_FUNC_VISIBILITY aclError aclrtGetAllGroupInfo(aclrtGroupInfo *groupInfo); + +/** + * @ingroup AscendCL + * @brief get detail information of group + * + * @param groupInfo [IN] pointer to group information + * @param groupIndex [IN] group index value + * @param attr [IN] group attribute + * @param attrValue [OUT] pointer to attribute value + * @param valueLen [IN] length of attribute value + * @param paramRetSize [OUT] pointer to real length of attribute value + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtGetGroupCount | aclrtGetAllGroupInfo + */ +ACL_FUNC_VISIBILITY aclError aclrtGetGroupInfoDetail(const aclrtGroupInfo *groupInfo, int32_t groupIndex, + aclrtGroupAttr attr, void *attrValue, size_t valueLen, + size_t *paramRetSize); + +/** + * @ingroup AscendCL + * @brief checking whether current device and peer device support the p2p feature + * + * @param canAccessPeer [OUT] pointer to save the checking result + * @param deviceId [IN] current device id + * @param peerDeviceId [IN] peer device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtDeviceEnablePeerAccess | aclrtDeviceDisablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceCanAccessPeer(int32_t *canAccessPeer, int32_t deviceId, int32_t peerDeviceId); + +/** + * @ingroup AscendCL + * @brief enable the peer device to support the p2p feature + * + * @param peerDeviceId [IN] the peer device id + * @param flags [IN] reserved field, now it must be zero + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtDeviceCanAccessPeer | aclrtDeviceDisablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceEnablePeerAccess(int32_t peerDeviceId, uint32_t flags); + +/** + * @ingroup AscendCL + * @brief disable the peer device to support the p2p function + * + * @param peerDeviceId [IN] the peer device id + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see aclrtDeviceCanAccessPeer | aclrtDeviceEnablePeerAccess + */ +ACL_FUNC_VISIBILITY aclError aclrtDeviceDisablePeerAccess(int32_t peerDeviceId); + +/** + * @ingroup AscendCL + * @brief Obtain the free memory and total memory of specified attribute. + * the specified memory include normal memory and huge memory. 
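+ *
+ * @par Example
+ * Peer-access sketch for the p2p interfaces above (illustrative only;
+ * assumes devices 0 and 1 are present):
+ * @code
+ * int32_t canAccessPeer = 0;
+ * (void)aclrtDeviceCanAccessPeer(&canAccessPeer, 0, 1);
+ * if (canAccessPeer == 1) {
+ *     (void)aclrtSetDevice(0);
+ *     (void)aclrtDeviceEnablePeerAccess(1, 0);  // flags must be 0
+ * }
+ * @endcode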
+ *
+ * @param attr [IN] the memory attribute of specified device
+ * @param free [OUT] the free memory of specified device
+ * @param total [OUT] the total memory of specified device.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtGetMemInfo(aclrtMemAttr attr, size_t *free, size_t *total);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set the timeout interval for op waiting
+ *
+ * @param timeout [IN] op wait timeout
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtSetOpWaitTimeout(uint32_t timeout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INC_EXTERNAL_ACL_ACL_RT_H_
diff --git a/inc/external/acl/acl_tdt.h b/inc/external/acl/acl_tdt.h
new file mode 100644
index 00000000..c357518d
--- /dev/null
+++ b/inc/external/acl/acl_tdt.h
@@ -0,0 +1,276 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INC_EXTERNAL_ACL_ACL_TDT_H_
+#define INC_EXTERNAL_ACL_ACL_TDT_H_
+
+#include "acl/acl_base.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum acltdtTensorType {
+  ACL_TENSOR_DATA_UNDEFINED = -1,
+  ACL_TENSOR_DATA_TENSOR,
+  ACL_TENSOR_DATA_END_OF_SEQUENCE,
+  ACL_TENSOR_DATA_ABNORMAL
+};
+
+typedef struct acltdtDataItem acltdtDataItem;
+typedef struct acltdtDataset acltdtDataset;
+typedef struct acltdtChannelHandle acltdtChannelHandle;
+
+/**
+ * @ingroup AscendCL
+ * @brief Get tensor type from item
+ *
+ * @param dataItem [IN] pointer to the data item
+ *
+ * @retval Tensor type.
+ * @retval ACL_TENSOR_DATA_UNDEFINED if dataItem is null
+ */
+ACL_FUNC_VISIBILITY acltdtTensorType acltdtGetTensorTypeFromItem(const acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get data type from item
+ *
+ * @param dataItem [IN] pointer to the data item
+ *
+ * @retval Data type.
+ * @retval ACL_DT_UNDEFINED if dataItem is null
+ */
+ACL_FUNC_VISIBILITY aclDataType acltdtGetDataTypeFromItem(const acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get data address from item
+ *
+ * @param dataItem [IN] pointer to data item
+ *
+ * @retval null for failed
+ * @retval OtherValues success
+ */
+ACL_FUNC_VISIBILITY void *acltdtGetDataAddrFromItem(const acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get data size from item
+ *
+ * @param dataItem [IN] pointer to data item
+ *
+ * @retval 0 for failed
+ * @retval OtherValues success
+ */
+ACL_FUNC_VISIBILITY size_t acltdtGetDataSizeFromItem(const acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get the number of dims from item
+ *
+ * @param dataItem [IN] pointer to data item
+ *
+ * @retval 0 for failed
+ * @retval OtherValues success
+ */
+ACL_FUNC_VISIBILITY size_t acltdtGetDimNumFromItem(const acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dims from item
+ *
+ * @param dataItem [IN] the struct of data item
+ * @param dims [IN|OUT] pointer to the dims of dataItem
+ * @param dimNum [IN] the size of the dims
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acltdtGetDimsFromItem(const acltdtDataItem *dataItem, int64_t *dims, size_t dimNum);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create the struct of data item
+ *
+ * @param tdtType [IN] Tdt tensor type
+ * @param dims [IN] pointer of tdtDataItem's dims
+ * @param dimNum [IN] Dim number
+ * @param dataType [IN] Data type
+ * @param data [IN] Data pointer
+ * @param size [IN] Data size
+ *
+ * @retval null for failed
+ * @retval OtherValues success
+ *
+ * @see acltdtDestroyDataItem
+ */
+ACL_FUNC_VISIBILITY acltdtDataItem *acltdtCreateDataItem(acltdtTensorType tdtType, const int64_t *dims, size_t dimNum,
+                                                         aclDataType dataType, void *data, size_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy the struct of data item
+ *
+ * @param dataItem [IN] pointer to the data item
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acltdtCreateDataItem
+ */
+ACL_FUNC_VISIBILITY aclError acltdtDestroyDataItem(acltdtDataItem *dataItem);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create the tdt dataset
+ *
+ * @retval null for failed
+ * @retval OtherValues success
+ *
+ * @see acltdtDestroyDataset
+ */
+ACL_FUNC_VISIBILITY acltdtDataset *acltdtCreateDataset();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy the tdt dataset
+ *
+ * @param dataset [IN] pointer to the dataset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acltdtCreateDataset
+ */
+ACL_FUNC_VISIBILITY aclError acltdtDestroyDataset(acltdtDataset *dataset);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get the data item
+ *
+ * @param dataset [IN] pointer to the dataset
+ * @param index [IN] index of the dataset
+ *
+ * @retval null for failed
+ * @retval OtherValues success
+ *
+ * @see acltdtAddDataItem
+ */
+ACL_FUNC_VISIBILITY acltdtDataItem *acltdtGetDataItem(const acltdtDataset *dataset, size_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief Add the data item to the dataset
+ *
+ * @param dataset [OUT] pointer to the dataset
+ * @param dataItem [IN] pointer to the data item
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
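+ *
+ * @par Example
+ * Dataset construction sketch using the interfaces above (illustrative
+ * only; error handling omitted and the channel send is left as a comment):
+ * @code
+ * int64_t dims[2] = {2, 3};
+ * float data[6] = {0};
+ * acltdtDataItem *item = acltdtCreateDataItem(ACL_TENSOR_DATA_TENSOR, dims, 2,
+ *                                             ACL_FLOAT, data, sizeof(data));
+ * acltdtDataset *dataset = acltdtCreateDataset();
+ * (void)acltdtAddDataItem(dataset, item);
+ * // ... acltdtSendTensor(handle, dataset, -1) ...
+ * (void)acltdtDestroyDataItem(item);
+ * (void)acltdtDestroyDataset(dataset);
+ * @endcode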
+ * @retval OtherValues Failure + * + * @see acltdtGetDataItem + */ +ACL_FUNC_VISIBILITY aclError acltdtAddDataItem(acltdtDataset *dataset, acltdtDataItem *dataItem); + +/** + * @ingroup AscendCL + * @brief Get the size of dataset + * + * @param dataset [IN] pointer to the dataset + * + * @retval 0 for failed + * @retval OtherValues success + */ +ACL_FUNC_VISIBILITY size_t acltdtGetDatasetSize(const acltdtDataset *dataset); + +/** + * @ingroup AscendCL + * @brief Stop the channel + * + * @param handle [IN] pointer to the channel handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtCreateChannel | acltdtDestroyChannel + */ +ACL_FUNC_VISIBILITY aclError acltdtStopChannel(acltdtChannelHandle *handle); + +/** + * @ingroup AscendCL + * @brief Create the channel + * + * @param deviceId [IN] the device id + * @param name [IN] the channel's name + * + * @retval null for failed + * @retval OtherValues success + * + * @see acltdtStopChannel | acltdtDestroyChannel + */ +ACL_FUNC_VISIBILITY acltdtChannelHandle *acltdtCreateChannel(uint32_t deviceId, const char *name); + +/** + * @ingroup AscendCL + * @brief Destroy the channel + * + * @param handle [IN] pointer to the channel handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtCreateChannel | acltdtStopChannel + */ +ACL_FUNC_VISIBILITY aclError acltdtDestroyChannel(acltdtChannelHandle *handle); + +/** + * @ingroup AscendCL + * @brief Send tensor to device + * + * @param handle [IN] pointer to the channel handle + * @param dataset [IN] pointer to the dataset + * @param timeout [IN] to be reserved, now it must be -1 + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtReceiveTensor + */ +ACL_FUNC_VISIBILITY aclError acltdtSendTensor(const acltdtChannelHandle *handle, const acltdtDataset *dataset, + int32_t timeout); + +/** + * @ingroup AscendCL + * @brief Receive tensor from device + * + * @param handle [IN] pointer to the channel handle + * @param dataset [OUT] pointer to the dataset + * @param timeout [IN] to be reserved, now it must be -1 + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acltdtSendTensor + */ +ACL_FUNC_VISIBILITY aclError acltdtReceiveTensor(const acltdtChannelHandle *handle, acltdtDataset *dataset, + int32_t timeout); + +#ifdef __cplusplus +} +#endif + +#endif // INC_EXTERNAL_ACL_ACL_TDT_H_ diff --git a/inc/external/acl/error_codes/ge_error_codes.h b/inc/external/acl/error_codes/ge_error_codes.h new file mode 100644 index 00000000..cafc5a64 --- /dev/null +++ b/inc/external/acl/error_codes/ge_error_codes.h @@ -0,0 +1,75 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef INC_EXTERNAL_GE_GE_ERROR_CODES_H_
+#define INC_EXTERNAL_GE_GE_ERROR_CODES_H_
+
+#if defined(_MSC_VER)
+#ifdef FUNC_VISIBILITY
+#define GE_FUNC_VISIBILITY __declspec(dllexport)
+#else
+#define GE_FUNC_VISIBILITY
+#endif
+#else
+#ifdef FUNC_VISIBILITY
+#define GE_FUNC_VISIBILITY __attribute__((visibility("default")))
+#else
+#define GE_FUNC_VISIBILITY
+#endif
+#endif
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+static const uint32_t ACL_ERROR_GE_PARAM_INVALID = 145000;
+static const uint32_t ACL_ERROR_GE_EXEC_NOT_INIT = 145001;
+static const uint32_t ACL_ERROR_GE_EXEC_MODEL_PATH_INVALID = 145002;
+static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ID_INVALID = 145003;
+static const uint32_t ACL_ERROR_GE_EXEC_MODEL_DATA_SIZE_INVALID = 145006;
+static const uint32_t ACL_ERROR_GE_EXEC_MODEL_ADDR_INVALID = 145007;
+static const uint32_t ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID = 145008;
+static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_REPEATED = 145009;
+static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_ADDR_INVALID = 145011;
+static const uint32_t ACL_ERROR_GE_DYNAMIC_INPUT_LENGTH_INVALID = 145012;
+static const uint32_t ACL_ERROR_GE_DYNAMIC_BATCH_SIZE_INVALID = 145013;
+static const uint32_t ACL_ERROR_GE_AIPP_BATCH_EMPTY = 145014;
+static const uint32_t ACL_ERROR_GE_AIPP_NOT_EXIST = 145015;
+static const uint32_t ACL_ERROR_GE_AIPP_MODE_INVALID = 145016;
+static const uint32_t ACL_ERROR_GE_OP_TASK_TYPE_INVALID = 145017;
+static const uint32_t ACL_ERROR_GE_OP_KERNEL_TYPE_INVALID = 145018;
+static const uint32_t ACL_ERROR_GE_PLGMGR_PATH_INVALID = 145019;
+static const uint32_t ACL_ERROR_GE_FORMAT_INVALID = 145020;
+static const uint32_t ACL_ERROR_GE_SHAPE_INVALID = 145021;
+static const uint32_t ACL_ERROR_GE_DATATYPE_INVALID = 145022;
+static const uint32_t ACL_ERROR_GE_MEMORY_ALLOCATION = 245000;
+static const uint32_t ACL_ERROR_GE_MEMORY_OPERATE_FAILED = 245001;
+static const uint32_t ACL_ERROR_GE_INTERNAL_ERROR = 545000;
+static const uint32_t ACL_ERROR_GE_LOAD_MODEL = 545001;
+static const uint32_t ACL_ERROR_GE_EXEC_LOAD_MODEL_PARTITION_FAILED = 545002;
+static const uint32_t ACL_ERROR_GE_EXEC_LOAD_WEIGHT_PARTITION_FAILED = 545003;
+static const uint32_t ACL_ERROR_GE_EXEC_LOAD_TASK_PARTITION_FAILED = 545004;
+static const uint32_t ACL_ERROR_GE_EXEC_LOAD_KERNEL_PARTITION_FAILED = 545005;
+static const uint32_t ACL_ERROR_GE_EXEC_RELEASE_MODEL_DATA = 545006;
+static const uint32_t ACL_ERROR_GE_COMMAND_HANDLE = 545007;
+static const uint32_t ACL_ERROR_GE_GET_TENSOR_INFO = 545008;
+static const uint32_t ACL_ERROR_GE_UNLOAD_MODEL = 545009;
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+#endif // INC_EXTERNAL_GE_GE_ERROR_CODES_H_
diff --git a/inc/external/acl/error_codes/rt_error_codes.h b/inc/external/acl/error_codes/rt_error_codes.h
new file mode 100644
index 00000000..a1392cc6
--- /dev/null
+++ b/inc/external/acl/error_codes/rt_error_codes.h
@@ -0,0 +1,109 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__
+#define __INC_EXTERNEL_RT_ERROR_CODES_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static const int32_t ACL_RT_SUCCESS = 0;                              // success
+
+static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000;             // param invalid
+static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001;          // invalid device id
+static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002;              // current context null
+static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003;            // stream not in current context
+static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004;             // model not in current context
+static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005;              // stream not in model
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006;   // event timestamp invalid
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007;  // event timestamp reversal
+static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008;            // memory address unaligned
+static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009;                 // open file failed
+static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010;                // write file failed
+static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011;          // error subscribe stream
+static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012;          // error subscribe thread
+static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013;             // group not set
+static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014;          // group not create
+static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015;          // callback not register to stream
+static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016;       // invalid memory type
+static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017;            // invalid handle
+static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018;       // invalid malloc type
+static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019;              // wait timeout
+
+static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000;       // feature not support
+static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001;         // memory allocation error
+static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002;               // memory free error
+static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003;          // aicore overflow
+static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004;                 // no device
+static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005;       // resource alloc fail
+static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006;             // no permission
+static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007;         // no event resource
+static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008;        // no stream resource
+static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009;        // no notify resource
+static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010;         // no model resource
+static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011;           // no cdq resource
+
+static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000;            // runtime internal error
+static const int32_t ACL_ERROR_RT_TS_ERROR = 507001;                  // ts internal error
+static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002;          // task full in stream
+static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003;         // task empty in stream
+static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004;       // stream not complete
+static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005;           // end of sequence
+static const int32_t
ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006; // event not complete +static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007; // context release error +static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008; // soc version error +static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009; // task type not support +static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010; // ts lost heartbeat +static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011; // model execute failed +static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012; // report timeout +static const int32_t ACL_ERROR_RT_SYS_DMA = 507013; // sys dma error +static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014; // aicore timeout +static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015; // aicore exception +static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016; // aicore trap exception +static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017; // aicpu timeout +static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018; // aicpu exception +static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019; // aicpu datadump response error +static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020; // aicpu model operate response error +static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021; // profiling error +static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022; // ipc error +static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023; // model abort normal +static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024; // kernel unregistering +static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025; // ringbuffer not init +static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026; // ringbuffer no data +static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027; // kernel lookup error +static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028; // kernel register duplicate +static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029; // debug register failed +static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030; // debug unregister failed +static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031; // label not in current context +static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032; // program register num use out +static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033; // device setup error +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034; // vector core timeout +static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035; // vector core exception +static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036; // vector core trap exception +static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037; // cdq alloc batch abnormal + +static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899; // drv internal error +static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900; // aicpu internal error +static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901; // hdc disconnect + +#ifdef __cplusplus +} +#endif + +#endif // __INC_EXTERNEL_RT_ERROR_CODES_H__ diff --git a/inc/external/acl/ops/acl_cblas.h b/inc/external/acl/ops/acl_cblas.h new file mode 100644 index 00000000..3d81eb2b --- /dev/null +++ b/inc/external/acl/ops/acl_cblas.h @@ -0,0 +1,334 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_ +#define INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_ + +#include "acl/acl.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum aclTransType { ACL_TRANS_N, ACL_TRANS_T, ACL_TRANS_NZ, ACL_TRANS_NZ_T } aclTransType; + +typedef enum aclComputeType { ACL_COMPUTE_HIGH_PRECISION, ACL_COMPUTE_LOW_PRECISION } aclComputeType; + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication. + * of same type as dataTypeC + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param dataTypeA [IN] datatype of matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param dataTypeX [IN] datatype of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * of same type as dataTypeC If beta == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param dataTypeY [IN] datatype of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasGemvEx(aclTransType transA, int m, int n, const void *alpha, const void *a, int lda, + aclDataType dataTypeA, const void *x, int incx, aclDataType dataTypeX, + const void *beta, void *y, int incy, aclDataType dataTypeY, + aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param dataTypeA [IN] datatype of matrix A + * @param dataTypeX [IN] datatype of vector x + * @param dataTypeY [IN] datatype of vector y + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. 
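+ *
+ * @par Example
+ * Illustrative fp16 GEMV call (y = alpha * A * x + beta * y) through the
+ * aclblasGemvEx interface above; devA, devX, devY are assumed to be valid
+ * device buffers, the leading dimension is illustrative, and the
+ * aclFloatToFloat16 helper comes from acl_base.h (error handling omitted):
+ * @code
+ * aclFloat16 alpha = aclFloatToFloat16(1.0f);
+ * aclFloat16 beta = aclFloatToFloat16(0.0f);
+ * (void)aclblasGemvEx(ACL_TRANS_N, m, n, &alpha,
+ *                     devA, m, ACL_FLOAT16,
+ *                     devX, 1, ACL_FLOAT16,
+ *                     &beta, devY, 1, ACL_FLOAT16,
+ *                     ACL_COMPUTE_HIGH_PRECISION, stream);
+ * (void)aclrtSynchronizeStream(stream);
+ * @endcode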
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForGemvEx(aclTransType transA, int m, int n, aclDataType dataTypeA, + aclDataType dataTypeX, aclDataType dataTypeY, + aclComputeType type, aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasHgemv(aclTransType transA, int m, int n, const aclFloat16 *alpha, + const aclFloat16 *a, int lda, const aclFloat16 *x, int incx, + const aclFloat16 *beta, aclFloat16 *y, int incy, aclComputeType type, + aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForHgemv(aclTransType transA, int m, int n, aclComputeType type, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-vector multiplication + * + * @param transA [IN] transpose type of matrix A + * @param m [IN] number of rows of matrix A + * @param n [IN] number of columns of matrix A + * @param alpha [IN] pointer to scalar used for multiplication + * @param a [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param x [IN] pointer to vector x + * @param incx [IN] stride between consecutive elements of vector x + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then y does not have to be a valid input + * @param y [IN|OUT] pointer to vector y + * @param incy [IN] stride between consecutive elements of vector y + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. 
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclblasS8gemv(aclTransType transA, int m, int n, const int32_t *alpha, const int8_t *a,
+                                           int lda, const int8_t *x, int incx, const int32_t *beta, int32_t *y,
+                                           int incy, aclComputeType type, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create a handle for performing the matrix-vector multiplication
+ *
+ * @param transA [IN] transpose type of matrix A
+ * @param m [IN] number of rows of matrix A
+ * @param n [IN] number of columns of matrix A
+ * @param handle [OUT] pointer to the pointer to the handle
+ * @param type [IN] computation type
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForS8gemv(aclTransType transA, int m, int n, aclComputeType type,
+                                                          aclopHandle **handle);
+
+/**
+ * @ingroup AscendCL
+ * @brief perform the matrix-matrix multiplication
+ *
+ * @param transA [IN] transpose type of matrix A
+ * @param transB [IN] transpose type of matrix B
+ * @param transC [IN] transpose type of matrix C
+ * @param m [IN] number of rows of matrix A and matrix C
+ * @param n [IN] number of columns of matrix B and matrix C
+ * @param k [IN] number of columns of matrix A and rows of matrix B
+ * @param alpha [IN] pointer to scalar used for multiplication, of same type as dataTypeC
+ * @param matrixA [IN] pointer to matrix A
+ * @param lda [IN] leading dimension array used to store matrix A
+ * @param dataTypeA [IN] datatype of matrix A
+ * @param matrixB [IN] pointer to matrix B
+ * @param ldb [IN] leading dimension array used to store matrix B
+ * @param dataTypeB [IN] datatype of matrix B
+ * @param beta [IN] pointer to scalar used for multiplication,
+ * of same type as dataTypeC. If beta == 0,
+ * then matrixC does not have to be a valid input
+ * @param matrixC [IN|OUT] pointer to matrix C
+ * @param ldc [IN] leading dimension array used to store matrix C
+ * @param dataTypeC [IN] datatype of matrix C
+ * @param type [IN] computation type
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclblasGemmEx(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n,
+                                           int k, const void *alpha, const void *matrixA, int lda,
+                                           aclDataType dataTypeA, const void *matrixB, int ldb, aclDataType dataTypeB,
+                                           const void *beta, void *matrixC, int ldc, aclDataType dataTypeC,
+                                           aclComputeType type, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create a handle for performing the matrix-matrix multiplication
+ *
+ * @param transA [IN] transpose type of matrix A
+ * @param transB [IN] transpose type of matrix B
+ * @param transC [IN] transpose type of matrix C
+ * @param m [IN] number of rows of matrix A and matrix C
+ * @param n [IN] number of columns of matrix B and matrix C
+ * @param k [IN] number of columns of matrix A and rows of matrix B
+ * @param dataTypeA [IN] datatype of matrix A
+ * @param dataTypeB [IN] datatype of matrix B
+ * @param dataTypeC [IN] datatype of matrix C
+ * @param type [IN] computation type
+ * @param handle [OUT] pointer to the pointer to the handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
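+ *
+ * @par Example
+ * Illustrative fp16 GEMM call (C = alpha * A * B + beta * C) via
+ * aclblasHgemm, which is declared later in this header; the leading
+ * dimensions are illustrative and devA, devB, devC are assumed to be valid
+ * device buffers (error handling omitted):
+ * @code
+ * aclFloat16 alpha = aclFloatToFloat16(1.0f);
+ * aclFloat16 beta = aclFloatToFloat16(0.0f);
+ * (void)aclblasHgemm(ACL_TRANS_N, ACL_TRANS_N, ACL_TRANS_N, m, n, k,
+ *                    &alpha, devA, k, devB, n, &beta, devC, n,
+ *                    ACL_COMPUTE_HIGH_PRECISION, stream);
+ * (void)aclrtSynchronizeStream(stream);
+ * @endcode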
+ * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForGemmEx(aclTransType transA, aclTransType transB, aclTransType transC, + int m, int n, int k, aclDataType dataTypeA, + aclDataType dataTypeB, aclDataType dataTypeC, + aclComputeType type, aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param alpha [IN] pointer to scalar used for multiplication + * @param matrixA [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param matrixB [IN] pointer to matrix B + * @param ldb [IN] leading dimension used to store the matrix B + * @param beta [IN] pointer to scalar used for multiplication. + * If beta value == 0, + * then matrixC does not have to be a valid input + * @param matrixC [IN|OUT] pointer to matrix C + * @param ldc [IN] leading dimension used to store the matrix C + * @param type [IN] computation type + * @param stream [IN] stream + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasHgemm(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n, + int k, const aclFloat16 *alpha, const aclFloat16 *matrixA, int lda, + const aclFloat16 *matrixB, int ldb, const aclFloat16 *beta, + aclFloat16 *matrixC, int ldc, aclComputeType type, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief create a handle for performing the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param type [IN] computation type + * @param handle [OUT] pointer to the pointer to the handle + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForHgemm(aclTransType transA, aclTransType transB, aclTransType transC, + int m, int n, int k, aclComputeType type, + aclopHandle **handle); + +/** + * @ingroup AscendCL + * @brief perform the matrix-matrix multiplication + * + * @param transA [IN] transpose type of matrix A + * @param transB [IN] transpose type of matrix B + * @param transC [IN] transpose type of matrix C + * @param m [IN] number of rows of matrix A and matrix C + * @param n [IN] number of columns of matrix B and matrix C + * @param k [IN] number of columns of matrix A and rows of matrix B + * @param alpha [IN] pointer to scalar used for multiplication + * @param matrixA [IN] pointer to matrix A + * @param lda [IN] leading dimension used to store the matrix A + * @param matrixB [IN] pointer to matrix B + * @param ldb [IN] leading dimension used to store the matrix B + * @param beta [IN] pointer to scalar used for multiplication. 
+ * If beta value == 0,
+ * then matrixC does not have to be a valid input
+ * @param matrixC [IN|OUT] pointer to matrix C
+ * @param ldc [IN] leading dimension used to store the matrix C
+ * @param type [IN] computation type
+ * @param stream [IN] stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclblasS8gemm(aclTransType transA, aclTransType transB, aclTransType transC, int m, int n,
+                                           int k, const int32_t *alpha, const int8_t *matrixA, int lda,
+                                           const int8_t *matrixB, int ldb, const int32_t *beta, int32_t *matrixC,
+                                           int ldc, aclComputeType type, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create a handle for performing the matrix-matrix multiplication
+ *
+ * @param transA [IN] transpose type of matrix A
+ * @param transB [IN] transpose type of matrix B
+ * @param transC [IN] transpose type of matrix C
+ * @param m [IN] number of rows of matrix A and matrix C
+ * @param n [IN] number of columns of matrix B and matrix C
+ * @param k [IN] number of columns of matrix A and rows of matrix B
+ * @param type [IN] computation type
+ * @param handle [OUT] pointer to the pointer to the handle
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclblasCreateHandleForS8gemm(aclTransType transA, aclTransType transB, aclTransType transC,
+                                                          int m, int n, int k, aclComputeType type,
+                                                          aclopHandle **handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INC_EXTERNAL_ACL_OPS_ACL_CBLAS_H_
diff --git a/inc/external/acl/ops/acl_dvpp.h b/inc/external/acl/ops/acl_dvpp.h
new file mode 100644
index 00000000..dcaa3936
--- /dev/null
+++ b/inc/external/acl/ops/acl_dvpp.h
@@ -0,0 +1,2568 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if !defined(ENABLE_DVPP_INTERFACE)
+#if defined(_MSC_VER)
+#error message("if you want to use dvpp functions, please use the macro definition (ENABLE_DVPP_INTERFACE).")
+#else
+#error "if you want to use dvpp functions, please use the macro definition (ENABLE_DVPP_INTERFACE)."
+#endif
+#endif
+
+#ifndef INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_
+#define INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "acl/acl.h"
+#include "acl/acl_base.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct acldvppPicDesc acldvppPicDesc;
+typedef struct acldvppBatchPicDesc acldvppBatchPicDesc;
+typedef struct acldvppRoiConfig acldvppRoiConfig;
+typedef struct acldvppResizeConfig acldvppResizeConfig;
+typedef struct acldvppBorderConfig acldvppBorderConfig;
+typedef struct acldvppLutMap acldvppLutMap;
+typedef struct acldvppChannelDesc acldvppChannelDesc;
+typedef struct acldvppJpegeConfig acldvppJpegeConfig;
+typedef struct aclvdecChannelDesc aclvdecChannelDesc;
+typedef struct acldvppStreamDesc acldvppStreamDesc;
+typedef struct aclvdecFrameConfig aclvdecFrameConfig;
+typedef struct aclvencChannelDesc aclvencChannelDesc;
+typedef struct aclvencFrameConfig aclvencFrameConfig;
+typedef struct acldvppHist acldvppHist;
+typedef void (*aclvdecCallback)(acldvppStreamDesc *input, acldvppPicDesc *output, void *userData);
+typedef void (*aclvencCallback)(acldvppPicDesc *input, acldvppStreamDesc *output, void *userdata);
+
+// Supported Pixel Format
+enum acldvppPixelFormat {
+  PIXEL_FORMAT_YUV_400 = 0,                      // 0
+  PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1,           // 1
+  PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2,           // 2
+  PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3,           // 3
+  PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4,           // 4
+  PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5,           // 5
+  PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6,           // 6
+  PIXEL_FORMAT_YUYV_PACKED_422 = 7,              // 7
+  PIXEL_FORMAT_UYVY_PACKED_422 = 8,              // 8
+  PIXEL_FORMAT_YVYU_PACKED_422 = 9,              // 9
+  PIXEL_FORMAT_VYUY_PACKED_422 = 10,             // 10
+  PIXEL_FORMAT_YUV_PACKED_444 = 11,              // 11
+  PIXEL_FORMAT_RGB_888 = 12,                     // 12
+  PIXEL_FORMAT_BGR_888 = 13,                     // 13
+  PIXEL_FORMAT_ARGB_8888 = 14,                   // 14
+  PIXEL_FORMAT_ABGR_8888 = 15,                   // 15
+  PIXEL_FORMAT_RGBA_8888 = 16,                   // 16
+  PIXEL_FORMAT_BGRA_8888 = 17,                   // 17
+  PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18,  // 18
+  PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19,  // 19
+  PIXEL_FORMAT_YVU_PLANAR_420 = 20,              // 20
+  PIXEL_FORMAT_YVU_PLANAR_422,
+  PIXEL_FORMAT_YVU_PLANAR_444,
+  PIXEL_FORMAT_RGB_444 = 23,
+  PIXEL_FORMAT_BGR_444,
+  PIXEL_FORMAT_ARGB_4444,
+  PIXEL_FORMAT_ABGR_4444,
+  PIXEL_FORMAT_RGBA_4444,
+  PIXEL_FORMAT_BGRA_4444,
+  PIXEL_FORMAT_RGB_555,
+  PIXEL_FORMAT_BGR_555,
+  PIXEL_FORMAT_RGB_565,
+  PIXEL_FORMAT_BGR_565,
+  PIXEL_FORMAT_ARGB_1555,
+  PIXEL_FORMAT_ABGR_1555,
+  PIXEL_FORMAT_RGBA_1555,
+  PIXEL_FORMAT_BGRA_1555,
+  PIXEL_FORMAT_ARGB_8565,
+  PIXEL_FORMAT_ABGR_8565,
+  PIXEL_FORMAT_RGBA_8565,
+  PIXEL_FORMAT_BGRA_8565,
+  PIXEL_FORMAT_RGB_BAYER_8BPP = 50,
+  PIXEL_FORMAT_RGB_BAYER_10BPP,
+  PIXEL_FORMAT_RGB_BAYER_12BPP,
+  PIXEL_FORMAT_RGB_BAYER_14BPP,
+  PIXEL_FORMAT_RGB_BAYER_16BPP,
+  PIXEL_FORMAT_BGR_888_PLANAR = 70,
+  PIXEL_FORMAT_HSV_888_PACKAGE,
+  PIXEL_FORMAT_HSV_888_PLANAR,
+  PIXEL_FORMAT_LAB_888_PACKAGE,
+  PIXEL_FORMAT_LAB_888_PLANAR,
+  PIXEL_FORMAT_S8C1,
+  PIXEL_FORMAT_S8C2_PACKAGE,
+  PIXEL_FORMAT_S8C2_PLANAR,
+  PIXEL_FORMAT_S16C1,
+  PIXEL_FORMAT_U8C1,
+  PIXEL_FORMAT_U16C1,
+  PIXEL_FORMAT_S32C1,
+  PIXEL_FORMAT_U32C1,
+  PIXEL_FORMAT_U64C1,
+  PIXEL_FORMAT_S64C1,
+  PIXEL_FORMAT_YUV_SEMIPLANAR_440 = 1000,
+  PIXEL_FORMAT_YVU_SEMIPLANAR_440,
+  PIXEL_FORMAT_FLOAT32,
+  PIXEL_FORMAT_BUTT,
+  PIXEL_FORMAT_UNKNOWN = 10000
+};
+
+// Stream Format
+enum acldvppStreamFormat { H265_MAIN_LEVEL = 0, H264_BASELINE_LEVEL, H264_MAIN_LEVEL, H264_HIGH_LEVEL };
+
+// Supported Channel Mode
+enum acldvppChannelMode { DVPP_CHNMODE_VPC = 1, DVPP_CHNMODE_JPEGD = 2, DVPP_CHNMODE_JPEGE = 4 };
+
+// Supported Border Type
+enum acldvppBorderType { BORDER_CONSTANT = 0, BORDER_REPLICATE, BORDER_REFLECT, BORDER_REFLECT_101 };
+
+// Venc parameter type
+enum aclvencChannelDescParamType {
+  ACL_VENC_THREAD_ID_UINT64 = 0,
+  ACL_VENC_CALLBACK_PTR,
+  ACL_VENC_PIXEL_FORMAT_UINT32,
+  ACL_VENC_ENCODE_TYPE_UINT32,
+  ACL_VENC_PIC_WIDTH_UINT32,
+  ACL_VENC_PIC_HEIGHT_UINT32,
+  ACL_VENC_KEY_FRAME_INTERVAL_UINT32,
+  ACL_VENC_BUF_ADDR_PTR,
+  ACL_VENC_BUF_SIZE_UINT32,
+  ACL_VENC_RC_MODE_UINT32,
+  ACL_VENC_SRC_RATE_UINT32,
+  ACL_VENC_MAX_BITRATE_UINT32,
+  ACL_VENC_MAX_IP_PROP_UINT32
+};
+
+// Jpeg picture format
+enum acldvppJpegFormat {
+  ACL_JPEG_CSS_444 = 0,
+  ACL_JPEG_CSS_422,
+  ACL_JPEG_CSS_420,
+  ACL_JPEG_CSS_GRAY,
+  ACL_JPEG_CSS_440,
+  ACL_JPEG_CSS_411,
+  ACL_JPEG_CSS_UNKNOWN = 1000
+};
+
+/**
+ * @ingroup AscendCL
+ * @brief alloc device memory for dvpp.
+ *
+ * @par Function
+ * @li It is mainly used to allocate memory for media data processing on the
+ * device. The requested memory meets the data processing requirements.
+ * After calling this interface to request memory,
+ * you must release the memory using the acldvppFree interface.
+ * @li When calling the acldvppMalloc interface to allocate memory,
+ * the size entered by the user is aligned up to a multiple of 32,
+ * and an additional 32 bytes are allocated.
+ *
+ * @par Restriction
+ * If the user uses the acldvppMalloc interface to allocate a large block of
+ * memory and then partitions and manages that memory himself,
+ * each partition must be sized according to the actual data size of each
+ * picture, aligned up to a multiple of 32 plus an additional 32 bytes
+ * (ALIGN_UP(len) + 32 bytes).
+ *
+ * @param devPtr [OUT] memory pointer.
+ * @param size [IN] memory size.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppFree
+ */
+ACL_FUNC_VISIBILITY aclError acldvppMalloc(void **devPtr, size_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief free device memory for dvpp.
+ *
+ * @par Function
+ * Free the memory requested through the acldvppMalloc interface
+ * @param devPtr [IN] memory pointer to free.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppMalloc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppFree(void *devPtr);
+
+/**
+ * @ingroup AscendCL
+ * @brief create DvppChannelDesc.
+ *
+ * @par Function
+ * Create a channel for image data processing.
+ * The same channel can be reused
+ * and is no longer available after destruction
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY acldvppChannelDesc *acldvppCreateChannelDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy dvppChannelDesc.
+ *
+ * @par Function
+ * Can only destroy channel descriptions created
+ * through the acldvppCreateChannelDesc interface
+ * @param channelDesc [IN] the channel description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannelDesc | acldvppDestroyChannel
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyChannelDesc(acldvppChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp channel Id.
+ *
+ * @par Restriction
+ * Interface calling sequence:
+ * acldvppCreateChannelDesc --> acldvppCreateChannel -->
+ * acldvppGetChannelDescChannelId
+ *
+ * @param channelDesc [IN] the channel description.
+ *
+ * @retval channel id.
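+ *
+ * @par Example
+ * Illustrative sketch of the calling sequence above; acldvppCreateChannel
+ * and acldvppDestroyChannel are declared elsewhere in this header (error
+ * handling omitted):
+ * @code
+ * acldvppChannelDesc *chnDesc = acldvppCreateChannelDesc();
+ * (void)acldvppCreateChannel(chnDesc);
+ * uint64_t chnId = acldvppGetChannelDescChannelId(chnDesc);
+ * // ... submit VPC/JPEGD jobs on this channel ...
+ * (void)acldvppDestroyChannel(chnDesc);
+ * (void)acldvppDestroyChannelDesc(chnDesc);
+ * @endcode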
+ *
+ * @see acldvppCreateChannelDesc | acldvppCreateChannel
+ */
+ACL_FUNC_VISIBILITY uint64_t acldvppGetChannelDescChannelId(const acldvppChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp picture description.
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY acldvppPicDesc *acldvppCreatePicDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp picture description.
+ *
+ * @par Function
+ * Can only destroy picture description information created
+ * through acldvppCreatePicDesc interface.
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreatePicDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyPicDesc(acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's data.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param dataDev [IN] dvpp picture dataDev. Must be the memory
+ * requested using the acldvppMalloc interface.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppMalloc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescData(acldvppPicDesc *picDesc, void *dataDev);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's size.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param size [IN] dvpp picture size.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescSize(acldvppPicDesc *picDesc, uint32_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's format.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param format [IN] dvpp picture format.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescFormat(acldvppPicDesc *picDesc, acldvppPixelFormat format);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's width.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param width [IN] dvpp picture width.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescWidth(acldvppPicDesc *picDesc, uint32_t width);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's height.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param height [IN] dvpp picture height.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescHeight(acldvppPicDesc *picDesc, uint32_t height);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's widthStride.
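+ *
+ * @par Example
+ * Typical picture-description setup sketch (illustrative only; devBuf must
+ * come from acldvppMalloc and the stride values must follow the
+ * restrictions below):
+ * @code
+ * acldvppPicDesc *picDesc = acldvppCreatePicDesc();
+ * (void)acldvppSetPicDescData(picDesc, devBuf);
+ * (void)acldvppSetPicDescFormat(picDesc, PIXEL_FORMAT_YUV_SEMIPLANAR_420);
+ * (void)acldvppSetPicDescWidth(picDesc, 1920);
+ * (void)acldvppSetPicDescHeight(picDesc, 1080);
+ * (void)acldvppSetPicDescWidthStride(picDesc, 1920);   // yuv420sp: already 16-aligned
+ * (void)acldvppSetPicDescHeightStride(picDesc, 1080);  // already 2-aligned
+ * (void)acldvppSetPicDescSize(picDesc, 1920 * 1080 * 3 / 2);  // yuv420sp frame size
+ * (void)acldvppDestroyPicDesc(picDesc);
+ * @endcode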
+ *
+ * @par Restriction
+ * Width alignment requirements:
+ * @li The minimum stride is 32 and the maximum is 4096 * 4
+ * (that is, an image in argb format with a width of 4096);
+ * @li For 8K scaling, widthStride is required to be aligned to 2;
+ * @li For non 8K scaling, the calculation formula for widthStride
+ * is different for different image formats:
+ * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16
+ * @li yuv422packed: input image width * 2 and then align to 16
+ * @li yuv444packed, rgb888: input image width * 3 and then align to 16
+ * @li xrgb8888: input image width * 4 and then align to 16
+ * @li HFBC: input image width
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param widthStride [IN] dvpp picture widthStride.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescWidthStride(acldvppPicDesc *picDesc, uint32_t widthStride);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's heightStride.
+ *
+ * @par Restriction
+ * Height alignment requirements:
+ * @li The height of the input image is aligned to 2.
+ * The height stride is a minimum of 6 and a maximum of 4096.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param heightStride [IN] dvpp picture heightStride.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescHeightStride(acldvppPicDesc *picDesc, uint32_t heightStride);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set dvpp picture description's retcode.
+ *
+ * @param picDesc [OUT] dvpp picture description.
+ * @param retCode [IN] dvpp picture retcode.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetPicDescRetCode(acldvppPicDesc *picDesc, uint32_t retCode);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get picture data.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval picture data addr.
+ * @retval default nullptr.
+ */
+ACL_FUNC_VISIBILITY void *acldvppGetPicDescData(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get picture data size.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval picture data size.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescSize(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's format.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval format
+ * @retval default PIXEL_FORMAT_YUV_400.
+ */
+ACL_FUNC_VISIBILITY acldvppPixelFormat acldvppGetPicDescFormat(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's width.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval width.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescWidth(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's height.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval height.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescHeight(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's widthStride.
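+ *
+ * @par Example
+ * An illustrative sketch of reading back the geometry that dvpp actually
+ * used for an output picture, for instance after a decode task (outputDesc
+ * is an assumed acldvppPicDesc pointer; error handling omitted):
+ * @code
+ * uint32_t w = acldvppGetPicDescWidth(outputDesc);
+ * uint32_t h = acldvppGetPicDescHeight(outputDesc);
+ * uint32_t ws = acldvppGetPicDescWidthStride(outputDesc);   // see rules below
+ * uint32_t hs = acldvppGetPicDescHeightStride(outputDesc);
+ * uint32_t rc = acldvppGetPicDescRetCode(outputDesc);       // 0 means the task succeeded
+ * @endcode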
+ *
+ * @par Restriction
+ * Width alignment requirements:
+ * @li The minimum stride is 32 and the maximum is 4096 * 4
+ * (that is, an image in argb format with a width of 4096);
+ * @li For 8K scaling, widthStride is required to be aligned to 2;
+ * @li For non 8K scaling, the calculation formula for widthStride
+ * is different for different image formats:
+ * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16
+ * @li yuv422packed: input image width * 2 and then align to 16
+ * @li yuv444packed, rgb888: input image width * 3 and then align to 16
+ * @li xrgb8888: input image width * 4 and then align to 16
+ * @li HFBC: input image width
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval stride width.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescWidthStride(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's heightStride.
+ *
+ * @par Restriction
+ * Height alignment requirements:
+ * @li The height of the input image is aligned to 2.
+ * The height stride is a minimum of 6 and a maximum of 4096.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval stride height.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescHeightStride(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp picture desc's retcode.
+ *
+ * @param picDesc [IN] dvpp picture description.
+ *
+ * @retval ret code.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetPicDescRetCode(const acldvppPicDesc *picDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp roi config.
+ *
+ * @param left [IN] the left offset, must be even
+ * @param right [IN] the right offset, must be odd
+ * @param top [IN] the top offset, must be even
+ * @param bottom [IN] the bottom offset, must be odd
+ *
+ * @retval null for failed.
+ * @retval other success
+ */
+ACL_FUNC_VISIBILITY acldvppRoiConfig *acldvppCreateRoiConfig(uint32_t left, uint32_t right, uint32_t top,
+                                                             uint32_t bottom);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp roi config.
+ *
+ * @par Function
+ * Destroys data created through the acldvppCreateRoiConfig interface
+ * @param roiConfig [IN] dvpp roi config.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateRoiConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyRoiConfig(acldvppRoiConfig *roiConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set left of RoiConfig.
+ *
+ * @param config [OUT] RoiConfig
+ * @param left [IN] left offset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigLeft(acldvppRoiConfig *config, uint32_t left);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set right of RoiConfig.
+ *
+ * @param config [OUT] RoiConfig
+ * @param right [IN] right offset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigRight(acldvppRoiConfig *config, uint32_t right);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set top of RoiConfig.
+ *
+ * @param config [OUT] RoiConfig
+ * @param top [IN] top offset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigTop(acldvppRoiConfig *config, uint32_t top);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set bottom of RoiConfig.
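+ *
+ * @par Example
+ * An illustrative sketch of building a crop area; note the even/odd
+ * constraints listed for acldvppCreateRoiConfig above (left/top even,
+ * right/bottom odd; error handling omitted):
+ * @code
+ * // crop the 224 x 224 region whose top-left corner is (16, 16)
+ * acldvppRoiConfig *cropArea = acldvppCreateRoiConfig(16, 239, 16, 239);
+ * // ... use it in a vpc crop call, then release it ...
+ * (void)acldvppDestroyRoiConfig(cropArea);
+ * @endcode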
+ *
+ * @param config [OUT] RoiConfig
+ * @param bottom [IN] bottom offset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfigBottom(acldvppRoiConfig *config, uint32_t bottom);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set RoiConfig.
+ *
+ * @param config [OUT] RoiConfig
+ * @param left [IN] left offset
+ * @param right [IN] right offset
+ * @param top [IN] top offset
+ * @param bottom [IN] bottom offset
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetRoiConfig(acldvppRoiConfig *config, uint32_t left, uint32_t right, uint32_t top,
+                                                 uint32_t bottom);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp resize config.
+ * Specifying the scaling algorithm is not supported;
+ * the default algorithm, "nearest neighbor interpolation", is used.
+ *
+ * @retval null for failed.
+ * @retval other success.
+ */
+ACL_FUNC_VISIBILITY acldvppResizeConfig *acldvppCreateResizeConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp resize config.
+ *
+ * @par Function
+ * Destroys the scaling configuration data created by
+ * the acldvppCreateResizeConfig interface
+ *
+ * @param resizeConfig [IN] resize config.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyResizeConfig(acldvppResizeConfig *resizeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create jpege config.
+ *
+ * @retval null for failed.
+ * @retval other success.
+ */
+ACL_FUNC_VISIBILITY acldvppJpegeConfig *acldvppCreateJpegeConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy jpege config.
+ *
+ * @par Function
+ * Destroys the encoding configuration data created by
+ * the acldvppCreateJpegeConfig interface
+ * @param jpegeConfig [IN] config pointer to destroy.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateJpegeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyJpegeConfig(acldvppJpegeConfig *jpegeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set jpege config's level.
+ *
+ * @param jpegeConfig [OUT] Call the acldvppCreateJpegeConfig
+ * interface to create acldvppJpegeConfig data
+ * @param level [IN] Encoding quality range [0, 100],
+ * where level 0 gives quality similar to level 100,
+ * and within [1, 100] a smaller value gives
+ * worse quality of the output picture.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetJpegeConfigLevel(acldvppJpegeConfig *jpegeConfig, uint32_t level);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get jpege config's level.
+ *
+ * @param jpegeConfig [IN] jpege config.
+ *
+ * @retval compression level.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetJpegeConfigLevel(const acldvppJpegeConfig *jpegeConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief create vdecChannelDesc. Channel description information
+ * used when creating a video data processing channel.
+ *
+ * @retval null for failed.
+ * @retval other success
+ */
+ACL_FUNC_VISIBILITY aclvdecChannelDesc *aclvdecCreateChannelDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy vdecChannelDesc.
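+ *
+ * @par Example
+ * An illustrative create/destroy pairing (see the function note below;
+ * myVdecCallback is an assumed user callback, error handling omitted):
+ * @code
+ * aclvdecChannelDesc *vdesc = aclvdecCreateChannelDesc();
+ * (void)aclvdecSetChannelDescChannelId(vdesc, 0);
+ * (void)aclvdecSetChannelDescEnType(vdesc, H265_MAIN_LEVEL);
+ * (void)aclvdecSetChannelDescCallback(vdesc, myVdecCallback);
+ * // ... aclvdecCreateChannel / aclvdecSendFrame / aclvdecDestroyChannel ...
+ * (void)aclvdecDestroyChannelDesc(vdesc);
+ * @endcode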
+ *
+ * @par Function
+ * Can only destroy aclvdecChannelDesc type created
+ * through aclvdecCreateChannelDesc interface
+ * @param channelDesc [IN] channel description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecDestroyChannelDesc(aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's channel id.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param channelId [IN] decoding channel id: 0~15.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescChannelId(aclvdecChannelDesc *channelDesc, uint32_t channelId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's thread id.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param threadId [IN] thread id.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescThreadId(aclvdecChannelDesc *channelDesc, uint64_t threadId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's callback function.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param callback [IN] function callback. Function prototype:
+ * void (* aclvdecCallback)
+ * (acldvppStreamDesc * input, acldvppPicDesc * output, void* userdata)
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCallback
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescCallback(aclvdecChannelDesc *channelDesc, aclvdecCallback callback);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's video encoding type.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param enType [IN] video encoding type.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescEnType(aclvdecChannelDesc *channelDesc, acldvppStreamFormat enType);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's out picture format.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param outPicFormat [IN] out picture format (acldvppPixelFormat).
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicFormat(aclvdecChannelDesc *channelDesc,
+                                                               acldvppPixelFormat outPicFormat);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's out picture width.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param outPicWidth [IN] out picture width.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicWidth(aclvdecChannelDesc *channelDesc, uint32_t outPicWidth);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's out picture height.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param outPicHeight [IN] out picture height.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutPicHeight(aclvdecChannelDesc *channelDesc, uint32_t outPicHeight);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's reference frame num.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param refFrameNum [IN] reference frame num.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescRefFrameNum(aclvdecChannelDesc *channelDesc, uint32_t refFrameNum);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vdec channel description's bit depth.
+ *
+ * @param channelDesc [OUT] vdec channel description.
+ * @param bitDepth [IN] bit depth.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescBitDepth(aclvdecChannelDesc *channelDesc, uint32_t bitDepth);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's channel id.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval decoding channel id: 0~15.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescChannelId(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's thread id.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval thread id.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint64_t aclvdecGetChannelDescThreadId(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's callback function.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval function callback. Function prototype:
+ * void (* aclvdecCallback)
+ * (acldvppStreamDesc * input, acldvppPicDesc * output, void* userdata)
+ * @retval default null.
+ *
+ * @see aclvdecCallback
+ */
+ACL_FUNC_VISIBILITY aclvdecCallback aclvdecGetChannelDescCallback(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's video encoding type.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval video encoding type.
+ * @retval default H265_MAIN_LEVEL.
+ */
+ACL_FUNC_VISIBILITY acldvppStreamFormat aclvdecGetChannelDescEnType(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's out picture format.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval out picture format.
+ * @retval default DVPP_OUTPUT_YUV420SP_UV.
+ */
+ACL_FUNC_VISIBILITY acldvppPixelFormat aclvdecGetChannelDescOutPicFormat(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's out picture width.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval out picture width.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutPicWidth(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's out picture height.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval out picture height (for vdec malloc memory).
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutPicHeight(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's bit depth.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval bit depth.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescBitDepth(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get vdec channel description's reference frame num.
+ *
+ * @param channelDesc [IN] vdec channel description.
+ *
+ * @retval reference frame num.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescRefFrameNum(const aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief create vencChannelDesc.
+ *
+ * @retval null for failed, other success
+ */
+ACL_FUNC_VISIBILITY aclvencChannelDesc *aclvencCreateChannelDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy vencChannelDesc.
+ *
+ * @param channelDesc [IN] channel desc.
+ *
+ * @retval ACL_SUCCESS: success, other: failed
+ */
+ACL_FUNC_VISIBILITY aclError aclvencDestroyChannelDesc(aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set encoding thread id for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param threadId [IN] thread id
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescThreadId(aclvencChannelDesc *channelDesc, uint64_t threadId);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set func callback for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param callback [IN] func callback
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescCallback(aclvencChannelDesc *channelDesc, aclvencCallback callback);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set video encoding type for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param enType [IN] video encoding type
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescEnType(aclvencChannelDesc *channelDesc, acldvppStreamFormat enType);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set pic format for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param picFormat [IN] pic format
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicFormat(aclvencChannelDesc *channelDesc,
+                                                            acldvppPixelFormat picFormat);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set pic width for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param picWidth [IN] pic width
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicWidth(aclvencChannelDesc *channelDesc, uint32_t picWidth);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set pic height for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param picHeight [IN] pic height
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescPicHeight(aclvencChannelDesc *channelDesc, uint32_t picHeight);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set key frame interval for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param keyFrameInterval [IN] Interval of key frame
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescKeyFrameInterval(aclvencChannelDesc *channelDesc,
+                                                                   uint32_t keyFrameInterval);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set output buffer address for venc channel desc.
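+ *
+ * @par Example
+ * An illustrative sketch of attaching an output buffer to a venc channel
+ * desc (vencDesc is an assumed existing desc; the buffer size is a
+ * placeholder and error handling is omitted):
+ * @code
+ * void *outBuf = nullptr;
+ * uint32_t outBufSize = 2 * 1024 * 1024;  // assumed large enough for one frame
+ * (void)acldvppMalloc(&outBuf, outBufSize);
+ * (void)aclvencSetChannelDescBufAddr(vencDesc, outBuf);
+ * (void)aclvencSetChannelDescBufSize(vencDesc, outBufSize);
+ * @endcode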
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param bufAddr [IN] output buffer address
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescBufAddr(aclvencChannelDesc *channelDesc, void *bufAddr);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set output buffer size for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param bufSize [IN] output buffer size
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescBufSize(aclvencChannelDesc *channelDesc, uint32_t bufSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set rc mode for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param rcMode [IN] venc rc mode (VBR=1, CBR=2)
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescRcMode(aclvencChannelDesc *channelDesc, uint32_t rcMode);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set source rate for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param srcRate [IN] source rate
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescSrcRate(aclvencChannelDesc *channelDesc, uint32_t srcRate);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set max bit rate for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param maxBitRate [IN] max bit rate
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescMaxBitRate(aclvencChannelDesc *channelDesc, uint32_t maxBitRate);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set venc parameter for venc channel desc.
+ *
+ * @param channelDesc [OUT] venc channel desc
+ * @param paramType [IN] parameter type
+ * @param length [IN] parameter length
+ * @param param [IN] pointer to parameter value
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetChannelDescParam(aclvencChannelDesc *channelDesc,
+                                                        aclvencChannelDescParamType paramType, size_t length,
+                                                        const void *param);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get output buffer address for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval output buffer address
+ */
+ACL_FUNC_VISIBILITY void *aclvencGetChannelDescBufAddr(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get output buffer size for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval output buffer size
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescBufSize(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get encoding channel id for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval encoding channel id: 0~15, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescChannelId(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get encoding thread id for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval thread id, default 0
+ */
+ACL_FUNC_VISIBILITY uint64_t aclvencGetChannelDescThreadId(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get func callback for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval func callback, default null
+ */
+ACL_FUNC_VISIBILITY aclvencCallback aclvencGetChannelDescCallback(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get video encoding type for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval video encoding type, default H265_MAIN_LEVEL
+ */
+ACL_FUNC_VISIBILITY acldvppStreamFormat aclvencGetChannelDescEnType(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get pic format for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval pic format
+ */
+ACL_FUNC_VISIBILITY acldvppPixelFormat aclvencGetChannelDescPicFormat(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get pic width for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval pic width, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescPicWidth(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get pic height for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval pic height, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescPicHeight(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get interval of key frame for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval interval of key frame, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescKeyFrameInterval(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get rc mode for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval rc mode, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescRcMode(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get source rate for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval source rate, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescSrcRate(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get max bit rate for venc channel desc.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval max bit rate, default 0
+ */
+ACL_FUNC_VISIBILITY uint32_t aclvencGetChannelDescMaxBitRate(const aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get venc parameter for venc channel desc.
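+ *
+ * @par Example
+ * An illustrative sketch of the generic parameter interface, using the
+ * max-bitrate key (vencDesc is an assumed existing desc, the bitrate value
+ * is a placeholder; error handling omitted):
+ * @code
+ * uint32_t maxBitRate = 8000;
+ * (void)aclvencSetChannelDescParam(vencDesc, ACL_VENC_MAX_BITRATE_UINT32,
+ *                                  sizeof(maxBitRate), &maxBitRate);
+ * uint32_t readBack = 0;
+ * size_t retSize = 0;
+ * (void)aclvencGetChannelDescParam(vencDesc, ACL_VENC_MAX_BITRATE_UINT32,
+ *                                  sizeof(readBack), &retSize, &readBack);
+ * @endcode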
+ *
+ * @param channelDesc [IN] venc channel desc
+ * @param paramType [IN] parameter type
+ * @param length [IN] parameter length
+ * @param paramRetSize [OUT] pointer to parameter real length
+ * @param param [OUT] pointer to parameter value
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError aclvencGetChannelDescParam(const aclvencChannelDesc *channelDesc,
+                                                        aclvencChannelDescParamType paramType, size_t length,
+                                                        size_t *paramRetSize, void *param);
+
+/**
+ * @ingroup AscendCL
+ * @brief get forced restart of I-frame interval from config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval 0: not forced; 1: forced restart of I-frame; -1: error
+ */
+ACL_FUNC_VISIBILITY uint8_t aclvencGetFrameConfigForceIFrame(const aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief get end frame flag from config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval Whether it is the end frame: 0: no; 1: end frame
+ */
+ACL_FUNC_VISIBILITY uint8_t aclvencGetFrameConfigEos(const aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief set single frame encoding configuration parameters
+ *
+ * @param config [OUT] venc frame config
+ * @param forceIFrame [IN] forced restart of I-frame interval: 0: not forced; 1: forced restart of I-frame
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetFrameConfigForceIFrame(aclvencFrameConfig *config, uint8_t forceIFrame);
+
+/**
+ * @ingroup AscendCL
+ * @brief set single frame encoding configuration parameters
+ *
+ * @param config [OUT] venc frame config
+ * @param eos [IN] Whether it is the end frame: 0: no; 1: end frame
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSetFrameConfigEos(aclvencFrameConfig *config, uint8_t eos);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp venc destroy frame config
+ *
+ * @param config [IN] venc frame config
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencDestroyFrameConfig(aclvencFrameConfig *config);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp venc frame config.
+ *
+ * @retval null for failed, other aclvencFrameConfig ptr
+ */
+ACL_FUNC_VISIBILITY aclvencFrameConfig *aclvencCreateFrameConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp venc channel.
+ *
+ * @param channelDesc [IN|OUT] venc channel desc
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencCreateChannel(aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp venc channel.
+ *
+ * @param channelDesc [IN] venc channel desc
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencDestroyChannel(aclvencChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp venc launch send frame task.
+ *
+ * @param channelDesc [IN] venc channel desc
+ * @param input [IN] input picture desc
+ * @param reserve [IN] reserve parameter
+ * @param config [IN] dvpp frame config
+ * @param userdata [IN] user data for callback function
+ *
+ * @retval ACL_SUCCESS for ok, others for fail
+ */
+ACL_FUNC_VISIBILITY aclError aclvencSendFrame(aclvencChannelDesc *channelDesc, acldvppPicDesc *input, void *reserve,
+                                              aclvencFrameConfig *config, void *userdata);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp stream description.
+ *
+ * @retval null for failed.
+ * @retval other success.
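+ *
+ * @par Example
+ * An illustrative sketch of filling a stream description for one input
+ * bitstream packet (dataDev/dataSize are assumed to describe memory from
+ * acldvppMalloc; error handling omitted):
+ * @code
+ * acldvppStreamDesc *sd = acldvppCreateStreamDesc();
+ * (void)acldvppSetStreamDescData(sd, dataDev);
+ * (void)acldvppSetStreamDescSize(sd, dataSize);
+ * (void)acldvppSetStreamDescFormat(sd, H265_MAIN_LEVEL);
+ * (void)acldvppSetStreamDescEos(sd, 0);  // not the last packet
+ * // ... pass sd to aclvdecSendFrame(...) ...
+ * (void)acldvppDestroyStreamDesc(sd);
+ * @endcode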
+ */
+ACL_FUNC_VISIBILITY acldvppStreamDesc *acldvppCreateStreamDesc();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp stream description.
+ *
+ * @par Function
+ * Can only destroy acldvppStreamDesc type created through
+ * acldvppCreateStreamDesc interface.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateStreamDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyStreamDesc(acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's data addr.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param dataDev [IN] data addr.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescData(acldvppStreamDesc *streamDesc, void *dataDev);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's data size.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param size [IN] data size.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescSize(acldvppStreamDesc *streamDesc, uint32_t size);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's format.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param format [IN] stream format.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescFormat(acldvppStreamDesc *streamDesc, acldvppStreamFormat format);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's timestamp.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param timestamp [IN] current timestamp.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescTimestamp(acldvppStreamDesc *streamDesc, uint64_t timestamp);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's ret code.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param retCode [IN] result code.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescRetCode(acldvppStreamDesc *streamDesc, uint32_t retCode);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set stream description's eos.
+ *
+ * @param streamDesc [OUT] dvpp stream description.
+ * @param eos [IN] end flag of sequence.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetStreamDescEos(acldvppStreamDesc *streamDesc, uint8_t eos);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's data addr.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval data addr.
+ * @retval default nullptr.
+ */
+ACL_FUNC_VISIBILITY void *acldvppGetStreamDescData(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's data size.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval data size.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetStreamDescSize(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's format.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval stream format.
+ * @retval default ACL_DVPP_STREAM_H264.
+ */
+ACL_FUNC_VISIBILITY acldvppStreamFormat acldvppGetStreamDescFormat(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's timestamp.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval current timestamp.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint64_t acldvppGetStreamDescTimestamp(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's retCode.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval result code.
+ * @retval default 0.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetStreamDescRetCode(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get stream description's eos.
+ *
+ * @param streamDesc [IN] dvpp stream description.
+ *
+ * @retval end flag of sequence.
+ * @retval default 0 (false).
+ */
+ACL_FUNC_VISIBILITY uint8_t acldvppGetStreamDescEos(const acldvppStreamDesc *streamDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create vdec frame config.
+ *
+ * @retval null for failed.
+ * @retval other success.
+ */
+ACL_FUNC_VISIBILITY aclvdecFrameConfig *aclvdecCreateFrameConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy vdec frame config.
+ *
+ * @par Function
+ * Can only destroy aclvdecFrameConfig type created through
+ * aclvdecCreateFrameConfig interface
+ *
+ * @param vdecFrameConfig [IN] vdec frame config.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateFrameConfig
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecDestroyFrameConfig(aclvdecFrameConfig *vdecFrameConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get image width and height of jpeg.
+ *
+ * @param data [IN] image data in host memory
+ * @param size [IN] the size of image data
+ * @param width [OUT] the width of image from image header
+ * @param height [OUT] the height of image from image header
+ * @param components [OUT] the components of image from image header
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegGetImageInfo(const void *data, uint32_t size, uint32_t *width, uint32_t *height,
+                                                     int32_t *components);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get image width and height of jpeg.
+ *
+ * @param data [IN] image data in host memory
+ * @param size [IN] the size of image data
+ * @param width [OUT] the width of image from image header
+ * @param height [OUT] the height of image from image header
+ * @param components [OUT] the components of image from image header
+ * @param format [OUT] the format of image from image header
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegGetImageInfoV2(const void *data, uint32_t size, uint32_t *width,
+                                                       uint32_t *height, int32_t *components,
+                                                       acldvppJpegFormat *format);
+
+/**
+ * @ingroup AscendCL
+ * @brief Predict encode size of jpeg image.
+ *
+ * @param inputDesc [IN] dvpp image desc
+ * @param config [IN] jpeg encode config
+ * @param size [OUT] the size predicted of image
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegPredictEncSize(const acldvppPicDesc *inputDesc,
+                                                       const acldvppJpegeConfig *config, uint32_t *size);
+
+/**
+ * @ingroup AscendCL
+ * @brief Predict decode size of jpeg image.
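+ *
+ * @par Example
+ * An illustrative sketch of sizing the output buffer before decoding a jpeg
+ * (jpegData/jpegSize are the assumed host-memory image; the pixel-format
+ * enumerator name is assumed; error handling omitted):
+ * @code
+ * uint32_t w = 0, h = 0;
+ * int32_t comps = 0;
+ * (void)acldvppJpegGetImageInfo(jpegData, jpegSize, &w, &h, &comps);
+ * uint32_t decSize = 0;
+ * (void)acldvppJpegPredictDecSize(jpegData, jpegSize,
+ *                                 PIXEL_FORMAT_YUV_SEMIPLANAR_420, &decSize);
+ * void *decBuf = nullptr;
+ * (void)acldvppMalloc(&decBuf, decSize);
+ * @endcode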
+ *
+ * @param data [IN] origin image data in host memory
+ * @param dataSize [IN] the size of origin image data
+ * @param outputPixelFormat [IN] the output pixel format of jpeg decode
+ * @param decSize [OUT] the size predicted for decode image
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegPredictDecSize(const void *data, uint32_t dataSize,
+                                                       acldvppPixelFormat outputPixelFormat, uint32_t *decSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get image width and height of png.
+ *
+ * @param data [IN] image data in host memory
+ * @param dataSize [IN] the size of image data
+ * @param width [OUT] the width of image from image header
+ * @param height [OUT] the height of image from image header
+ * @param components [OUT] the components of image from image header
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppPngGetImageInfo(const void *data, uint32_t dataSize, uint32_t *width,
+                                                    uint32_t *height, int32_t *components);
+
+/**
+ * @ingroup AscendCL
+ * @brief Predict decode size of png image.
+ *
+ * @param data [IN] origin image data in host memory
+ * @param dataSize [IN] the size of origin image data
+ * @param outputPixelFormat [IN] the output pixel format of png decode
+ * @param decSize [OUT] the size predicted for decode image
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppPngPredictDecSize(const void *data, uint32_t dataSize,
+                                                      acldvppPixelFormat outputPixelFormat, uint32_t *decSize);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp channel, the same channel can be reused
+ * and is no longer available after destruction.
+ *
+ * @param channelDesc [IN|OUT] the channel description
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppCreateChannel(acldvppChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy dvpp channel.
+ *
+ * @par Restriction
+ * Can only destroy channel created through the acldvppCreateChannel interface
+ *
+ * @param channelDesc [IN] the channel description
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyChannel(acldvppChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc resize.
+ *
+ * @par Restriction
+ * Width alignment requirements:
+ * @li The minimum stride is 32 and the maximum is 4096 * 4
+ * (that is, an image in argb format with a width of 4096);
+ * @li For 8K scaling, widthStride is required to be aligned to 2;
+ * @li For non 8K scaling, the calculation formula for widthStride
+ * is different for different image formats:
+ * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16
+ * @li yuv422packed: input image width * 2 and then align to 16
+ * @li yuv444packed, rgb888: input image width * 3 and then align to 16
+ * @li xrgb8888: input image width * 4 and then align to 16
+ * @li HFBC: input image width
+ * Height alignment requirements:
+ * @li The height of the input image is aligned to 2.
+ * The height stride is a minimum of 6 and a maximum of 4096.
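+ *
+ * @par Example
+ * An illustrative resize sketch (channelDesc, inputDesc, and outputDesc are
+ * assumed to be prepared as described above, and stream is an existing
+ * aclrtStream; error handling omitted):
+ * @code
+ * acldvppResizeConfig *cfg = acldvppCreateResizeConfig();
+ * (void)acldvppVpcResizeAsync(channelDesc, inputDesc, outputDesc, cfg, stream);
+ * // ... synchronize the stream before reading outputDesc ...
+ * (void)acldvppDestroyResizeConfig(cfg);
+ * @endcode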
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] resize input picture description
+ * @param outputDesc [IN|OUT] resize output picture description
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] resize task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc
+ * | acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcResizeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                   acldvppPicDesc *outputDesc, acldvppResizeConfig *resizeConfig,
+                                                   aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop.
+ *
+ * @par Function
+ * crop the input picture according to the specified area,
+ * and then store the picture in the output memory as the output picture
+ *
+ * @par Restriction
+ * Width alignment requirements:
+ * @li The minimum stride is 32 and the maximum is 4096 * 4
+ * (that is, an image in argb format with a width of 4096);
+ * @li For 8K scaling, widthStride is required to be aligned to 2;
+ * @li For non 8K scaling, the calculation formula for widthStride
+ * is different for different image formats:
+ * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16
+ * @li yuv422packed: input image width * 2 and then align to 16
+ * @li yuv444packed, rgb888: input image width * 3 and then align to 16
+ * @li xrgb8888: input image width * 4 and then align to 16
+ * @li HFBC: input image width
+ * Height alignment requirements:
+ * @li The height of the input image is aligned to 2.
+ * The height stride is a minimum of 6 and a maximum of 4096.
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] crop input picture description
+ * @param outputDesc [IN|OUT] crop output picture description
+ * @param cropArea [IN] crop area config
+ * @param stream [IN] crop task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                 acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                 aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop and resize config.
+ *
+ * @par Function
+ * crop the input picture with resize config according to the specified area,
+ * and then store the picture in the output memory as the output picture
+ *
+ * @par Restriction
+ * Width alignment requirements:
+ * @li The minimum stride is 32 and the maximum is 4096 * 4
+ * (that is, an image in argb format with a width of 4096);
+ * @li For 8K scaling, widthStride is required to be aligned to 2;
+ * @li For non 8K scaling, the calculation formula for widthStride
+ * is different for different image formats:
+ * @li yuv400sp, yuv420sp, yuv422sp, yuv444sp: input image width aligned to 16
+ * @li yuv422packed: input image width * 2 and then align to 16
+ * @li yuv444packed, rgb888: input image width * 3 and then align to 16
+ * @li xrgb8888: input image width * 4 and then align to 16
+ * @li HFBC: input image width
+ * Height alignment requirements:
+ * @li The height of the input image is aligned to 2.
+ * The height stride is a minimum of 6 and a maximum of 4096.
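+ *
+ * @par Example
+ * An illustrative crop-and-resize sketch (descriptors and stream prepared as
+ * above; error handling omitted):
+ * @code
+ * acldvppRoiConfig *cropArea = acldvppCreateRoiConfig(16, 239, 16, 239);
+ * acldvppResizeConfig *cfg = acldvppCreateResizeConfig();
+ * (void)acldvppVpcCropResizeAsync(channelDesc, inputDesc, outputDesc,
+ *                                 cropArea, cfg, stream);
+ * // ... synchronize the stream, then release the configs ...
+ * (void)acldvppDestroyResizeConfig(cfg);
+ * (void)acldvppDestroyRoiConfig(cropArea);
+ * @endcode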
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] crop input picture description
+ * @param outputDesc [IN|OUT] crop output picture description
+ * @param cropArea [IN] crop area config
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop and resize config task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropResizeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                       acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                       acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop.
+ *
+ * @par Function
+ * crop the input batch picture according to the specified area
+ * as the output batch pictures
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcBatchPicDescs [IN] crop input batch picture description
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] size of roiNums array
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture description
+ * @param cropAreas [IN] crop area configs
+ * @param stream [IN] crop batch task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropAsync(acldvppChannelDesc *channelDesc,
+                                                      acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums,
+                                                      uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs,
+                                                      acldvppRoiConfig *cropAreas[], aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop and resize config.
+ *
+ * @par Function
+ * crop the input batch picture with resize config according to the specified area
+ * as the output batch pictures
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcBatchPicDescs [IN] crop input batch picture description
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] size of roiNums array
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture description
+ * @param cropAreas [IN] crop area configs
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop batch and resize config task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateDvppConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizeAsync(acldvppChannelDesc *channelDesc,
+                                                            acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums,
+                                                            uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs,
+                                                            acldvppRoiConfig *cropAreas[],
+                                                            acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop and paste.
+ *
+ * @par Function
+ * crop the input picture according to the specified area,
+ * and paste the picture to the specified position of the target picture
+ * as the output picture
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] crop and paste input picture description
+ * @param outputDesc [IN|OUT] crop and paste output picture description
+ * @param cropArea [IN] crop area config
+ * @param pasteArea [IN] paste area config
+ * @param stream [IN] crop and paste task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateRoiConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropAndPasteAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                         acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                         acldvppRoiConfig *pasteArea, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc crop, resize config and paste.
+ *
+ * @par Function
+ * crop the input picture with resize config according to the specified area,
+ * and paste the picture to the specified position of the target picture
+ * as the output picture
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] crop and paste input picture description
+ * @param outputDesc [IN|OUT] crop and paste output picture description
+ * @param cropArea [IN] crop area config
+ * @param pasteArea [IN] paste area config
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop, paste and resize task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCropResizePasteAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                            acldvppPicDesc *outputDesc, acldvppRoiConfig *cropArea,
+                                                            acldvppRoiConfig *pasteArea,
+                                                            acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop and paste.
+ *
+ * @par Function
+ * crop the input batch picture according to the specified area,
+ * and paste the pictures to the specified position of the target pictures
+ * as the output batch pictures
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcBatchPicDescs [IN] crop input batch picture description
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] size of roiNums array
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture description
+ * @param cropAreas [IN] crop area configs
+ * @param pasteAreas [IN] paste area configs
+ * @param stream [IN] crop batch task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropAndPasteAsync(acldvppChannelDesc *channelDesc,
+                                                              acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums,
+                                                              uint32_t size, acldvppBatchPicDesc *dstBatchPicDescs,
+                                                              acldvppRoiConfig *cropAreas[],
+                                                              acldvppRoiConfig *pasteAreas[], aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop, resize config and paste.
+ *
+ * @par Function
+ * crop the input batch picture with resize config according to the specified area,
+ * and paste the pictures to the specified position of the target pictures
+ * as the output batch pictures
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcBatchPicDescs [IN] crop input batch picture description
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] size of roiNums array
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture description
+ * @param cropAreas [IN] crop area configs
+ * @param pasteAreas [IN] paste area configs
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop batch and resize config task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizePasteAsync(
+    acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
+    acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppRoiConfig *pasteAreas[],
+    acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc jpeg decode.
+ *
+ * @par Function
+ * For different source picture formats, the following output formats
+ * are supported after decoding:
+ * @li jpeg(444) -> YUV444SP (V in front, U behind),
+ * YUV420SP (V in front, U behind), YUV420SP (U in front, V behind);
+ * @li jpeg(422) -> YUV422SP (V in front, U behind),
+ * YUV420SP (V in front, U behind), YUV420SP (U in front, V behind);
+ * @li jpeg(420) -> YUV420SP (V in front, U behind),
+ * YUV420SP (U in front, V behind);
+ * @li jpeg(400) -> YUV420SP (UV data is filled with 0x80).
+ *
+ * @param channelDesc [IN] the channel description
+ * @param data [IN] decode input picture description's data
+ * @param size [IN] decode input picture description's size
+ * @param outputDesc [IN|OUT] decode output picture description
+ * @param stream [IN] decode task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegDecodeAsync(acldvppChannelDesc *channelDesc, const void *data, uint32_t size,
+                                                    acldvppPicDesc *outputDesc, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc jpeg encode.
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] encode input picture description
+ * @param data [OUT] encode output picture description's data
+ * @param size [IN|OUT] encode output picture description's size
+ * @param config [IN] jpeg encode config
+ * @param stream [IN] encode task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateJpegeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppJpegEncodeAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                    const void *data, uint32_t *size, acldvppJpegeConfig *config,
+                                                    aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc png decode.
+ *
+ * @param channelDesc [IN] the channel description
+ * @param data [IN] decode input picture description's data
+ * @param size [IN] decode input picture description's size
+ * @param outputDesc [IN|OUT] decode output picture description
+ * @param stream [IN] decode task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppPngDecodeAsync(acldvppChannelDesc *channelDesc, const void *data, uint32_t size,
+                                                   acldvppPicDesc *outputDesc, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create vdec channel.
+ *
+ * @par Function
+ * Create a channel for video data processing,
+ * the same channel can be reused,
+ * and is no longer available after destruction
+ *
+ * @param channelDesc [IN|OUT] the channel description
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecCreateChannel(aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy vdec channel.
+ *
+ * @par Function
+ * Can only destroy channels created by the aclvdecCreateChannel interface
+ *
+ * @param channelDesc [IN] the channel description
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannel
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecDestroyChannel(aclvdecChannelDesc *channelDesc);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vdec send frame.
+ *
+ * @par Function
+ * Pass the input memory to be decoded
+ * and the decoded output memory to the decoder for decoding
+ *
+ * @param channelDesc [IN] vdec channel description
+ * @param input [IN] input stream description
+ * @param output [IN|OUT] output picture description
+ * @param config [IN] vdec frame config
+ * @param userData [IN] user data for callback function
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannel | acldvppCreateStreamDesc | acldvppCreatePicDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSendFrame(aclvdecChannelDesc *channelDesc, acldvppStreamDesc *input,
+                                              acldvppPicDesc *output, aclvdecFrameConfig *config, void *userData);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vdec send skipped frame.
+ *
+ * @par Function
+ * Pass video frame to decoder
+ *
+ * @param channelDesc [IN] vdec channel description
+ * @param input [IN] input stream description
+ * @param config [IN] vdec frame config
+ * @param userData [IN] user data for callback function
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecCreateChannel | acldvppCreateStreamDesc | acldvppCreatePicDesc | aclvdecSendFrame
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSendSkippedFrame(aclvdecChannelDesc *channelDesc, acldvppStreamDesc *input,
+                                                     aclvdecFrameConfig *config, void *userData);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc convert color.
+ *
+ * @par Restriction
+ * @li outputDesc: width, height, and stride cannot be changed; just configure 0
+ * @par Function
+ * Convert color gamut
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] convert color input picture description
+ * @param outputDesc [IN|OUT] convert color output picture description
+ * @param stream [IN] convert color task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcConvertColorAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc,
+                                                         acldvppPicDesc *outputDesc, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc pyramid down.
+ *
+ * @par Restriction
+ * @li outputDesc: only the YUV400 format is supported
+ * @par Function
+ * Image pyramid down
+ *
+ * @param channelDesc [IN] the channel description
+ * @param inputDesc [IN] pyr down input picture description
+ * @param outputDesc [IN|OUT] pyr down output picture description
+ * @param reserve [IN] reserved param, must be nullptr
+ * @param stream [IN] pyr down task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure + * + * @see acldvppCreateChannel | acldvppCreatePicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppVpcPyrDownAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *inputDesc, + acldvppPicDesc *outputDesc, void *reserve, aclrtStream stream); + +/** + * @ingroup AscendCL + * @brief Set dvpp channel mode. + * + * @param channelDesc [OUT] the channel destruction + * @param mode [IN] channel mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetChannelDescMode(acldvppChannelDesc *channelDesc, uint32_t mode); + +/** + * @ingroup AscendCL + * @brief Set resize config interpolation. + * + * @param resizeConfig [OUT] the resize config + * @param interpolation [IN] interpolation + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError acldvppSetResizeConfigInterpolation(acldvppResizeConfig *resizeConfig, + uint32_t interpolation); + +/** + * @ingroup AscendCL + * @brief Get resize config interpolation. + * + * @param resizeConfig [IN] the resize config + * + * @retval Interpolation of resize config. + */ +ACL_FUNC_VISIBILITY uint32_t acldvppGetResizeConfigInterpolation(const acldvppResizeConfig *resizeConfig); + +/** + * @ingroup AscendCL + * @brief Set vdec channel out mode. + * + * @param channelDesc [OUT] the channel destruction + * @param outMode [IN] channel out mode + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + */ +ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescOutMode(aclvdecChannelDesc *channelDesc, uint32_t outMode); + +/** + * @ingroup AscendCL + * @brief Get vdec channel out mode. + * + * @param channelDesc [IN] the channel destruction + * + * @retval Out mode of channel destruction + * @retval default 0 + */ +ACL_FUNC_VISIBILITY uint32_t aclvdecGetChannelDescOutMode(const aclvdecChannelDesc *channelDesc); + +/** + * @ingroup AscendCL + * @brief Create dvpp batch picture description. + * + * @param batchSize [IN] batch size + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppBatchPicDesc *acldvppCreateBatchPicDesc(uint32_t batchSize); + +/** + * @ingroup AscendCL + * @brief Get dvpp picture description. + * + * @param batchPicDesc [IN] dvpp batch picture description. + * @param index [IN] index of batch + * + * @retval null for failed. + * @retval OtherValues Failure + * + * @see acldvppCreateBatchPicDesc + */ +ACL_FUNC_VISIBILITY acldvppPicDesc *acldvppGetPicDesc(acldvppBatchPicDesc *batchPicDesc, uint32_t index); + +/** + * @ingroup AscendCL + * @brief Destroy dvpp batch picture description. + * + * @par Function + * Can only destroy batch picture description information created + * through acldvppCreateBatchPicDesc interface. + * + * @param batchPicDesc [IN] dvpp batch picture description. + * + * @retval ACL_SUCCESS The function is successfully executed. + * @retval OtherValues Failure + * + * @see acldvppCreateBatchPicDesc + */ +ACL_FUNC_VISIBILITY aclError acldvppDestroyBatchPicDesc(acldvppBatchPicDesc *batchPicDesc); + +/** + * @ingroup AscendCL + * @brief Create dvpp lut map. + * + * @retval null for failed. + * @retval OtherValues success. + */ +ACL_FUNC_VISIBILITY acldvppLutMap *acldvppCreateLutMap(); + +/** + * @ingroup AscendCL + * @brief Destroy lut map. 
+ *
+ * @param lutMap [IN] lut map
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyLutMap(acldvppLutMap *lutMap);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get lut map dims.
+ *
+ * @param lutMap [IN] lut map
+ *
+ * @retval 0 for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetLutMapDims(const acldvppLutMap *lutMap);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get lut map data.
+ *
+ * @param lutMap [IN] lut map
+ * @param dim [IN] input dim of map
+ * @param data [OUT] data of the specified dim of the lut map
+ * @param len [OUT] length of the data
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppGetLutMapData(const acldvppLutMap *lutMap, uint32_t dim, uint8_t **data,
+                                                  uint32_t *len);
+
+/**
+ * @ingroup AscendCL
+ * @brief Vpc equalize hist.
+ *
+ * @param channelDesc [IN] channel desc
+ * @param inputDesc [IN] input desc
+ * @param outputDesc [IN|OUT] output desc
+ * @param lutMap [IN] lut map param
+ * @param stream [IN] runtime stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel|acldvppCreatePicDesc|acldvppCreateLutMap
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcEqualizeHistAsync(const acldvppChannelDesc *channelDesc,
+                                                         const acldvppPicDesc *inputDesc, acldvppPicDesc *outputDesc,
+                                                         const acldvppLutMap *lutMap, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create dvpp border config.
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY acldvppBorderConfig *acldvppCreateBorderConfig();
+
+/**
+ * @ingroup AscendCL
+ * @brief Set value of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param index [IN] index of value array
+ * @param value [IN] value
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigValue(acldvppBorderConfig *borderConfig, uint32_t index,
+                                                         double value);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set border type of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param borderType [IN] border type
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigBorderType(acldvppBorderConfig *borderConfig,
+                                                              acldvppBorderType borderType);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set top of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param top [IN] top of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigTop(acldvppBorderConfig *borderConfig, uint32_t top);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set bottom of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param bottom [IN] bottom of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigBottom(acldvppBorderConfig *borderConfig, uint32_t bottom);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set left of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param left [IN] left of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigLeft(acldvppBorderConfig *borderConfig, uint32_t left);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set right of border config.
+ *
+ * @param borderConfig [OUT] border config
+ * @param right [IN] right of border
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetBorderConfigRight(acldvppBorderConfig *borderConfig, uint32_t right);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get value of border config.
+ *
+ * @param borderConfig [IN] border config
+ * @param index [IN] index of value array
+ *
+ * @retval invalid value is < 0, normal value is >= 0
+ */
+ACL_FUNC_VISIBILITY double acldvppGetBorderConfigValue(const acldvppBorderConfig *borderConfig, uint32_t index);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get border type of border config.
+ *
+ * @param borderConfig [IN] border config
+ * @retval border type of border config
+ */
+ACL_FUNC_VISIBILITY acldvppBorderType acldvppGetBorderConfigBorderType(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get top of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, top value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigTop(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get bottom of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, bottom value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigBottom(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get left of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, left value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigLeft(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get right of border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval default 0, right value of border config
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetBorderConfigRight(const acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy border config.
+ *
+ * @param borderConfig [IN] border config
+ *
+ * @retval ACL_SUCCESS for success, other for failure
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyBorderConfig(acldvppBorderConfig *borderConfig);
+
+/**
+ * @ingroup AscendCL
+ * @brief Vpc make border.
+ *
+ * @param channelDesc [IN] channel desc
+ * @param inputDesc [IN] input desc
+ * @param outputDesc [IN|OUT] output desc
+ * @param borderConfig [IN] border config param
+ * @param stream [IN] runtime stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel|acldvppCreatePicDesc|acldvppCreateBorderConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcMakeBorderAsync(const acldvppChannelDesc *channelDesc,
+                                                       const acldvppPicDesc *inputDesc, acldvppPicDesc *outputDesc,
+                                                       const acldvppBorderConfig *borderConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Dvpp vpc calc hist.
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcPicDesc [IN] calc hist input picture description
+ * @param hist [IN|OUT] calc hist output hist description
+ * @param reserve [IN] reserved param, must be nullptr
+ * @param stream [IN] task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreatePicDesc | acldvppCreateHist
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcCalcHistAsync(acldvppChannelDesc *channelDesc, acldvppPicDesc *srcPicDesc,
+                                                     acldvppHist *hist, void *reserve, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create vpc hist description.
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY acldvppHist *acldvppCreateHist();
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy vpc hist description.
+ *
+ * @par Function
+ * Can only destroy hist description information created
+ * through acldvppCreateHist interface.
+ *
+ * @param hist [IN] vpc hist description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateHist
+ */
+ACL_FUNC_VISIBILITY aclError acldvppDestroyHist(acldvppHist *hist);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dims of vpc hist description.
+ *
+ * @param hist [IN] vpc hist description.
+ *
+ * @retval dims of vpc hist description.
+ *
+ * @see acldvppCreateHist | acldvppVpcCalcHistAsync
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetHistDims(acldvppHist *hist);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get data from vpc hist description by dim.
+ *
+ * @param hist [IN] vpc hist description.
+ * @param dim [IN] which dim to get data.
+ * @param data [OUT] address of output hist data.
+ * @param len [OUT] len of output hist data.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateHist | acldvppVpcCalcHistAsync
+ */
+ACL_FUNC_VISIBILITY aclError acldvppGetHistData(acldvppHist *hist, uint32_t dim, uint32_t **data, uint16_t *len);
+
+/**
+ * @ingroup AscendCL
+ * @brief Get dvpp calc hist process return code.
+ *
+ * @param hist [IN] vpc hist description.
+ *
+ * @retval Dvpp calc hist process return code.
+ *
+ * @see acldvppCreateHist | acldvppVpcCalcHistAsync
+ */
+ACL_FUNC_VISIBILITY uint32_t acldvppGetHistRetCode(acldvppHist *hist);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set vpc hist description to 0.
+ *
+ * @par Function
+ * Can only clear hist description information created
+ * through acldvppCreateHist interface.
+ *
+ * @param hist [IN] vpc hist description.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateHist
+ */
+ACL_FUNC_VISIBILITY aclError acldvppClearHist(acldvppHist *hist);
+
+/**
+ * @ingroup AscendCL
+ * @brief dvpp vpc batch crop, resize config and make border.
+ *
+ * @par Function
+ * crop the input batch pictures according to the specified areas,
+ * then resize them and make borders, producing the output batch pictures
+ *
+ * @param channelDesc [IN] the channel description
+ * @param srcBatchPicDescs [IN] crop input batch picture description
+ * @param roiNums [IN] roi config numbers
+ * @param size [IN] roiNums size
+ * @param dstBatchPicDescs [IN|OUT] crop output batch picture description
+ * @param cropAreas [IN] crop area configs
+ * @param borderCfgs [IN] border configs
+ * @param resizeConfig [IN] resize config
+ * @param stream [IN] crop batch, resize config and make border task stream
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
+ */
+ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizeMakeBorderAsync(
+  acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
+  acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppBorderConfig *borderCfgs[],
+  acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INC_EXTERNAL_ACL_OPS_ACL_DVPP_H_
diff --git a/inc/external/acl/ops/acl_fv.h b/inc/external/acl/ops/acl_fv.h
new file mode 100644
index 00000000..4bd392c9
--- /dev/null
+++ b/inc/external/acl/ops/acl_fv.h
@@ -0,0 +1,348 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INC_EXTERNAL_ACL_OPS_ACL_RETR_H_
+#define INC_EXTERNAL_ACL_OPS_ACL_RETR_H_
+
+#include "acl/acl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct aclfvInitPara aclfvInitPara;
+typedef struct aclfvFeatureInfo aclfvFeatureInfo;
+typedef struct aclfvRepoRange aclfvRepoRange;
+typedef struct aclfvQueryTable aclfvQueryTable;
+typedef struct aclfvSearchInput aclfvSearchInput;
+typedef struct aclfvSearchResult aclfvSearchResult;
+
+// search operation type
+enum aclfvSearchType {
+  SEARCH_1_N,  // 1:N operation type
+  SEARCH_N_M   // N:M operation type
+};
+
+/**
+ * @ingroup AscendCL
+ * @brief Create fv init param.
+ *
+ * @param fsNum [IN] The feature num
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY aclfvInitPara *aclfvCreateInitPara(uint64_t fsNum);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy fv init param.
+ *
+ * @par Function
+ * Can only destroy fv init param information created
+ * through aclfvCreateInitPara interface.
+ *
+ * @param initPara [IN] fv init param.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateInitPara
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroyInitPara(aclfvInitPara *initPara);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set the value of maxTopNumFor1N in fv init param.
+ *
+ * @param initPara [IN|OUT] fv init param.
+ * @param maxTopNumFor1N [IN] maxTopNumFor1N value for init param.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclfvSet1NTopNum(aclfvInitPara *initPara, uint32_t maxTopNumFor1N);
+
+/**
+ * @ingroup AscendCL
+ * @brief Set the value of maxTopNumForNM in fv init param.
+ *
+ * @param initPara [IN|OUT] fv init param.
+ * @param maxTopNumForNM [IN] maxTopNumForNM value for init param.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclfvSetNMTopNum(aclfvInitPara *initPara, uint32_t maxTopNumForNM);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create fv feature info.
+ *
+ * @param id0 [IN] The first level library id0
+ * @param id1 [IN] Secondary library id1
+ * @param offset [IN] The offset of the first feature in the library
+ * @param featureLen [IN] Single feature length
+ * @param featureCount [IN] Single feature count
+ * @param featureData [IN] Feature value list
+ * @param featureDataLen [IN] Feature value list length
+ *
+ * @retval null for failed.
+ * @retval OtherValues success.
+ */
+ACL_FUNC_VISIBILITY aclfvFeatureInfo *aclfvCreateFeatureInfo(uint32_t id0, uint32_t id1, uint32_t offset,
+                                                             uint32_t featureLen, uint32_t featureCount,
+                                                             uint8_t *featureData, uint32_t featureDataLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy fv feature info.
+ *
+ * @par Function
+ * Can only destroy fv feature info information created
+ * through aclfvCreateFeatureInfo interface.
+ *
+ * @param featureInfo [IN] fv feature info.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateFeatureInfo
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroyFeatureInfo(aclfvFeatureInfo *featureInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create fv repo range.
+ *
+ * @param id0Min [IN] id0 min value
+ * @param id0Max [IN] id0 max value
+ * @param id1Min [IN] id1 min value
+ * @param id1Max [IN] id1 max value
+ *
+ * @retval null for failed. OtherValues success
+ */
+ACL_FUNC_VISIBILITY aclfvRepoRange *aclfvCreateRepoRange(uint32_t id0Min, uint32_t id0Max, uint32_t id1Min,
+                                                         uint32_t id1Max);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy fv repo range.
+ *
+ * @par Function
+ * Can only destroy fv repo range information created
+ * through aclfvCreateRepoRange interface.
+ *
+ * @param repoRange [IN] fv repo range.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateRepoRange
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroyRepoRange(aclfvRepoRange *repoRange);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create query table.
+ *
+ * @param queryCnt [IN] Number of tables, the maximum number is 6
+ * @param tableLen [IN] Single table length, table length is 32KB
+ * @param tableData [IN] Feature value list
+ * @param tableDataLen [IN] The length of memory requested by the tableData pointer
+ *
+ * @retval null for failed. OtherValues success
+ */
+ACL_FUNC_VISIBILITY aclfvQueryTable *aclfvCreateQueryTable(uint32_t queryCnt, uint32_t tableLen, uint8_t *tableData,
+                                                           uint32_t tableDataLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy query table.
+ *
+ * @par Function
+ * Can only destroy query table information created
+ * through aclfvCreateQueryTable interface.
+ *
+ * @param queryTable [IN] query table.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateQueryTable
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroyQueryTable(aclfvQueryTable *queryTable);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create search input.
+ *
+ * @param queryTable [IN] query table
+ * @param repoRange [IN] query repo range
+ * @param topk [IN] query topk
+ *
+ * @retval null for failed. OtherValues success
+ */
+ACL_FUNC_VISIBILITY aclfvSearchInput *aclfvCreateSearchInput(aclfvQueryTable *queryTable, aclfvRepoRange *repoRange,
+                                                             uint32_t topk);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy search input.
+ *
+ * @par Function
+ * Can only destroy search input information created
+ * through aclfvCreateSearchInput interface.
+ *
+ * @param searchInput [IN] search input.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateSearchInput
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroySearchInput(aclfvSearchInput *searchInput);
+
+/**
+ * @ingroup AscendCL
+ * @brief Create search result.
+ *
+ * @param queryCnt [IN] Retrieve the number of features
+ * @param resultNum [IN] The number of search results for each feature; the array length is queryCnt
+ * @param resultNumDataLen [IN] resultNum memory length
+ * @param id0 [IN] Level 1 library id0
+ * @param id1 [IN] Secondary library id1
+ * @param resultOffset [IN] The offset of the bottom library corresponding
+ * to each feature retrieval result, total length topK * queryCnt
+ * @param resultDistance [IN] Distance, total length topK * queryCnt
+ * @param dataLen [IN] The memory size requested by
+ * id0/id1/resultOffset/resultDistance
+ *
+ * @retval null for failed. OtherValues success
+ */
+ACL_FUNC_VISIBILITY aclfvSearchResult *aclfvCreateSearchResult(uint32_t queryCnt, uint32_t *resultNum,
+                                                               uint32_t resultNumDataLen, uint32_t *id0, uint32_t *id1,
+                                                               uint32_t *resultOffset, float *resultDistance,
+                                                               uint32_t dataLen);
+
+/**
+ * @ingroup AscendCL
+ * @brief Destroy search result.
+ *
+ * @par Function
+ * Can only destroy search result information created
+ * through aclfvCreateSearchResult interface.
+ *
+ * @param searchResult [IN] search result.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclfvCreateSearchResult
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDestroySearchResult(aclfvSearchResult *searchResult);
+
+/**
+ * @ingroup AscendCL
+ * @brief fv IP initialize.
+ *
+ * @param initPara [IN] fv init param.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvInit(aclfvInitPara *initPara);
+
+/**
+ * @ingroup AscendCL
+ * @brief release fv resources.
+ *
+ * @par Function
+ * Can only release fv resources created
+ * through aclfvInit interface.
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ *
+ * @see aclfvInit
+ */
+ACL_FUNC_VISIBILITY aclError aclfvRelease();
+
+/**
+ * @ingroup AscendCL
+ * @brief fv repo add.
+ *
+ * @param type [IN] repo add type
+ * @param featureInfo [IN] add feature information
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvRepoAdd(aclfvSearchType type, aclfvFeatureInfo *featureInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief fv repo del.
+ *
+ * @param type [IN] repo delete type
+ * @param repoRange [IN] repo range information
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvRepoDel(aclfvSearchType type, aclfvRepoRange *repoRange);
+
+/**
+ * @ingroup AscendCL
+ * @brief fv accurate del.
+ *
+ * @param featureInfo [IN] accurate delete feature information
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvDel(aclfvFeatureInfo *featureInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief fv accurate modify.
+ *
+ * @param featureInfo [IN] accurate modify feature information
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvModify(aclfvFeatureInfo *featureInfo);
+
+/**
+ * @ingroup AscendCL
+ * @brief fv search.
+ *
+ * @param type [IN] search type
+ * @param searchInput [IN] search input
+ * @param searchRst [OUT] search result
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure.
+ */
+ACL_FUNC_VISIBILITY aclError aclfvSearch(aclfvSearchType type, aclfvSearchInput *searchInput,
+                                         aclfvSearchResult *searchRst);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // INC_EXTERNAL_ACL_OPS_ACL_RETR_H_
diff --git a/inc/external/hccl/hccl.h b/inc/external/hccl/hccl.h
new file mode 100644
index 00000000..8261adc4
--- /dev/null
+++ b/inc/external/hccl/hccl.h
@@ -0,0 +1,159 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file hccl.h
+ * @brief HCCL API
+ */
+
+#ifndef HCCL_H_
+#define HCCL_H_
+
+#include <hccl/hccl_types.h>
+#include <acl/acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+/**
+ * @brief Initialize HCCL.
+ *
+ * @param clusterInfo A string identifying the cluster info file path, including the file name.
+ * @param rank An integer identifying the rank.
+ * @param comm A pointer identifying the initialized communication resource.
+ * @return HcclResult
+ * @see HcclCommDestroy()
+ */
+extern HcclResult HcclCommInitClusterInfo(const char *clusterInfo, uint32_t rank, HcclComm *comm);
+
+/**
+ * @brief Get hccl root info.
+ *
+ * @param rootInfo A pointer identifying the hccl root info.
+ * @return HcclResult
+ */
+extern HcclResult HcclGetRootInfo(HcclRootInfo *rootInfo);
+
+/**
+ * @brief Initialize HCCL with root info.
+ *
+ * @param nRanks An integer identifying the rank size of the cluster.
+ * @param rootInfo A struct identifying the hccl root info.
+ * @param rank An integer identifying the rank.
+ * @param comm A pointer identifying the initialized communication resource.
+ * @return HcclResult
+ * @see HcclCommDestroy()
+ */
+extern HcclResult HcclCommInitRootInfo(uint32_t nRanks, const HcclRootInfo *rootInfo, uint32_t rank, HcclComm *comm);
+
+/**
+ * @brief AllReduce operator.
+ *
+ * @param sendBuf A pointer identifying the input data address of the operator.
+ * @param recvBuf A pointer identifying the output data address of the operator.
+ * @param count An integer(u64) identifying the number of the output data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int16, int32, float16,
+ * float32.
+ * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod.
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclAllReduce(void *sendBuf, void *recvBuf, uint64_t count, HcclDataType dataType, HcclReduceOp op,
+                                HcclComm comm, aclrtStream stream);
+
+/**
+ * @brief Broadcast operator.
+ *
+ * @param buf A pointer identifying the data address of the operator.
+ * @param count An integer(u64) identifying the number of the data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32.
+ * @param root An integer(u32) identifying the root rank in the operator.
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclBroadcast(void *buf, uint64_t count, HcclDataType dataType, uint32_t root, HcclComm comm,
+                                aclrtStream stream);
+
+/**
+ * @brief ReduceScatter operator.
+ *
+ * @param sendBuf A pointer identifying the input data address of the operator.
+ * @param recvBuf A pointer identifying the output data address of the operator.
+ * @param recvCount An integer(u64) identifying the number of the output data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32.
+ * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod.
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclReduceScatter(void *sendBuf, void *recvBuf, uint64_t recvCount, HcclDataType dataType,
+                                    HcclReduceOp op, HcclComm comm, aclrtStream stream);
+
+/**
+ * @brief AllGather operator.
+ *
+ * @param sendBuf A pointer identifying the input data address of the operator.
+ * @param recvBuf A pointer identifying the output data address of the operator.
+ * @param sendCount An integer(u64) identifying the number of the input data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32.
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclAllGather(void *sendBuf, void *recvBuf, uint64_t sendCount, HcclDataType dataType, HcclComm comm,
+                                aclrtStream stream);
+
+/**
+ * @brief Get the rank size of this comm.
+ *
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param rankSize A pointer identifying the rank size.
+ * @return HcclResult
+ */
+extern HcclResult HcclGetRankSize(HcclComm comm, uint32_t *rankSize);
+
+/**
+ * @brief Get the rank id of this comm.
+ *
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param rank A pointer identifying the rank id.
+ * @return HcclResult
+ */
+extern HcclResult HcclGetRankId(HcclComm comm, uint32_t *rank);
+
+/**
+ * @brief Barrier operator.
+ *
+ * @param comm A pointer identifying the communication resource to operate on.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclBarrier(HcclComm comm, aclrtStream stream);
+
+/**
+ * @brief Destroy HCCL comm.
+ *
+ * @param comm A pointer identifying the communication resource to be destroyed.
+ * @return HcclResult
+ * @see HcclCommInitClusterInfo()
+ */
+extern HcclResult HcclCommDestroy(HcclComm comm);
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+#endif  // HCCL_H_
diff --git a/inc/external/hccl/hccl_types.h b/inc/external/hccl/hccl_types.h
new file mode 100644
index 00000000..0e832396
--- /dev/null
+++ b/inc/external/hccl/hccl_types.h
@@ -0,0 +1,101 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file hccl_types.h
+ * @brief HCCL data type definition
+ *
+ */
+
+#ifndef HCCL_TYPES_H_
+#define HCCL_TYPES_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif  // __cplusplus
+
+/**
+ * @brief HCCL functions return value definition
+ */
+typedef enum {
+  HCCL_SUCCESS = 0,               /**< success */
+  HCCL_E_PARA = 1,                /**< parameter error */
+  HCCL_E_PTR = 2,                 /**< empty pointer */
+  HCCL_E_MEMORY = 3,              /**< memory error */
+  HCCL_E_INTERNAL = 4,            /**< internal error */
+  HCCL_E_NOT_SUPPORT = 5,         /**< not support feature */
+  HCCL_E_NOT_FOUND = 6,           /**< not found specific resource */
+  HCCL_E_UNAVAIL = 7,             /**< resource unavailable */
+  HCCL_E_SYSCALL = 8,             /**< call system interface error */
+  HCCL_E_TIMEOUT = 9,             /**< timeout */
+  HCCL_E_OPEN_FILE_FAILURE = 10,  /**< open file fail */
+  HCCL_E_TCP_CONNECT = 11,        /**< tcp connect fail */
+  HCCL_E_ROCE_CONNECT = 12,       /**< roce connect fail */
+  HCCL_E_TCP_TRANSFER = 13,       /**< tcp transfer fail */
+  HCCL_E_ROCE_TRANSFER = 14,      /**< roce transfer fail */
+  HCCL_E_RUNTIME = 15,            /**< call runtime api fail */
+  HCCL_E_DRV = 16,                /**< call driver api fail */
+  HCCL_E_PROFILING = 17,          /**< call profiling api fail */
+  HCCL_E_CCE = 18,                /**< call cce api fail */
+  HCCL_E_NETWORK = 19,            /**< call network api fail */
+  HCCL_E_RESERVED                 /**< reserved */
+} HcclResult;
+
+/**
+ * @brief handle to HCCL communicator
+ */
+typedef void *HcclComm;
+
+/**
+ * @brief HCCL reduction operation
+ */
+typedef enum {
+  HCCL_REDUCE_SUM = 0,   /**< sum */
+  HCCL_REDUCE_PROD = 1,  /**< prod */
+  HCCL_REDUCE_MAX = 2,   /**< max */
+  HCCL_REDUCE_MIN = 3,   /**< min */
+  HCCL_REDUCE_RESERVED   /**< reserved */
+} HcclReduceOp;
+
+/**
+ * @brief HCCL data type
+ */
+typedef enum {
+  HCCL_DATA_TYPE_INT8 = 0,    /**< int8 */
+  HCCL_DATA_TYPE_INT16 = 1,   /**< int16 */
+  HCCL_DATA_TYPE_INT32 = 2,   /**< int32 */
+  HCCL_DATA_TYPE_FP16 = 3,    /**< fp16 */
+  HCCL_DATA_TYPE_FP32 = 4,    /**< fp32 */
+  HCCL_DATA_TYPE_INT64 = 5,   /**< int64 */
+  HCCL_DATA_TYPE_UINT64 = 6,  /**< uint64 */
+  HCCL_DATA_TYPE_RESERVED     /**< reserved */
+} HcclDataType;
+
+const uint32_t HCCL_ROOT_INFO_BYTES = 4108;  // 4108: root info length
+
+/**
+ * @brief HCCL root info
+ */
+typedef struct HcclRootInfoDef {
+  char internal[HCCL_ROOT_INFO_BYTES];
+} HcclRootInfo;
+
+#ifdef __cplusplus
+}
+#endif  // __cplusplus
+#endif  // HCCL_TYPES_H_
diff --git a/inc/external/runtime/rt_error_codes.h b/inc/external/runtime/rt_error_codes.h
new file mode 100644
index 00000000..a1392cc6
--- /dev/null
+++ b/inc/external/runtime/rt_error_codes.h
@@ -0,0 +1,109 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INC_EXTERNEL_RT_ERROR_CODES_H__
+#define __INC_EXTERNEL_RT_ERROR_CODES_H__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static const int32_t ACL_RT_SUCCESS = 0;  // success
+
+static const int32_t ACL_ERROR_RT_PARAM_INVALID = 107000;  // param invalid
+static const int32_t ACL_ERROR_RT_INVALID_DEVICEID = 107001;  // invalid device id
+static const int32_t ACL_ERROR_RT_CONTEXT_NULL = 107002;  // current context null
+static const int32_t ACL_ERROR_RT_STREAM_CONTEXT = 107003;  // stream not in current context
+static const int32_t ACL_ERROR_RT_MODEL_CONTEXT = 107004;  // model not in current context
+static const int32_t ACL_ERROR_RT_STREAM_MODEL = 107005;  // stream not in model
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_INVALID = 107006;  // event timestamp invalid
+static const int32_t ACL_ERROR_RT_EVENT_TIMESTAMP_REVERSAL = 107007;  // event timestamp reversal
+static const int32_t ACL_ERROR_RT_ADDR_UNALIGNED = 107008;  // memory address unaligned
+static const int32_t ACL_ERROR_RT_FILE_OPEN = 107009;  // open file failed
+static const int32_t ACL_ERROR_RT_FILE_WRITE = 107010;  // write file failed
+static const int32_t ACL_ERROR_RT_STREAM_SUBSCRIBE = 107011;  // error subscribe stream
+static const int32_t ACL_ERROR_RT_THREAD_SUBSCRIBE = 107012;  // error subscribe thread
+static const int32_t ACL_ERROR_RT_GROUP_NOT_SET = 107013;  // group not set
+static const int32_t ACL_ERROR_RT_GROUP_NOT_CREATE = 107014;  // group not create
+static const int32_t ACL_ERROR_RT_STREAM_NO_CB_REG = 107015;  // callback not registered to stream
+static const int32_t ACL_ERROR_RT_INVALID_MEMORY_TYPE = 107016;  // invalid memory type
+static const int32_t ACL_ERROR_RT_INVALID_HANDLE = 107017;  // invalid handle
+static const int32_t ACL_ERROR_RT_INVALID_MALLOC_TYPE = 107018;  // invalid malloc type
+static const int32_t ACL_ERROR_RT_WAIT_TIMEOUT = 107019;  // wait timeout
+
+static const int32_t ACL_ERROR_RT_FEATURE_NOT_SUPPORT = 207000;  // feature not supported
+static const int32_t ACL_ERROR_RT_MEMORY_ALLOCATION = 207001;  // memory allocation error
+static const int32_t ACL_ERROR_RT_MEMORY_FREE = 207002;  // memory free error
+static const int32_t ACL_ERROR_RT_AICORE_OVER_FLOW = 207003;  // aicore overflow
+static const int32_t ACL_ERROR_RT_NO_DEVICE = 207004;  // no device
+static const int32_t ACL_ERROR_RT_RESOURCE_ALLOC_FAIL = 207005;  // resource alloc fail
+static const int32_t ACL_ERROR_RT_NO_PERMISSION = 207006;  // no permission
+static const int32_t ACL_ERROR_RT_NO_EVENT_RESOURCE = 207007;  // no event resource
+static const int32_t ACL_ERROR_RT_NO_STREAM_RESOURCE = 207008;  // no stream resource
+static const int32_t ACL_ERROR_RT_NO_NOTIFY_RESOURCE = 207009;  // no notify resource
+static const int32_t ACL_ERROR_RT_NO_MODEL_RESOURCE = 207010;  // no model resource
+static const int32_t ACL_ERROR_RT_NO_CDQ_RESOURCE = 207011;  // no cdq resource
+
+static const int32_t ACL_ERROR_RT_INTERNAL_ERROR = 507000;  // runtime internal error
+static const int32_t ACL_ERROR_RT_TS_ERROR = 507001;  // ts internal error
+static const int32_t ACL_ERROR_RT_STREAM_TASK_FULL = 507002;  // task full in stream
+static const int32_t ACL_ERROR_RT_STREAM_TASK_EMPTY = 507003;  // task empty in stream
+static const int32_t ACL_ERROR_RT_STREAM_NOT_COMPLETE = 507004;  // stream not complete
+static const int32_t ACL_ERROR_RT_END_OF_SEQUENCE = 507005;  // end of sequence
+static const int32_t ACL_ERROR_RT_EVENT_NOT_COMPLETE = 507006;  // event not complete
+static const int32_t ACL_ERROR_RT_CONTEXT_RELEASE_ERROR = 507007;  // context release error
+static const int32_t ACL_ERROR_RT_SOC_VERSION = 507008;  // soc version error
+static const int32_t ACL_ERROR_RT_TASK_TYPE_NOT_SUPPORT = 507009;  // task type not support
+static const int32_t ACL_ERROR_RT_LOST_HEARTBEAT = 507010;  // ts lost heartbeat
+static const int32_t ACL_ERROR_RT_MODEL_EXECUTE = 507011;  // model execute failed
+static const int32_t ACL_ERROR_RT_REPORT_TIMEOUT = 507012;  // report timeout
+static const int32_t ACL_ERROR_RT_SYS_DMA = 507013;  // sys dma error
+static const int32_t ACL_ERROR_RT_AICORE_TIMEOUT = 507014;  // aicore timeout
+static const int32_t ACL_ERROR_RT_AICORE_EXCEPTION = 507015;  // aicore exception
+static const int32_t ACL_ERROR_RT_AICORE_TRAP_EXCEPTION = 507016;  // aicore trap exception
+static const int32_t ACL_ERROR_RT_AICPU_TIMEOUT = 507017;  // aicpu timeout
+static const int32_t ACL_ERROR_RT_AICPU_EXCEPTION = 507018;  // aicpu exception
+static const int32_t ACL_ERROR_RT_AICPU_DATADUMP_RSP_ERR = 507019;  // aicpu datadump response error
+static const int32_t ACL_ERROR_RT_AICPU_MODEL_RSP_ERR = 507020;  // aicpu model operate response error
+static const int32_t ACL_ERROR_RT_PROFILING_ERROR = 507021;  // profiling error
+static const int32_t ACL_ERROR_RT_IPC_ERROR = 507022;  // ipc error
+static const int32_t ACL_ERROR_RT_MODEL_ABORT_NORMAL = 507023;  // model abort normal
+static const int32_t ACL_ERROR_RT_KERNEL_UNREGISTERING = 507024;  // kernel unregistering
+static const int32_t ACL_ERROR_RT_RINGBUFFER_NOT_INIT = 507025;  // ringbuffer not init
+static const int32_t ACL_ERROR_RT_RINGBUFFER_NO_DATA = 507026;  // ringbuffer no data
+static const int32_t ACL_ERROR_RT_KERNEL_LOOKUP = 507027;  // kernel lookup error
+static const int32_t ACL_ERROR_RT_KERNEL_DUPLICATE = 507028;  // kernel register duplicate
+static const int32_t ACL_ERROR_RT_DEBUG_REGISTER_FAIL = 507029;  // debug register failed
+static const int32_t ACL_ERROR_RT_DEBUG_UNREGISTER_FAIL = 507030;  // debug unregister failed
+static const int32_t ACL_ERROR_RT_LABEL_CONTEXT = 507031;  // label not in current context
+static const int32_t ACL_ERROR_RT_PROGRAM_USE_OUT = 507032;  // program register num use out
+static const int32_t ACL_ERROR_RT_DEV_SETUP_ERROR = 507033;  // device setup error
+static const int32_t ACL_ERROR_RT_VECTOR_CORE_TIMEOUT = 507034;  // vector core timeout
+static const int32_t ACL_ERROR_RT_VECTOR_CORE_EXCEPTION = 507035;  // vector core exception
+static const int32_t ACL_ERROR_RT_VECTOR_CORE_TRAP_EXCEPTION = 507036;  // vector core trap exception
+static const int32_t ACL_ERROR_RT_CDQ_BATCH_ABNORMAL = 507037;  // cdq alloc batch abnormal
+
+static const int32_t ACL_ERROR_RT_DRV_INTERNAL_ERROR = 507899;  // drv internal error
+static const int32_t ACL_ERROR_RT_AICPU_INTERNAL_ERROR = 507900;  // aicpu internal error
+static const int32_t ACL_ERROR_RT_SOCKET_CLOSE = 507901;  // hdc disconnect
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // __INC_EXTERNEL_RT_ERROR_CODES_H__
diff --git a/inc/framework/ge_runtime/task_info.h b/inc/framework/ge_runtime/task_info.h
index f59c6454..4530bff7 100644
--- a/inc/framework/ge_runtime/task_info.h
+++ b/inc/framework/ge_runtime/task_info.h
@@ -271,13 +271,14 @@ class FusionEndTaskInfo : public TaskInfo {
 class HcclTaskInfo : public TaskInfo {
  public:
   HcclTaskInfo(const std::string &op_name, uint32_t stream_id, const std::string hccl_type, void *input_data_addr,
-               void *output_data_addr, int64_t workspace_size, int64_t hccl_stream_num,
+               void *output_data_addr, void *workspace_addr, int64_t workspace_size, int64_t hccl_stream_num,
                const std::vector<uint8_t> &private_def, void *ops_kernel_store, int32_t count, int64_t root_id,
                int64_t op_type, int64_t data_type, const std::string &group, bool dump_flag)
       : TaskInfo(op_name, stream_id, TaskInfoType::HCCL, dump_flag),
         hccl_type_(hccl_type),
         input_data_addr_(input_data_addr),
         output_data_addr_(output_data_addr),
+        workspace_addr_(workspace_addr),
         workspace_size_(workspace_size),
         hccl_stream_num_(hccl_stream_num),
         private_def_(private_def),
@@ -292,6 +293,7 @@ class HcclTaskInfo : public TaskInfo {
   const std::string &hccl_type() const { return hccl_type_; }
   void *input_data_addr() const { return input_data_addr_; }
   void *output_data_addr() const { return output_data_addr_; }
+  void *workspace_addr() const { return workspace_addr_; }
   int64_t workspace_size() const { return workspace_size_; }
   int64_t hccl_stream_num() const { return hccl_stream_num_; }
   const std::vector<uint8_t> &private_def() const { return private_def_; }
@@ -306,6 +308,7 @@ class HcclTaskInfo : public TaskInfo {
   std::string hccl_type_;
   void *input_data_addr_;
   void *output_data_addr_;
+  void *workspace_addr_;
   int64_t workspace_size_;
   int64_t hccl_stream_num_;
   std::vector<uint8_t> private_def_;
diff --git a/metadef b/metadef
index a725349b..21178899 160000
--- a/metadef
+++ b/metadef
@@ -1 +1 @@
-Subproject commit a725349b65aef2940555af2ddb7b9461fbe0d5fd
+Subproject commit 211788997dcc9aa63527541a44d511388c06bce5
diff --git a/scripts/format_source_code.sh b/scripts/format_source_code.sh
new file mode 100755
index 00000000..1fd0b4f6
--- /dev/null
+++ b/scripts/format_source_code.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# Copyright 2019-2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+set -e
+
+CLANG_FORMAT=$(which clang-format) || (echo "Please install 'clang-format' tool first"; exit 1)
+
+version=$("${CLANG_FORMAT}" --version | sed -n "s/.*\ \([0-9]*\)\.[0-9]*\.[0-9]*.*/\1/p")
+if [[ "${version}" -lt "8" ]]; then
+  echo "clang-format's version must be at least 8.0.0"
+  exit 1
+fi
+
+CURRENT_PATH=$(pwd)
+SCRIPTS_PATH=$(dirname "$0")
+
+echo "CURRENT_PATH=${CURRENT_PATH}"
+echo "SCRIPTS_PATH=${SCRIPTS_PATH}"
+
+# print usage message
+function usage()
+{
+  echo "Format the specified source files to conform to the code style."
+  echo "Usage:"
+  echo "bash $0 [-a] [-c] [-l] [-h]"
+  echo "e.g. $0 -c"
+  echo ""
+  echo "Options:"
+  echo " -a Format all files"
+  echo " -c Format the files changed compared to the last commit (default)"
+  echo " -l Format the files changed in the last commit"
+  echo " -h Print usage"
+}
+
+# check and set options
+function checkopts()
+{
+  # init variable
+  mode="changed"  # default: format changed files
+
+  # Process the options
+  while getopts 'aclh' opt
+  do
+    case "${opt}" in
+      a)
+        mode="all"
+        ;;
+      c)
+        mode="changed"
+        ;;
+      l)
+        mode="lastcommit"
+        ;;
+      h)
+        usage
+        exit 0
+        ;;
+      *)
+        echo "Unknown option ${opt}!"
+        usage
+        exit 1
+    esac
+  done
+}
+
+# check options
+checkopts "$@"
+
+# switch to project root path, which contains clang-format config file '.clang-format'
+cd "${SCRIPTS_PATH}/.." || exit 1
+
+FMT_FILE_LIST='__format_files_list__'
+
+if [[ "X${mode}" == "Xall" ]]; then
+  find src -type f -name "*" | grep "\.h$\|\.cc$" > "${FMT_FILE_LIST}" || true
+  find inc -type f -name "*" | grep "\.h$\|\.cc$" >> "${FMT_FILE_LIST}" || true
+elif [[ "X${mode}" == "Xchanged" ]]; then
+  # --diff-filter=ACMRTUXB will ignore deleted files in commit
+  git diff --diff-filter=ACMRTUXB --name-only | grep "^inc\|^src" | grep "\.h$\|\.cc$" > "${FMT_FILE_LIST}" || true
+else  # "X${mode}" == "Xlastcommit"
+  git diff --diff-filter=ACMRTUXB --name-only HEAD~ HEAD | grep "^inc\|^src" | grep "\.h$\|\.cc$" > "${FMT_FILE_LIST}" || true
+fi
+
+while read line; do
+  if [ -f "${line}" ]; then
+    ${CLANG_FORMAT} -i "${line}"
+  fi
+done < "${FMT_FILE_LIST}"
+
+rm "${FMT_FILE_LIST}"
+cd "${CURRENT_PATH}" || exit 1
+
+echo "Specified cpp source files have been formatted successfully."
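Taken together, the HCCL interfaces declared in inc/external/hccl/hccl.h earlier in this patch compose into a single create/collective/destroy lifecycle per device. The following is an editor's minimal sketch of that flow, not part of the patch itself: the cluster-info path "ranktable.json", the helper name RunAllReduce, the element count, and the omitted error handling are all illustrative assumptions; only APIs declared above plus standard ACL runtime calls are used.

#include <hccl/hccl.h>
#include <acl/acl.h>

/* Sketch: each rank (0..n-1) runs this with its own deviceId and rank number. */
int RunAllReduce(int32_t deviceId, uint32_t rank) {
  aclInit(NULL);                 /* return-code checks elided for brevity */
  aclrtSetDevice(deviceId);
  aclrtStream stream;
  aclrtCreateStream(&stream);

  HcclComm comm;
  /* "ranktable.json" is a placeholder cluster info file path */
  HcclCommInitClusterInfo("ranktable.json", rank, &comm);

  uint64_t count = 1024;         /* illustrative element count */
  void *sendBuf = NULL;
  void *recvBuf = NULL;
  aclrtMalloc(&sendBuf, count * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST);
  aclrtMalloc(&recvBuf, count * sizeof(float), ACL_MEM_MALLOC_HUGE_FIRST);

  /* sum-reduce float32 data across all ranks, then wait for completion */
  HcclAllReduce(sendBuf, recvBuf, count, HCCL_DATA_TYPE_FP32, HCCL_REDUCE_SUM, comm, stream);
  aclrtSynchronizeStream(stream);

  aclrtFree(sendBuf);
  aclrtFree(recvBuf);
  HcclCommDestroy(comm);
  aclrtDestroyStream(stream);
  aclrtResetDevice(deviceId);
  aclFinalize();
  return 0;
}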
diff --git a/third_party/fwkacllib/inc/cce/taskdown_common.hpp b/third_party/fwkacllib/inc/cce/taskdown_common.hpp index 3ecea523..7954162e 100644 --- a/third_party/fwkacllib/inc/cce/taskdown_common.hpp +++ b/third_party/fwkacllib/inc/cce/taskdown_common.hpp @@ -27,15 +27,16 @@ namespace cce { #define CC_FUSION_OP_MAX 32 typedef enum tagccKernelType { - CCE_AI_CORE = 0, /* cce aicore */ - CCE_AI_CPU = 1, /* cce aicpu */ - TE = 2, /* te operator*/ - CUSTOMIZED = 3, /* customized operator */ - TE_AI_CORE = 4, /* te aicore operator*/ - TE_AI_CPU = 5, /* te aicpu operator */ - AI_CPU = 6, /* aicpu */ - CUST_AI_CPU = 7, /* custom aicpu*/ - INVALID = 8, /* unknown kernel type */ + CCE_AI_CORE = 0, /* cce aicore */ + CCE_AI_CPU = 1, /* cce aicpu */ + TE = 2, /* te operator*/ + CUSTOMIZED = 3, /* customized operator */ + TE_AI_CORE = 4, /* te aicore operator*/ + TE_AI_CPU = 5, /* te aicpu operator */ + AI_CPU = 6, /* aicpu */ + CUST_AI_CPU = 7, /* custom aicpu*/ + HOST_CPU = 8, /* host cpu */ + INVALID = 10000 /* unknown kernel type */ } ccKernelType; typedef struct tagOpContext { diff --git a/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h b/third_party/fwkacllib/inc/external/runtime/rt_error_codes.h old mode 100755 new mode 100644 diff --git a/third_party/fwkacllib/inc/hccl/base.h b/third_party/fwkacllib/inc/hccl/base.h index e57563b3..ffbf552b 100644 --- a/third_party/fwkacllib/inc/hccl/base.h +++ b/third_party/fwkacllib/inc/hccl/base.h @@ -124,27 +124,27 @@ struct HcomRemoteAccessAddrInfo { }; struct HcomAllToAllVParams { - void *sendbuf; - void *sendcounts; - void *sdispls; - HcclDataType sendtype; - void *recvbuf; - void *recvcounts; - void *rdispls; - HcclDataType recvtype; - const char *group; + void *sendbuf; // device mem + void *sendcounts; // device mem; Type: uint_64 + void *sdispls; // device mem; Type: uint_64 + HcclDataType sendtype; + void *recvbuf; // device mem + void *recvcounts; // device mem; Type: uint_64 + void *rdispls; // device mem; Type: uint_64 + HcclDataType recvtype; + const char *group; // not used now }; struct HcomGatherAllToAllVParams { - void *addrInfo; - void *addrInfoCountPerRank; - void *recvbuf; - void *recvcounts; - void *rdispls; - void *gatheredbuf; - s32 addrLength; - HcclDataType recvtype; - const char *group; + void *addrInfo; // device mem; contains host VA[uint_64]: [addr, length, addr, length, addr, length, ...] + void *addrInfoCountPerRank; // device mem; length: ranksize; contains addrInfoCounts for every rank + void *recvbuf; // device mem + void *recvcounts; // device mem; Type: uint_64 + void *rdispls; // device mem; Type: uint_64 + void *gatheredbuf; // device mem + s32 addrLength; + HcclDataType recvtype; + const char *group; // not used now }; #ifdef __cplusplus diff --git a/third_party/fwkacllib/inc/hccl/hccl_types.h b/third_party/fwkacllib/inc/hccl/hccl_types.h deleted file mode 100644 index 50a64795..00000000 --- a/third_party/fwkacllib/inc/hccl/hccl_types.h +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file hccl_types.h
- * @brief HCCL data type definition
- *
- */
-
-#ifndef HCCL_TYPES_H_
-#define HCCL_TYPES_H_
-
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif  // __cplusplus
-
-/**
- * @brief HCCL functions return value definition
- */
-typedef enum {
-  HCCL_SUCCESS = 0,               /**< success */
-  HCCL_E_PARA = 1,                /**< parameter error */
-  HCCL_E_PTR = 2,                 /**< empty pointer */
-  HCCL_E_MEMORY = 3,              /**< memory error */
-  HCCL_E_INTERNAL = 4,            /**< internal error */
-  HCCL_E_NOT_SUPPORT = 5,         /**< not support feature */
-  HCCL_E_NOT_FOUND = 6,           /**< not found specific resource */
-  HCCL_E_UNAVAIL = 7,             /**< resource unavailable */
-  HCCL_E_SYSCALL = 8,             /**< call system interface error */
-  HCCL_E_TIMEOUT = 9,             /**< timeout */
-  HCCL_E_OPEN_FILE_FAILURE = 10,  /**< open file fail */
-  HCCL_E_TCP_CONNECT = 11,        /**< tcp connect fail */
-  HCCL_E_ROCE_CONNECT = 12,       /**< roce connect fail */
-  HCCL_E_TCP_TRANSFER = 13,       /**< tcp transfer fail */
-  HCCL_E_ROCE_TRANSFER = 14,      /**< roce transfer fail */
-  HCCL_E_RUNTIME = 15,            /**< call runtime api fail */
-  HCCL_E_DRV = 16,                /**< call driver api fail */
-  HCCL_E_PROFILING = 17,          /**< call profiling api fail */
-  HCCL_E_CCE = 18,                /**< call cce api fail */
-  HCCL_E_NETWORK = 19,            /**< call network api fail */
-  HCCL_E_RESERVED                 /**< reserved */
-} HcclResult;
-
-/**
- * @brief handle to HCCL communicator
- */
-typedef void *HcclComm;
-
-/**
- * @brief HCCL Reduction opperation
- */
-typedef enum {
-  HCCL_REDUCE_SUM = 0,   /**< sum */
-  HCCL_REDUCE_PROD = 1,  /**< prod */
-  HCCL_REDUCE_MAX = 2,   /**< max */
-  HCCL_REDUCE_MIN = 3,   /**< min */
-  HCCL_REDUCE_RESERVED   /**< reserved */
-} HcclReduceOp;
-
-/**
- * @brief HCCL data type
- */
-typedef enum {
-  HCCL_DATA_TYPE_INT8 = 0,    /**< int8 */
-  HCCL_DATA_TYPE_INT16 = 1,   /**< int16 */
-  HCCL_DATA_TYPE_INT32 = 2,   /**< int32 */
-  HCCL_DATA_TYPE_FP16 = 3,    /**< fp16 */
-  HCCL_DATA_TYPE_FP32 = 4,    /**< fp32 */
-  HCCL_DATA_TYPE_INT64 = 5,   /**< int64 */
-  HCCL_DATA_TYPE_UINT64 = 6,  /**< uint64 */
-  HCCL_DATA_TYPE_RESERVED     /**< reserved */
-} HcclDataType;
-
-const uint32_t HCCL_ROOT_INFO_BYTES = 4108;  // 4108: root info length
-
-/**
- * @brief HCCL root info
- */
-typedef struct HcclRootInfoDef {
-  char internal[HCCL_ROOT_INFO_BYTES];
-} HcclRootInfo;
-
-#ifdef __cplusplus
-}
-#endif  // __cplusplus
-#endif  // HCCL_TYPES_H_
diff --git a/third_party/fwkacllib/inc/hccl/hcom.h b/third_party/fwkacllib/inc/hccl/hcom.h
index 955764d6..bf1f395b 100644
--- a/third_party/fwkacllib/inc/hccl/hcom.h
+++ b/third_party/fwkacllib/inc/hccl/hcom.h
@@ -164,8 +164,22 @@
 HcclResult HcomExecEnqueueRemoteAccess(const std::string& remoteAccessType,
                                        const std::vector<HcomRemoteAccessAddrInfo>& addrInfos,
                                        std::function<void(HcclResult status)> callback);
 
+/**
+ * @brief Put alltoallv communication operation into hcom executor.
+ *
+ * @param params information about alltoallv communication operation.
+ * @param callback callback after collective communication operation.
+ * @return HcclResult
+ */
 HcclResult HcomExecEnqueueAllToAllV(HcomAllToAllVParams params, std::function<void(HcclResult status)> callback);
 
+/**
+ * @brief Put gather alltoallv communication operation into hcom executor.
+ *
+ * @param params information about gather alltoallv communication operation.
+ * @param callback callback after collective communication operation.
+ * @return HcclResult
+ */
 HcclResult HcomExecEnqueueGatherAllToAllV(HcomGatherAllToAllVParams params,
                                           std::function<void(HcclResult status)> callback);
diff --git a/third_party/fwkacllib/inc/mmpa/mmpa_api.h b/third_party/fwkacllib/inc/mmpa/mmpa_api.h
index 38a689ee..f8d5ccf3 100644
--- a/third_party/fwkacllib/inc/mmpa/mmpa_api.h
+++ b/third_party/fwkacllib/inc/mmpa/mmpa_api.h
@@ -56,6 +56,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
diff --git a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h
index 993f36ba..3d196e41 100644
--- a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h
+++ b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_linux.h
@@ -550,6 +550,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #define MMPA_DLL_API
 
 #ifdef __cplusplus
diff --git a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h
index 49e97a5d..e6b6f71e 100644
--- a/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h
+++ b/third_party/fwkacllib/inc/mmpa/sub_inc/mmpa_win.h
@@ -557,6 +557,10 @@ MMPA_FUNC_VISIBILITY mmFileHandle mmShmOpen(const CHAR *name, INT32 oflag, mmMod
 MMPA_FUNC_VISIBILITY INT32 mmShmUnlink(const CHAR *name);
 MMPA_FUNC_VISIBILITY VOID *mmMmap(mmFd_t fd, mmSize_t size, mmOfft_t offset, mmFd_t *extra, INT32 prot, INT32 flags);
 MMPA_FUNC_VISIBILITY INT32 mmMunMap(VOID *data, mmSize_t size, mmFd_t *extra);
+
+MMPA_FUNC_VISIBILITY mmSize mmGetPageSize();
+MMPA_FUNC_VISIBILITY VOID *mmAlignMalloc(mmSize mallocSize, mmSize alignSize);
+MMPA_FUNC_VISIBILITY VOID mmAlignFree(VOID *addr);
 #ifdef __cplusplus
 #if __cplusplus
 }
diff --git a/third_party/fwkacllib/inc/ops/aipp.h b/third_party/fwkacllib/inc/ops/aipp.h
index bed984bd..86805f72 100644
--- a/third_party/fwkacllib/inc/ops/aipp.h
+++ b/third_party/fwkacllib/inc/ops/aipp.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -65,6 +65,8 @@
 in aipp config file, framework will auto add one input node to graph at last. \n
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator AippData.
+*@par Restrictions:
+*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly.
 */
 REG_OP(AippData)
     .INPUT(data, TensorType::ALL())
diff --git a/third_party/fwkacllib/inc/ops/all_ops.h b/third_party/fwkacllib/inc/ops/all_ops.h
index 1ac83783..cc11f5f9 100644
--- a/third_party/fwkacllib/inc/ops/all_ops.h
+++ b/third_party/fwkacllib/inc/ops/all_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
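The aligned-memory helpers added to the mmpa headers above (mmGetPageSize, mmAlignMalloc, mmAlignFree) are designed to be used as a pair. An editor's minimal sketch of the assumed usage pattern follows, not part of the patch: the AllocPageAligned helper name and the page-size alignment choice are illustrative, and the assumption that mmAlignMalloc returns NULL on failure is not stated by this patch.

#include "mmpa/mmpa_api.h"

/* Allocate a buffer aligned to the OS page size; release it with mmAlignFree. */
static VOID *AllocPageAligned(mmSize bytes)
{
  mmSize pageSize = mmGetPageSize();           /* query the OS page size */
  VOID *buf = mmAlignMalloc(bytes, pageSize);  /* aligned allocation, assumed NULL on failure */
  return buf;
}

/* Usage sketch:
 *   VOID *buf = AllocPageAligned(1 << 20);
 *   if (buf != NULL) { ...use buf... ; mmAlignFree(buf); }
 */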
@@ -39,6 +39,7 @@
 #include "image_ops.h"
 #include "internal_ops.h"
 #include "linalg_ops.h"
+#include "list_ops.h"
 #include "logging_ops.h"
 #include "lookup_ops.h"
 #include "math_ops.h"
diff --git a/third_party/fwkacllib/inc/ops/array_ops.h b/third_party/fwkacllib/inc/ops/array_ops.h
index e1f64421..fd35b546 100644
--- a/third_party/fwkacllib/inc/ops/array_ops.h
+++ b/third_party/fwkacllib/inc/ops/array_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -626,7 +626,7 @@ REG_OP(StopGradient)
 *x: A tensor. \n
 
 *@par Outputs:
-*y: A tensor. \n
+*y: A tensor with the same shape and contents as input. \n
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator Identity.
@@ -666,7 +666,7 @@ REG_OP(IdentityN)
 *@li axis: The dimension index at which to expand. \n
 
 *@par Outputs:
-*y: A tensor. \n
+*y: A tensor with the same data as input, with an additional dimension inserted at the index specified by axis. \n
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ExpandDims.
@@ -713,7 +713,7 @@ REG_OP(Unsqueeze)
 *@par Outputs:
 *y: A tensor. \n
 
-*@par Attention:
+*@attention Constraints:
 *This operator cannot be directly called by the acllopExecute API. \n
 
 *@par Third-party framework compatibility
@@ -1153,6 +1153,102 @@ REG_OP(EditDistance)
     .OUTPUT(output, TensorType({DT_FLOAT}))
     .OP_END_FACTORY_REG(EditDistance)
 
+/**
+* @brief Sorts a tensor along the given axis (sort_v2).
+
+* @par Inputs:
+* @li x: An ND tensor of type float16.
+
+* @par Attributes:
+
+* @li axis: An optional int. The dimension to sort along. This value defaults to -1.
+* @li descending: An optional bool. Controls the sorting order (ascending or descending). This value defaults to False.
+
+* @par Outputs:
+* @li y: An ND tensor of type float16.
+
+* @attention Constraints:
+* @li The axis should be the last dimension.
+* @li When the data to sort contains fewer than 150K elements, this TBE op is recommended,
+  and descending order performs better than ascending order.
+* @li The upper limit of data on Ascend910 is 2000K.
+*/
+REG_OP(SortV2)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .ATTR(axis, Int, -1)
+    .ATTR(descending, Bool, false)
+    .OP_END_FACTORY_REG(SortV2)
+
+/**
+* @brief Expand the input tensor to a compatible shape. \n
+
+* @par Inputs:
+* Two inputs, including:
+* @li x: A Tensor. Must be one of the following types:
+* float16, float32, int32, int8, uint8. \n
+* @li shape: A Tensor to specify the shape that the input tensor expanded to. \n
+
+* @par Outputs:
+* @li y: A Tensor. Has the same type as "x", with the shape specified by the "shape" input. \n
+
+* @par Third-party framework compatibility
+* Compatible with the ONNX operator Expand.
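+*
+* A usage sketch with the GE IR graph-construction API; the node names and the
+* generated set_input_* setters are assumptions for illustration:
+*@code
+*  auto x = op::Data("x");                    // e.g. a float16 tensor of shape (3, 1)
+*  auto expand = op::Expand("expand")
+*                    .set_input_x(x)
+*                    .set_input_shape(shape); // "shape" holds the target shape, e.g. (3, 4)
+*@endcode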
Has the same type as "x" . \n + +*@par Third-party framework compatibility +*Compatible with the PyTorch operator NonZero. +*/ + +REG_OP(NonZero) + .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \ + DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL})) + .OUTPUT(y, TensorType({DT_INT64})) + .ATTR(transpose, Bool, false) + .OP_END_FACTORY_REG(NonZero) + +/** +* @brief Expand the input tensor to a compatible shape. \n + +* @par Inputs: +* One inputs, including: +* @li x: A Tensor. Must be one of the following types: +* float16, float32, int32, int8 ,uint8. \n + +* @par Attributes: +* @li shape: A required listInt to specify the shape that the input tensor expanded to. \n + + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x", and the shape specified by input and attr shape \n + +* @par Third-party framework compatibility +* Compatible with the ONNX operator Expand. +*/ + +REG_OP(ExpandD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .REQUIRED_ATTR(shape, ListInt) + .OP_END_FACTORY_REG(ExpandD) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/audio_ops.h b/third_party/fwkacllib/inc/ops/audio_ops.h index d9883253..f05135d1 100644 --- a/third_party/fwkacllib/inc/ops/audio_ops.h +++ b/third_party/fwkacllib/inc/ops/audio_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h b/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h new file mode 100644 index 00000000..d0800a08 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/avg_pool_1d_ops.h @@ -0,0 +1,58 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file avg_pool_1d_ops.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_AVGPOOL1DOPS_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_AVGPOOL1DOPS_H_ +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief Generate an auxiliary matrix . \n + +*@par Inputs: +* @li x: A tensor. Must be one of the following types:uint8, int8,int16, int32, + int64, float16, float, double.The format must be NHWC NCHW NC1HWC0. + +*@par Attributes: +*@li ksize: Kernel size. Input type is int. +*@li strides: Input type is int. +*@li pads: Input type is listInt . +*@li ceil_mode: Bool, default value is false. +*@li count_include_pad: Bool, default value is false. \n + +*@par Outputs: +*y_tensor: A tensor with the same types as "x" . \n +*@par Third-party framework compatibility + +*Compatible with the TensorFlow operator Unbatch. 
+*/ +REG_OP(AvgPool1DAvgMatrix) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT8, + DT_INT32, DT_INT64, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT8, + DT_INT32, DT_INT64, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, Int) + .REQUIRED_ATTR(strides, Int) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, false) + .OP_END_FACTORY_REG(AvgPool1DAvgMatrix) +} +#endif \ No newline at end of file diff --git a/third_party/fwkacllib/inc/ops/batch_ops.h b/third_party/fwkacllib/inc/ops/batch_ops.h index 8a1c5a7b..ca4fe1db 100644 --- a/third_party/fwkacllib/inc/ops/batch_ops.h +++ b/third_party/fwkacllib/inc/ops/batch_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -64,10 +64,10 @@ the same types as "x_tensors" . It's a dynamic output. \n REG_OP(Batch) .DYNAMIC_INPUT(x_tensors, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, \ DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE})) - .OUTPUT(y_index, TensorType({ DT_INT64 })) - .OUTPUT(y_id, TensorType({ DT_INT64 })) .DYNAMIC_OUTPUT(y_tensors, TensorType({DT_INT8, DT_UINT8, DT_INT16, \ DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_BOOL})) + .OUTPUT(y_index, TensorType({ DT_INT64 })) + .OUTPUT(y_id, TensorType({ DT_INT64 })) .REQUIRED_ATTR(num_batch_threads, Int) .REQUIRED_ATTR(max_batch_size, Int) .ATTR(max_enqueued_batches, Int, 10) @@ -107,11 +107,13 @@ across multiple sessions . \n REG_OP(Unbatch) .INPUT(x_tensor, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .INPUT(index, TensorType({DT_INT64})) .INPUT(id, TensorType({DT_INT64})) .OUTPUT(y_tensor, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \ - DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \ + DT_COMPLEX64, DT_COMPLEX128})) .REQUIRED_ATTR(timeout_micros, Int) .ATTR(container, String, "") .ATTR(shared_name, String, "") @@ -146,13 +148,16 @@ across multiple sessions . 
\n
 REG_OP(UnbatchGrad)
     .INPUT(x_input, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
-        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
+        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \
+        DT_COMPLEX64, DT_COMPLEX128}))
     .INPUT(index, TensorType({DT_INT64}))
     .INPUT(grad, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
-        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
+        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \
+        DT_COMPLEX64, DT_COMPLEX128}))
     .INPUT(id, TensorType({DT_INT64}))
     .OUTPUT(y_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
-        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
+        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE, DT_FLOAT16, \
+        DT_COMPLEX64, DT_COMPLEX128}))
     .ATTR(container, String, "")
     .ATTR(shared_name, String, "")
     .OP_END_FACTORY_REG(UnbatchGrad)
diff --git a/third_party/fwkacllib/inc/ops/bitwise_ops.h b/third_party/fwkacllib/inc/ops/bitwise_ops.h
index 5c83e161..dac78118 100644
--- a/third_party/fwkacllib/inc/ops/bitwise_ops.h
+++ b/third_party/fwkacllib/inc/ops/bitwise_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -25,6 +25,35 @@
 
 namespace ge {
 
+/**
+*@brief Element-wise computes the bitwise left-shift of x and y . \n
+
+*@par Inputs:
+*Two inputs, including:
+* @li x: A Tensor. Must be one of the following types: int8, int16, int32,
+int64, uint8, uint16, uint32, uint64.
+* @li y: A Tensor. Has the same type as "x". \n
+
+*@par Outputs:
+* z: A Tensor. Has the same type as "x". \n
+
+*@attention Constraints:
+*LeftShift runs on the Ascend AI CPU, which delivers poor performance. \n
+
+*@par Third-party framework compatibility
+*Compatible with the TensorFlow operator LeftShift.
+*/
+
+REG_OP(LeftShift)
+    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \
+        DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
+    .INPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \
+        DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
+    .OUTPUT(z, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, \
+        DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
+    .OP_END_FACTORY_REG(LeftShift)
+
 /**
 *@brief Element-wise computes the bitwise right-shift of x and y . \n
 
diff --git a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h
index 550e8b7d..08e54824 100644
--- a/third_party/fwkacllib/inc/ops/boosted_trees_ops.h
+++ b/third_party/fwkacllib/inc/ops/boosted_trees_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h
index e20607bf..890c52ae 100644
--- a/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h
+++ b/third_party/fwkacllib/inc/ops/candidate_sampling_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/condtake_ops.h b/third_party/fwkacllib/inc/ops/condtake_ops.h index 5e91eb07..029cffbf 100644 --- a/third_party/fwkacllib/inc/ops/condtake_ops.h +++ b/third_party/fwkacllib/inc/ops/condtake_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/control_flow_ops.h b/third_party/fwkacllib/inc/ops/control_flow_ops.h index 7196b14f..e5bd3534 100644 --- a/third_party/fwkacllib/inc/ops/control_flow_ops.h +++ b/third_party/fwkacllib/inc/ops/control_flow_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -96,7 +96,7 @@ REG_OP(RefMerge) * Otherwise, the data is forwarded to "output_false" . \n *@par Inputs: - *@li data: The tensor to be forwarded. \ n + *@li data: The tensor to be forwarded. \n * Must be one of the following types: float16, float32, float64, * int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool. *@li pred: A boolean scalar. The output port that will receive data . \n @@ -387,12 +387,12 @@ REG_OP(ControlTrigger) *@par Inputs: * Three inputs, including: -*@li x: One dimensional tensore of type int32, specifying queried shape, max size is 8. -*@li data_seq: One dimensional tensore of type int32, specifying the mapped table is queried. -*@li level_index: One dimensional tensore of type int32, specifying secondary index. \n +*@li x: One dimensional tensor of type int32, specifying queried shape, max size is 128. +*@li data_seq: One dimensional tensor of type int32, specifying the mapped table is queried. +*@li level_index: One dimensional tensor of type int32, specifying secondary index. \n *@par Outputs: -*@li y: A Tensor with shape [batch, 8], of type int32, specifying index of shape in the map. +*@li y: A Tensor with shape [8], of type int32, specifying index of shape in the map. *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. */ diff --git a/third_party/fwkacllib/inc/ops/correlation.h b/third_party/fwkacllib/inc/ops/correlation.h new file mode 100644 index 00000000..caebba50 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/correlation.h @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file correlation.h + * \brief + */ +#ifndef GE_OP_CORRELATION_OPS_H +#define GE_OP_CORRELATION_OPS_H + +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief Computes a 2D Correlation given 4D "x" and "filter" tensors. +* +*@par Inputs: +* @li filter: A 4D tensor of filters. 
+* @li x: A 4D tensor of input images. Its batch number must equal the batch
+* number of "filter", and its channel count must equal the channel count of "filter".
+*
+*@par Attributes:
+* @li groups: sets the correlation mode; must be 1 or the channel count.
+*
+*@par Outputs:
+*y: A Tensor. Has the same type as "x".
+
+*@par Third-party framework compatibility
+* Compatible with caffe correlation custom operator.
+*/
+REG_OP(Correlation)
+    .INPUT(filter, TensorType({DT_FLOAT16, DT_INT8}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_INT8}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32}))
+    .ATTR(groups, Int, 1)
+    .OP_END_FACTORY_REG(Correlation)
+}  // namespace ge
+
+#endif  // GE_OP_CORRELATION_OPS_H
diff --git a/third_party/fwkacllib/inc/ops/ctc_ops.h b/third_party/fwkacllib/inc/ops/ctc_ops.h
index 2c75fd09..e907b828 100644
--- a/third_party/fwkacllib/inc/ops/ctc_ops.h
+++ b/third_party/fwkacllib/inc/ops/ctc_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -137,6 +137,87 @@ REG_OP(CTCBeamSearchDecoder)
     .OUTPUT(log_probability, TensorType({DT_FLOAT, DT_DOUBLE}))
     .OP_END_FACTORY_REG(CTCBeamSearchDecoder)
 
+/**
+*@brief The Connectionist Temporal Classification loss.
+
+*@par Inputs:
+*@li log_probs: Tensor of size (T, N, C), where T = input length, N = batch size,
+  and C = number of classes (including blank).
+  It represents the logarithmized probabilities of the outputs.
+*@li targets: Tensor of size (N, S), where S = max target length.
+  It represents the target sequences.
+*@li input_lengths: Tuple or tensor of size (N). It represents the lengths of the inputs.
+*@li target_lengths: Tuple or tensor of size (N). It represents the lengths of the targets.
+
+*@par Outputs:
+*@li neg_log_likelihood: A loss value which is differentiable with respect to each input node.
+*@li log_alpha: The probability of possible trace of input to target.
+
+*@par Attributes:
+*@li blank : Blank label. Default 0.
+*@li reduction: Specifies the reduction to apply to the output. Default: 'mean'.
+*@li zero_infinity : Whether to zero infinite losses and the associated gradients.
+
+*@par Third-party framework compatibility
+* Compatible with Pytorch CTCLoss operator.
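+*
+*@par Example:
+* A shape sketch: with batch size N = 2, input length T = 10 and C = 5 classes
+* (including blank), "log_probs" has shape (10, 2, 5), "targets" has shape
+* (2, S) where S is the longest target length, and "input_lengths" and
+* "target_lengths" each have shape (2).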
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(CTCLossV2)
+    .INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(targets, TensorType({DT_INT32, DT_INT64}))
+    .INPUT(input_lengths, TensorType({DT_INT32, DT_INT64}))
+    .INPUT(target_lengths, TensorType({DT_INT32, DT_INT64}))
+    .OUTPUT(neg_log_likelihood, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(log_alpha, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .ATTR(blank, Int, 0)
+    .ATTR(reduction, String, "mean")
+    .ATTR(zero_infinity, Bool, false)
+    .OP_END_FACTORY_REG(CTCLossV2)
+
+/**
+*@brief The Connectionist Temporal Classification loss grad.
+
+*@par Inputs:
+*@li grad_out: Gradient renewal coefficient. Tensor of size (N), where N = batch size.
+*@li log_probs: Tensor of size (T, N, C), where T = input length, N = batch size,
+  and C = number of classes (including blank).
+  It represents the logarithmized probabilities of the outputs.
+*@li targets: Tensor of size (N, S), where S = max target length.
+  It represents the target sequences.
+*@li input_lengths: Tuple or tensor of size (N). It represents the lengths of the inputs.
+*@li target_lengths: Tuple or tensor of size (N). It represents the lengths of the targets.
+*@li neg_log_likelihood: A loss value which is differentiable with respect to each input node.
+*@li log_alpha: The probability of possible trace of input to target.
+
+*@par Outputs:
+*@li grad: Tensor of size (T, N, C), the gradient of the Connectionist Temporal Classification loss.
+
+*@par Attributes:
+*@li blank : Blank label. Default 0.
+*@li reduction: Specifies the reduction to apply to the output. Default: 'mean'.
+*@li zero_infinity : Whether to zero infinite losses and the associated gradients.
+
+*@par Third-party framework compatibility
+* Compatible with Pytorch CTCLoss operator.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(CTCLossV2Grad)
+    .INPUT(grad_out, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(targets, TensorType({DT_INT32, DT_INT64}))
+    .INPUT(input_lengths, TensorType({DT_INT32, DT_INT64}))
+    .INPUT(target_lengths, TensorType({DT_INT32, DT_INT64}))
+    .INPUT(neg_log_likelihood, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(log_alpha, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(grad, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .ATTR(blank, Int, 0)
+    .ATTR(reduction, String, "mean")
+    .ATTR(zero_infinity, Bool, false)
+    .OP_END_FACTORY_REG(CTCLossV2Grad)
 }  // namespace ge
 
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_CTC_OPS_H_
\ No newline at end of file
diff --git a/third_party/fwkacllib/inc/ops/data_flow_ops.h b/third_party/fwkacllib/inc/ops/data_flow_ops.h
index bb937a75..6021f4e3 100644
--- a/third_party/fwkacllib/inc/ops/data_flow_ops.h
+++ b/third_party/fwkacllib/inc/ops/data_flow_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -908,7 +908,7 @@ REG_OP(TensorArray)
     .OUTPUT(handle, TensorType({DT_RESOURCE}))
     .OUTPUT(flow, TensorType({DT_FLOAT}))
     .REQUIRED_ATTR(dtype, Type)
-    .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE)
+    .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK)
     .ATTR(dynamic_size, Bool, false)
     .ATTR(clear_after_read, Bool, true)
     .ATTR(identical_element_shapes, Bool, false)
@@ -963,7 +963,7 @@ REG_OP(TensorArrayConcat)
         DT_QUINT8, DT_QINT32}))
     .OUTPUT(lengths, TensorType({DT_INT64}))
     .REQUIRED_ATTR(dtype, Type)
-    .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_SHAPE)
+    .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_RANK)
     .OP_END_FACTORY_REG(TensorArrayConcat)
 
 /**
@@ -999,7 +999,7 @@ REG_OP(TensorArrayGather)
         DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8,
         DT_QUINT8, DT_QINT32}))
     .REQUIRED_ATTR(dtype, Type)
-    .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE)
+    .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK)
     .OP_END_FACTORY_REG(TensorArrayGather)
 
 /**
@@ -1430,6 +1430,24 @@ REG_OP(OrderedMapClear)
     .ATTR(shared_name, String, "")
     .OP_END_FACTORY_REG(OrderedMapClear)
 
+/**
+*@brief FakeQueue, supporting the TF API FixedLengthRecordReader. \n
+
+*@par Inputs:
+*Including:
+* @li resource: A Tensor of type DT_RESOURCE.
+
+*@par Outputs:
+*handle: A Tensor of type DT_STRING ref. \n
+
+*@par Third-party framework compatibility
+*Compatible with the TensorFlow operator FakeQueue.
+*/
+REG_OP(FakeQueue)
+    .INPUT(resource, TensorType({DT_RESOURCE}))
+    .OUTPUT(handle, TensorType({DT_STRING}))
+    .OP_END_FACTORY_REG(FakeQueue)
+
 /**
 *@brief Returns the number of incomplete elements in the underlying container. \n
@@ -2258,6 +2276,7 @@ REG_OP(LruCache)
     .ATTR(shared_name, String, "LruCache")
     .ATTR(cache_size, Int, 100000)
     .ATTR(load_factor, Float, 1)
+    .REQUIRED_ATTR(dtype, Type)
     .OP_END_FACTORY_REG(LruCache)
 
 /**
@@ -2277,9 +2296,9 @@ REG_OP(CacheAdd)
     .INPUT(cache, TensorType({DT_RESOURCE}))
     .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
     .OUTPUT(swap_in_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
-    .OUTPUT(swap_in_idx, TensorType({DT_INT64}))
+    .OUTPUT(swap_in_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
     .OUTPUT(swap_out_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
-    .OUTPUT(swap_out_idx, TensorType({DT_INT64}))
+    .OUTPUT(swap_out_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
     .OP_END_FACTORY_REG(CacheAdd)
 
 /**
@@ -2295,9 +2314,65 @@ REG_OP(CacheAdd)
 REG_OP(CacheRemoteIndexToLocal)
     .INPUT(cache, TensorType({DT_RESOURCE}))
     .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
-    .OUTPUT(local_idx, TensorType({DT_INT64}))
+    .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
     .OP_END_FACTORY_REG(CacheRemoteIndexToLocal)
 
+/**
+*@brief CacheAllIndexToLocal, get the ids in the cache
+*@par Inputs:
+*cache: resource data
+*@par Outputs:
+*local_idx: ids in the cache.
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(CacheAllIndexToLocal)
+    .INPUT(cache, TensorType({DT_RESOURCE}))
+    .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
+    .REQUIRED_ATTR(dtype, Type)
+    .OP_END_FACTORY_REG(CacheAllIndexToLocal)
+
+/**
+*@brief DynamicGetNext, dynamically get the next data
+*@par Inputs:
+*x: the iterator, all types are available
+*@par Outputs:
+*y: the data in the iterator, all types are available
+*@par Attributes:
+*output_types: types of all outputs
+*output_shapes: shapes of all outputs
+*_dynamic_graph_execute_mode: dynamic graph execution mode,
+value is one of lazy_recompile and dynamic_execute
+*_getnext_inputs_shape_range: shape ranges of outputs,
+it takes effect when _dynamic_graph_execute_mode is dynamic_execute
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(DynamicGetNext)
+    .INPUT(x, TensorType::ALL())
+    .DYNAMIC_OUTPUT(y, TensorType::ALL())
+    .ATTR(output_types, ListType, {})
+    .ATTR(output_shapes, ListListInt, {{}, {}})
+    .ATTR(_dynamic_graph_execute_mode, String, "lazy_recompile")
+    .ATTR(_getnext_inputs_shape_range, String, "")
+    .OP_END_FACTORY_REG(DynamicGetNext)
+
+/**
+*@brief AdpGetNext
+*@par Outputs:
+*y: the data in the iterator, all types are available
+*@par Attributes:
+*output_types: types of all outputs
+*output_shapes: shapes of all outputs
+*queue_name: cdqm queue name
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/ +REG_OP(AdpGetNext) + .DYNAMIC_OUTPUT(y, TensorType::ALL()) + .ATTR(output_types, ListType, {}) + .ATTR(output_shapes, ListListInt, {{}, {}}) + .ATTR(queue_name, String, "") + .OP_END_FACTORY_REG(AdpGetNext) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h index c64bc138..f61e2939 100644 --- a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,10 +28,13 @@ namespace ge { *@par Inputs: *Dynamic inputs, including: -* @li x: A list of Tensor objects, each with same shape and type. The supported types are: +*x: A list of Tensor objects, each with same shape and type. The supported types are: * float16, float32, double, int32, uint8, int16, int8, complex64, int64, * qint8, quint8, qint32, uint16, complex128, uint32, uint64. It's a dynamic input. \n +*@par Attributes: +*N: An required attribute of type int32, means nums of inputs. \n + *@par Outputs: *y: A Tensor. Has the same shape and type as the elements of "x". \n @@ -122,7 +125,8 @@ REG_OP(MinimumGrad) *@par Inputs: *One input: *x:A Tensor. Must be one of the following types: bool, float16, float, int8, int32, uint32, uint8, - int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. \n + int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32. + For float32 type, the actual calculation on the chip is based on float16. \n *@par Attributes: *dst_type: An required attribute of type int32, specifying the dst data type. \n @@ -142,6 +146,8 @@ REG_OP(Cast) /** *@brief Returns the truth value of (x1 >= x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *Two inputs, including: @@ -163,6 +169,8 @@ REG_OP(GreaterEqual) /** *@brief Returns the truth value of (x1 < x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *Two inputs, including: @@ -322,8 +330,8 @@ REG_OP(Sub) *@brief computes the absolute value of a tensor. \n *@par Inputs: -*One inputs, including: -* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32, int64. \n +*One input, including: \n +*x: A Tensor. Must be one of the following types: float16, float32, double, int32, int64. \n *@par Outputs: *y: A Tensor. Has the same type as "x". \n @@ -563,6 +571,8 @@ REG_OP(InvGrad) /** *@brief: Returns the truth value of (x <= y) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: * Two inputs, including: @@ -611,6 +621,15 @@ REG_OP(Log1p) *@par Outputs: *y: A Tensor. Has the same type as "x1". + +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... 
*DN<=1000000,n<=8 + *@par Third-party framework compatibility *Compatible with the TensorFlow operator Mod. */ @@ -1020,7 +1039,7 @@ REG_OP(BesselI1e) * y = log_base(shift + scale * x), with "base" > 0. \n * @par Inputs: -* @li x: A Tensor of type complex64, complex128, float16, float32 or double. \n +* x: A Tensor of type complex64, complex128, float16, float32 or double. \n * @par Attributes: * @li base: An optional float32, specifying the base "e". Defaults to "-1.0" @@ -1065,7 +1084,7 @@ REG_OP(Log) * uint8, int8, uint16, int16, int32, int64, complex64, complex128. \n * @attention Constraints: -* @li "x1" and "x2" have incompatible shapes or types. \n +* "x1" and "x2" have incompatible shapes or types. \n * @par Third-party framework compatibility * Compatible with the TensorFlow operator Multiply. @@ -1451,6 +1470,8 @@ REG_OP(ReciprocalGrad) /** *@brief Returns the truth value of (x1 > x2) element-wise. \n +*when input is int32 and (x2 - x1) > 2**31 or < -2**31 +*aicore accuracy is not guaranteed \n *@par Inputs: *@li x1: A Tensor of type float16, float32, double, int64, int32, int16, int8, @@ -2042,6 +2063,15 @@ REG_OP(FloorDiv) * *@par Outputs: *y: Result remainder. + +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 + *@par Third-party framework compatibility * Compatible with the TensorFlow operator FloorMod. */ @@ -2168,6 +2198,14 @@ REG_OP(Tan) *@par Outputs: *y: A Tensor. Has the same type as "x1". \n +*@attention Constraints: +*@li x2: The input data does not support 0 +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*requirement of double thousandths in the mini form +*@li Due to different architectures, the calculation results of this operator +*on NPU and CPU may be inconsistent +*@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 + *@par Third-party framework compatibility *@li Compatible with the TensorFlow operator TruncateMod. */ @@ -2424,6 +2462,25 @@ REG_OP(Eltwise) .ATTR(coeff, ListFloat, {}) .OP_END_FACTORY_REG(Eltwise) +/** + *@brief Computes the inverse error function of each element of input. \n + + *@par Inputs: + *One inputs, including: + * @li input_x: A tensor. Must be one of the following types: + * float16, float32. \n + + *@par Outputs: + *y: A Tensor with the same type and shape of input_x's. \n + + *@par Third-party framework compatibility + *Compatible with the Pytorch operator Erfinv. \n + */ +REG_OP(Erfinv) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(Erfinv) + /** *@brief Computes element-wise population count. \n @@ -2829,9 +2886,9 @@ REG_OP(AdamApplyOneAssign) *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
*/
 REG_OP(LambApplyOptimizerAssign)
-    .INPUT(input0, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(grad, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(inputv, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(inputm, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(mul0_x, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(mul1_x, TensorType({DT_FLOAT16,DT_FLOAT}))
@@ -2842,6 +2899,8 @@ REG_OP(LambApplyOptimizerAssign)
     .INPUT(do_use_weight, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(weight_decay_rate, TensorType({DT_FLOAT16,DT_FLOAT}))
     .OUTPUT(output0, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(inputv, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(inputm, TensorType({DT_FLOAT16,DT_FLOAT}))
     .OP_END_FACTORY_REG(LambApplyOptimizerAssign)
 
 /**
@@ -2873,7 +2932,8 @@ REG_OP(LambApplyWeightAssign)
     .INPUT(input1, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(input2, TensorType({DT_FLOAT16,DT_FLOAT}))
     .INPUT(input3, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .INPUT(input4, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(input_param, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(input_param, TensorType({DT_FLOAT16,DT_FLOAT}))
     .OP_END_FACTORY_REG(LambApplyWeightAssign)
 
 /**
@@ -3183,12 +3243,14 @@ REG_OP(Fills)
 *@brief Add tensor with scale. \n
 
 *@par Inputs:
-*Five inputs, including:
-* @li x1: A Tensor. Must be one of the following types:int32,int16, float16, float32.
-* @li x2: A scale. Must be float. \n
+*One input, including: \n
+*x: A Tensor. Must be one of the following types: int32, int16, float16, float32. \n
+
+*@par Attributes:
+*value: A scale. Must be float. \n
 
 *@par Outputs:
-*@li y: A Tensor. Has the same type and shape as "x1". \n
+*y: A Tensor. Has the same type and shape as "x". \n
 
 *@par Third-party framework compatibility:
 * Compatible with the Pytorch operator adds.
@@ -3329,8 +3391,441 @@ REG_OP(TensorRedirect)
     .OUTPUT(output_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8,
         DT_INT32, DT_UINT8, DT_INT64, DT_INT16, DT_UINT16, DT_UINT64, DT_UINT32}))
     .OP_END_FACTORY_REG(TensorRedirect)
-}  // namespace ge
 
+/**
+* @brief Performs the element-wise division of tensor x1 by tensor x2,
+* multiplies the result by the scalar value and adds it to tensor input_data.
+
+* @par Inputs:
+* Four inputs, including:
+* @li input_data: A mutable input Tensor. Must be one of the following types:
+* float16, float32.
+* @li x1: A mutable input Tensor of the same type as input_data.
+* @li x2: A mutable input Tensor of the same type as input_data.
+* @li value: A mutable input Tensor. Must be one of the following types:
+* float16, float32, int32. \n
+
+* @par Outputs:
+* @li y: A mutable Tensor. Has the same type as "input_data". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Addcdiv.
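+*
+* @par Example:
+* With input_data = [1.0, 1.0], x1 = [2.0, 9.0], x2 = [4.0, 3.0] and
+* value = 0.5, the result is y = input_data + value * (x1 / x2)
+* = [1.25, 2.5].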
+*/
+REG_OP(Addcdiv)
+    .INPUT(input_data, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(value, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(Addcdiv)
+
+/**
+* @brief Performs the element-wise multiplication of tensor x1 by tensor x2,
+* multiplies the result by the scalar value and adds it to tensor input_data.
+
+
+* @par Inputs:
+* Four inputs, including:
+* @li input_data: A mutable input Tensor. Must be one of the following types:
+* float16, float32, int8, int32, uint8.
+* @li x1: A mutable input Tensor of the same type as input_data.
+* @li x2: A mutable input Tensor of the same type as input_data.
+* @li value: A tensor which includes only one element of the same type as input_data. \n
+
+* @par Outputs:
+* @li y: A mutable output Tensor. Has the same type as "input_data". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Addcmul.
+*/
+REG_OP(Addcmul)
+    .INPUT(input_data, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 }))
+    .INPUT(x1, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 }))
+    .INPUT(x2, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 }))
+    .INPUT(value, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 }))
+    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8 }))
+    .OP_END_FACTORY_REG(Addcmul)
+/**
+* @brief Computes the result of x2 * alpha + x1.
+
+* @par Inputs:
+* @li x1: An ND tensor of type float16, float32, int32.
+* @li x2: An ND tensor of type float16, float32, int32.
+* @li alpha: A scalar tensor of type float16, float32. \n
+
+* @par Outputs:
+* @li y: An ND tensor with the same shape and type as "x1". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Axpy.
+*/
+REG_OP(AxpyV2)
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .INPUT(alpha, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .OP_END_FACTORY_REG(AxpyV2)
+
+/**
+* @brief Computes the result of x1 - x2.
+
+* @par Inputs:
+* @li x1: An ND tensor of type float16, float, int32.
+* @li x2: An ND tensor of type float16, float, int32. \n
+
+* @par Outputs:
+* @li y: An ND tensor with the same type as "x1". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Sub.
+*/
+REG_OP(PtSub)
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .OP_END_FACTORY_REG(PtSub)
+
+/**
+* @brief Add the partial values of two tensors in format NC1HWC0.
+
+* @par Inputs:
+* @li x1: A Tensor in 5HD, and must be one of the following types: float16,
+* float32. \n
+* @li x2: A Tensor of the same type as "x1", and the same shape as "x1",
+* except for the C1 value. \n
+
+* @par Attributes:
+* @li x1_c1_offset: A required int. Offset value of C1 in "x1". \n
+* @li x2_c1_offset: A required int. Offset value of C1 in "x2". \n
+* @li c1_len: A required int. C1 len of "y". The value must be less than
+* the difference between C1 and offset in "x1" and "x2". \n
+
+* @par Outputs:
+* @li y: A Tensor of the same type as "x1", and the same shape as "x1",
+* except for the C1 value. Record the result after adding. \n
+*/
+REG_OP(StrideAdd)
+    .INPUT(x1, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .INPUT(x2, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .REQUIRED_ATTR(x1_c1_offset, Int)
+    .REQUIRED_ATTR(x2_c1_offset, Int)
+    .REQUIRED_ATTR(c1_len, Int)
+    .OP_END_FACTORY_REG(StrideAdd)
+
+/**
+* @brief Compares whether two tensors are exactly equal, outputting a single bool value.
+
+* @par Inputs:
+* Two inputs, including:
+* @li input_x: A Tensor. The first tensor. \n
+* @li input_y: A Tensor. The second tensor. \n
+
+* @par Outputs:
+* @li output_z: A Tensor of bool type, the comparison result of the two inputs. \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch equal operator. \n
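+*
+* @par Example:
+* For input_x = [1, 2, 3] and input_y = [1, 2, 3], output_z is true; if any
+* element (or the shape) differs, output_z is false, following the PyTorch
+* equal semantics.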
\n +*/ +REG_OP(TensorEqual) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .INPUT(input_y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8})) + .OUTPUT(output_z, TensorType({DT_BOOL})) + .OP_END_FACTORY_REG(TensorEqual) + +/** + * @brief Element-wise min of each of the input tensors (with Numpy-style broadcasting support). + * All inputs and outputs must have the same data type. This operator supports multidirectional + * (i.e., Numpy-style) broadcasting + * + * @par inputs + * one input including: + * @li x: dynamic input A Tensor. Must be one of the following types: float32, float16, double, int32, int64 + * + * @par output + * one output including: + * @li y:A Tensor of the same type as x + * + */ +REG_OP(MaxN) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64})) + .OP_END_FACTORY_REG(MaxN) + + +/** + * @brief Calculates x * maske * value. + * + * @par Inputs: + * @li x: An tensor of type float16 or float32, specifying the input to the data layer. + * @li mask: An tensor of type int8 or float16 or float32, be same shape with x. \n + * + * @par Attributes: + * value: A optional float. \n + * + * @par Outputs: + * y: The output tensor of type float16 or float32. + @ li y:A Tensor of the same type and shape as x + * + */ +REG_OP(MaskedScale) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32})) + .INPUT(mask, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) + .REQUIRED_ATTR(value, Float) + .OP_END_FACTORY_REG(MaskedScale) + +/** + * @brief Calculate the lerp function. \n + + * @par Inputs: + * Three inputs, including: + * @li start: A tensor. Must be one of the following types: + * float16, float32. \n + * @li end: A tensor. Must be one of the following types: + * float16, float32. \n + * @li weight: A tensor. Must be one of the following types: + * float16, float32. \n + + * @par Outputs: + * y: A Tensor with the same type and shape of input_x's. \n + + * @par Third-party framework compatibility + * Compatible with the Pytorch operator Lerp. \n + */ +REG_OP(Lerp) + .INPUT(start, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(end, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(weight, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(Lerp) + +/** +*@brief Returns the num value of abs(x1-x2) > atol+rtol*abs(x2) element-wise. \n + +* +*@par Inputs: +*@li x1: A tensor. Must be one of the following types: float32, int32, uint8, int8, float16 +*@li x2: A tensor of the same type as "x1". +* +*@par Attributes: +* atol: Defaults to "1e-05". +* rtol: Defaults to "1e-03". +* +*@par Outputs: +* num: A tensor of type float32. +* +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+* +*/ +REG_OP(DataCompare) + .INPUT(x1, TensorType({ DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT32 })) + .INPUT(x2, TensorType({ DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT32 })) + .OUTPUT(num, TensorType({DT_FLOAT})) + .ATTR(atol, Float, 1e-5) + .ATTR(rtol, Float, 1e-3) + .OP_END_FACTORY_REG(DataCompare) + +/** +*@brief Hardmax(element in input, axis) = 1 if the element is the first maximum value along the specified axis, 0 +*otherwise The input does not need to explicitly be a 2D vector.The "axis" attribute indicates the dimension along +*which Hardmax will be performed.The output tensor has the same shape and contains the Hardmax values of the +*corresponding input. +* +*@par inputs +*one input including: +*@li x: input A Tensor.Must be one of the following types:float32,float16 +* +*@par Attributes: +*@li axis:A required int attribute that decides which dimension will be used to cal the hard_max +* +*@par output: +*one output including: +*@li y:A Tensor of the same type as x +* +*/ +REG_OP(HardMax) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT })) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(axis, Int, -1) + .OP_END_FACTORY_REG(HardMax) + +/** +* @brief Computes the dot product (inner product) of two tensors. This function does not broadcast. + +* @par Inputs: +* Two inputs, including: +* @li input_x: A Tensor. the first tensor must be 1d. \n +* @li input_y: A Tensor. the second tensor must be 1d. \n + +* @par Outputs: +* @li output: A Tensor. Result of the two inputs, must be 1d. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch dot operator. \n +*/ +REG_OP(Dot) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32})) + .OP_END_FACTORY_REG(Dot) + +/** +*@brief Returns a new tensor with boolean elements representing \n +*if each element of input is “close” to the corresponding element of other \n + +*@par Inputs: +*Two inputs, including: +* @li x1: A tensor. Must be one of the following types: +* float16, float32, int32. \n +* @li x2: A tensor with the same type and shape of x1's. \n + +*@par Attributes: +*@li rtol: An optional float.Defaults to 1e-05. \n +*@li atol: An optional float.Defaults to 1e-08. \n +*@li equal_nan: An optional bool.Defaults to false. \n + +*@par Outputs: +*y: A Tensor bool with the same shape of x1's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator isclose. \n +*/ +REG_OP(IsClose) + .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_BOOL})) + .ATTR(rtol, Float, 1e-05) + .ATTR(atol, Float, 1e-08) + .ATTR(equal_nan, Bool, false) + .OP_END_FACTORY_REG(IsClose) + +/** +* @brief Returns the reverse tensor of the ArgMax operator of a tensor. \n + +* @par Inputs: +* three input, including: +* var: A Tensor of type float16, float32, int32 or int8. \n +* indices: A Tensor of type int32. \n +* updates: A Tensor of type float16, float32, int32 or int8. \n + +* @par Attributes: +* @li dimension: An integer of type int, specifying the axis information of the index with the maximum value.\n + +* @par Outputs: +* y: A Tensor of type float16, float32, int32 or int8. 
\n +* +*@attention Constraints: +*@li indices: only support int32,and shape same to "updates" +*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". +*@li y:A Tensor, the type and shape is same to "var" \n + +*@par Third-party framework compatibility +* not support all scene like pytorch operator scatter +* exp: +* var.shape=[2,3,4,5], dim=2, the shape of indices and updates should be [2,3,5] +* not support the shape of indices and updates is [2,3,2,5] like pytorch operator scatter. \n +*/ +REG_OP(ArgMaxGrad) + .INPUT(var, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .REQUIRED_ATTR(dimension, Int) + .OP_END_FACTORY_REG(ArgMaxGrad) + +/** +* @brief Returns the reverse tensor of the ArgMax operator of a tensor. \n + +* @par Inputs: +* three input, including: +* var: A Tensor of type float16, float32, int32 or int8. \n +* indices: A Tensor of type int32. \n +* updates: A Tensor of type float16, float32, int32 or int8. \n +* assist: A Tensor of int32,also a assist matrix and it's shape must match the shape of var \n + +* @par Attributes: +* @li dimension: An integer of type int, specifying the axis information of the index with the maximum value.\n + +* @par Outputs: +* y: A Tensor of type float16, float32, int32 or int8. \n + +*@attention Constraints: +*@li indices: only support int32,and shape same to "updates" +*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". +*@li y:A Tensor, the type and shape is same to "var" \n + +*@par Third-party framework compatibility +* not support all scene like pytorch operator scatter +* exp: +* var.shape=[2,3,4,5], dim=2, the shape of indices and updates should be [2,3,5] +* not support the shape of indices and updates is [2,3,2,5] like pytorch operator scatter. \n +*/ +REG_OP(ArgMaxGradD) + .INPUT(var, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(assist, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .REQUIRED_ATTR(dimension, Int) + .OP_END_FACTORY_REG(ArgMaxGradD) + +/** +*@brief Calculates the reversed outputs of the function "AddMatMatElements" +* c = c * beta + alpha * a * b + +*@par Inputs: +*Three inputs, including: +* @li c: A mutable Tensor. Must be one of the following types: +* float16, float32. +* @li a: A mutable Tensor of the same type as "c". +* @li b: A mutable Tensor of the same type as "c". +* @li beta: A mutable scalar of the same type as "c". +* @li alpha: A mutable scalar of the same type as "c". \n + +*@par Outputs: +* @li c: A mutable Tensor. Has the same type as "c". \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator AddMatMatElements. +*/ +REG_OP(AddMatMatElements) + .INPUT(c, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(a, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(b, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(c, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(AddMatMatElements) + +/** +*@brief Returns cosine similarity between x1 and x2,computed along dim. 
\n + +*@par Inputs: +*Two inputs, including: +* @li input_x1: A tensor. Must be the following types: +* float32. \n + +*@par Inputs: +*@li input_x2: A tensor. Must of the following types: +* float32. \n + +*@par Outputs: +*@li output_y: A Tensor with the same type of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator CosineSimilarity. \n +*/ +REG_OP(CosineSimilarity) + .INPUT(input_x1, TensorType({DT_FLOAT})) /* "First operand." */ + .INPUT(input_x2, TensorType({DT_FLOAT})) /* "Second operand." */ + .OUTPUT(output_y, TensorType({DT_FLOAT})) /* "Result, has same element type as two inputs" */ + .ATTR(dim, Int, 1) + .ATTR(eps, Float, 1e-8) + .OP_END_FACTORY_REG(CosineSimilarity) + +} // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_ELEWISE_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/functional_ops.h b/third_party/fwkacllib/inc/ops/functional_ops.h index 598d3ad3..b09ac058 100644 --- a/third_party/fwkacllib/inc/ops/functional_ops.h +++ b/third_party/fwkacllib/inc/ops/functional_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/get_data_ops.h b/third_party/fwkacllib/inc/ops/get_data_ops.h index 33dc4f14..e5518ef8 100644 --- a/third_party/fwkacllib/inc/ops/get_data_ops.h +++ b/third_party/fwkacllib/inc/ops/get_data_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/globalavgpool.h b/third_party/fwkacllib/inc/ops/globalavgpool.h new file mode 100644 index 00000000..06f03d30 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/globalavgpool.h @@ -0,0 +1,49 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file globalavgpool.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVERAGEPOOL_H_ + +#include "graph/operator_reg.h" + +namespace ge { +/** +*@brief GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. +This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor \n + +*@par Inputs: +*@li x: Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), +where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. +For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. + +*@par Outputs: +*y: Output data tensor from pooling across the input tensor. 
The output tensor has the same rank as the input.
+The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1
+
+*@par Restrictions:
+*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly.
+*/
+REG_OP(GlobalAveragePool)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OP_END_FACTORY_REG(GlobalAveragePool)
+}  // namespace ge
+
+#endif  // OPS_BUILT_IN_OP_PROTO_INC_GLOBALAVGPOOL_H_
\ No newline at end of file
diff --git a/third_party/fwkacllib/inc/ops/hcom_ops.h b/third_party/fwkacllib/inc/ops/hcom_ops.h
index b90b225e..497f6a68 100644
--- a/third_party/fwkacllib/inc/ops/hcom_ops.h
+++ b/third_party/fwkacllib/inc/ops/hcom_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -45,8 +45,6 @@ REG_OP(HcomAllGather)
     .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
     .REQUIRED_ATTR(rank_size, Int)
     .REQUIRED_ATTR(group, String)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomAllGather)
 
 /**
@@ -77,8 +75,6 @@ REG_OP(HcomAllReduce)
     .REQUIRED_ATTR(group, String)
     .ATTR(fusion, Int, 1)
     .ATTR(fusion_id, Int, -1)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomAllReduce)
 
 /**
@@ -91,7 +87,7 @@ REG_OP(HcomAllReduce)
     input of this rank will be broadcast to other ranks.
  * @li fusion: A required integer identifying if the op need to fusion,the
     default value is none fusion
- * @li fusion: A required integer identifying the fusion id if para fusion
+ * @li fusion_id: A required integer identifying the fusion id if para fusion
     is set.
  * @li group: A required string identifying the group name of ranks
     participating in the op.
@@ -109,10 +105,39 @@ REG_OP(HcomBroadcast)
     .REQUIRED_ATTR(group, String)
     .ATTR(fusion, Int, 0)
     .ATTR(fusion_id, Int, -1)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomBroadcast)
 
+/**
+ * @brief performs reduction from other ranks to the root rank
+ * @par Inputs:
+ * x: A tensor. Must be one of the following types: int8, int16, int32, float16,
+   float32.
+ * @par Attributes:
+ * @li root_rank: A required integer identifying the root rank in the op;
+   the reduction result will be on this root rank.
+ * @li reduction: A required string identifying the reduction operation to
+   perform. The supported operations are: "sum", "max", "min", "prod".
+ * @li group: A required string identifying the group name of ranks
+   participating in the op.
+ * @li fusion: An optional integer identifying the fusion flag of the op.
+   0 (default): no fusion; 1: fusion; 2: fuse the ops by fusion id.
+ * @li fusion_id: An optional integer identifying the fusion id of the op.
+ * The HcomReduce ops with the same fusion id will be fused.
+ * @par Outputs:
+ * y: A Tensor. Has the same type as "x".
+ * @attention Constraints:
+ *"group" is limited to 128 characters. Use "hccl_world_group"
+   as the name of a world group.
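+ * @par Example:
+ * With 4 ranks, reduction = "sum" and root_rank = 0, the output "y" on rank 0
+ * holds the element-wise sum of the "x" tensors contributed by all 4 ranks.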
+ */
+REG_OP(HcomReduce)
+    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
+    .REQUIRED_ATTR(root_rank, Int)
+    .REQUIRED_ATTR(reduction, String)
+    .REQUIRED_ATTR(group, String)
+    .ATTR(fusion, Int, 0)
+    .ATTR(fusion_id, Int, -1)
+    .OP_END_FACTORY_REG(HcomReduce)
 /**
 * @brief Performs reduction across all input tensors, scattering in equal
    blocks among ranks, each rank getting a chunk of data based on its rank
@@ -139,8 +164,6 @@ REG_OP(HcomReduceScatter)
     .REQUIRED_ATTR(reduction, String)
     .REQUIRED_ATTR(group, String)
     .REQUIRED_ATTR(rank_size, Int)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomReduceScatter)
 
 /**
@@ -167,8 +190,6 @@ REG_OP(HcomSend)
     .REQUIRED_ATTR(group, String)
     .REQUIRED_ATTR(sr_tag, Int)
     .REQUIRED_ATTR(dest_rank, Int)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomSend)
 
 /**
@@ -202,8 +223,6 @@ REG_OP(HcomReceive)
     .REQUIRED_ATTR(src_rank, Int)
     .REQUIRED_ATTR(shape, ListInt)
     .REQUIRED_ATTR(dtype, Type)
-    .ATTR(alpha, Float, 1.0)
-    .ATTR(beta, Float, 0.0)
     .OP_END_FACTORY_REG(HcomReceive)
 
 /**
@@ -219,6 +238,15 @@ REG_OP(HcomRemoteRead)
     .REQUIRED_ATTR(dtype, Type)
     .OP_END_FACTORY_REG(HcomRemoteRead)
 
+/**
+ * @brief Performs Remote Ref Read of input tensors
+ * @par Inputs:
+ * remote: A tensor describing the remote memory address to read: u64 remoteId, u64 addrRemote, u64 length
+ * cache_var: The local base address
+ * local_offset: Skip step length
+ * @par Outputs:
+ * cache_var: The local base address
+ */
 REG_OP(HcomRemoteRefRead)
     .INPUT(remote, TensorType({DT_UINT64}))
     .INPUT(cache_var, TensorType({DT_UINT64}))
@@ -239,11 +267,90 @@ REG_OP(HcomRemoteWrite)
     .INPUT(local, TensorType::ALL())
     .OP_END_FACTORY_REG(HcomRemoteWrite)
 
+/**
+ * @brief Performs Remote Write of input tensors
+ * @par Inputs:
+ * @li remote: A tensor describing the remote memory address to write: u64 remoteId, u64 addrRemote, u64 length
+ * @li local: A Tensor whose value is length / size_of(Type)
+ */
 REG_OP(HcomRemoteScatterWrite)
     .INPUT(remote, TensorType({DT_INT64, DT_UINT64}))
     .INPUT(local, TensorType::ALL())
     .OPTIONAL_INPUT(local_offset, TensorType({DT_UINT64}))
     .OP_END_FACTORY_REG(HcomRemoteScatterWrite)
 
+/**
+ * @brief All ranks send different amount of data to, and receive different
+    amount of data from, all ranks.
+ * @par Inputs:
+ * Five inputs, including:
+ * @li send_data: A tensor. the memory to send.
+ * @li send_counts: A list, where entry i specifies the number of elements in
+    send_data to send to rank i.
+ * @li send_displacements: A list, where entry i specifies the displacement
+    (offset from sendbuf) from which to send data to rank i.
+ * @li recv_counts: A list, where entry i specifies the number of
+    elements to receive from rank i.
+ * @li recv_displacements: A list, where entry i specifies the displacement
+    (offset from recv_data) to which data from rank i should be written.
+ * @par Outputs:
+ * recv_data: A Tensor has same element type as send_data.
+ * @par Attributes:
+ * @li group: A string identifying the group name of ranks participating in
+    the op.
+* @attention All ranks participating in the op should be connected in a
+   full mesh over RDMA.
+ */
+REG_OP(HcomAllToAllV)
+    .INPUT(send_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
+    .INPUT(send_counts, TensorType({DT_INT64}))
+    .INPUT(send_displacements, TensorType({DT_INT64}))
+    .INPUT(recv_counts, TensorType({DT_INT64}))
+    .INPUT(recv_displacements, TensorType({DT_INT64}))
+    .OUTPUT(recv_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
+    .REQUIRED_ATTR(group, String)
+    .OP_END_FACTORY_REG(HcomAllToAllV)
+
+/**
+ * @brief All ranks send different amounts of data to, and receive different
+ amounts of data from, all ranks; in addition, all the data described by
+ addrinfo is concatenated into the output gathered.
+ * @par Inputs:
+ * Four inputs, including:
+ * @li addrinfo: A tensor describing the memory info (address, length) to send.
+ * @li addrinfo_count_per_rank: A list, where entry i specifies the number of
+ elements in addrinfo to send to rank i.
+ * @li recv_counts: A list, where entry i specifies the number of
+ elements to receive from rank i.
+ * @li recv_displacements: A list, where entry i specifies the displacement
+ (offset from recv_data) to which data from rank i should be written.
+ * @par Outputs:
+ * Two outputs, including:
+ * @li recv_data: A Tensor that has the same element type as dtype.
+ * @li gathered: A Tensor that has the same element type as dtype.
+ * @par Attributes:
+ * @li group: A string identifying the group name of ranks participating in
+ the op.
+ * @li dtype: Datatype of the send buffer elements.
+ * @li addr_length: Describes the element memory length in addrinfo.
+ -2: all element memory lengths in addrinfo are the same, but unknown.
+ -1: all element memory lengths are unknown.
+ >0: all element memory lengths in addrinfo are the same; the attr value is that length.
+ * @attention All ranks participating in the op should be connected in a full
+ mesh via RDMA.
+ */
+REG_OP(HcomGatherAllToAllV)
+    .INPUT(addrinfo, TensorType({DT_UINT64}))
+    .INPUT(addrinfo_count_per_rank, TensorType({DT_INT64}))
+    .INPUT(recv_counts, TensorType({DT_INT64}))
+    .INPUT(recv_displacements, TensorType({DT_INT64}))
+    .OUTPUT(recv_data, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
+    .OUTPUT(gathered, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
+    .REQUIRED_ATTR(group, String)
+    .REQUIRED_ATTR(dtype, Type)
+    .REQUIRED_ATTR(addr_length, Int)
+    .OP_END_FACTORY_REG(HcomGatherAllToAllV)
+
 } // namespace ge
 
 #endif // OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/hvd_ops.h b/third_party/fwkacllib/inc/ops/hvd_ops.h
index a49ec5ed..00299ef7 100644
--- a/third_party/fwkacllib/inc/ops/hvd_ops.h
+++ b/third_party/fwkacllib/inc/ops/hvd_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/image_ops.h b/third_party/fwkacllib/inc/ops/image_ops.h
index ce3262f9..6909345a 100644
--- a/third_party/fwkacllib/inc/ops/image_ops.h
+++ b/third_party/fwkacllib/inc/ops/image_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -24,6 +24,22 @@ #include "graph/operator_reg.h" namespace ge { +/** +*@brief Decode the frame(s) of a GIF-encoded image to a uint8 tensor . \n + +*@par Inputs: +*@li contents:A Tensor of type string. 0-D. The GIF-encoded image. \n + +*@par Outputs: +*image:A Tensor of type uint8. \n + +*@par Third-party framework compatibility +*Compatible with tensorflow DecodeGif operator. +*/ +REG_OP(DecodeGif) + .INPUT(contents, TensorType({DT_STRING})) + .OUTPUT(image, TensorType({DT_UINT8})) + .OP_END_FACTORY_REG(DecodeGif) /** *@brief Adjust the hue of one or more images . \n @@ -31,11 +47,12 @@ namespace ge { *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. At least 3-D. The format +must be NHWC. *@li delta:A Tensor of type float. A float delta to add to the hue . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -57,11 +74,12 @@ REG_OP(AdjustHue) *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last dimension is interpretted as channels, and must be three. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. At least 3-D. The format +must be NHWC. *@li scale:A Tensor of type float. A float scale to add to the saturation . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -83,11 +101,12 @@ REG_OP(AdjustSaturation) *@par Inputs: *Input images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as '[height, width, channels]'. Inputs include: -*@li images:A Tensor of type float. Images to adjust. At least 3-D. +*@li images:A Tensor of type float. Images to adjust. At least 3-D. The format +must be NHWC. *@li scale:A Tensor of type float. A float multiplier for adjusting contrast . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images is a tensor of at least 3 dimensions. The last dimension is @@ -112,7 +131,7 @@ nearest neighbor sampling to a common output size specified by crop_size . \n *Input images must be a 4-D tensor. Inputs include: *@li images:A Tensor. Must be one of the following types:uint8, uint16, int8, int16, int32, int64, float16, float, double. A 4-D tensor of shape -[batch, image_height, image_width, depth]. +[batch, image_height, image_width, depth]. The format must be NHWC. *@li boxes: A Tensor of type float. A 2-D tensor of shape [num_boxes, 4]. *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch). @@ -127,7 +146,7 @@ extrapolation, when applicable. NearestNeighbor . \n *@par Outputs: -*y:A Tensor of type float . \n +*y:A Tensor of type float. The format must be NHWC. \n *@attention Constraints: *Input images must be a 4-D tensor . \n @@ -193,7 +212,9 @@ boxes tensor . \n *@par Inputs: *Input images and grads must be a 4-D tensor. Inputs include: *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. +The format must be NHWC. 
*@li images: A 4-D tensor of shape [batch, image_height, image_width, depth]. +The format must be NHWC. Both image_height and image_width need to be positive. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image and is specified in @@ -233,6 +254,7 @@ images tensor . \n *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth]. +The format must be NHWC. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image and is specified in normalized coordinates [y1, x1, y2, x2]. @@ -248,7 +270,8 @@ method: A string specifying the interpolation method. Only 'bilinear' is supported for now . \n *@par Outputs: -*y:A 4-D tensor of shape [batch, image_height, image_width, depth] . \n +*y:A 4-D tensor of shape [batch, image_height, image_width, depth]. The format +must be NHWC. \n *@attention Constraints: *Input grads must be a 4-D tensor . \n @@ -273,6 +296,7 @@ REG_OP(CropAndResizeGradImage) *@par Inputs: *Input x must be a 4-D tensor. Inputs include: *@li x: A 4-D float tensor of shape [batch_size, height, width, channels]. +The format must be NHWC. *@li size: A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. @@ -293,7 +317,7 @@ uniform_noise . \n *@par Outputs: *y:A tensor representing the glimpses [batch_size, glimpse_height, -glimpse_width, channels] . \n +glimpse_width, channels]. The format must be NHWC. \n *@attention Constraints: *Input x must be a 4-D tensor . \n @@ -340,7 +364,8 @@ REG_OP(HSVToRGB) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li images: 4-D with shape [batch, height, width, channels]. +*@li images: 4-D with shape [batch, height, width, channels]. The format must +be NHWC. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images. *@li min: A Tensor of type float. @@ -354,6 +379,7 @@ the values at the corner pixels. Defaults to false. *@par Outputs: *@li resized_images: 4-D with shape [batch, new_height, new_width, channels]. +The format must be NHWC. *@li y_min: A Tensor of type float. *@li y_max: A Tensor of type float . \n @@ -381,7 +407,8 @@ REG_OP(QuantizedResizeBilinear) *@par Inputs: *Input images must be a 4-D tensor. Inputs include: -*@li images: 4-D with shape [batch, height, width, channels]. +*@li images: 4-D with shape [batch, height, width, channels]. The format must +be NHWC. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new size for the images . \n @@ -391,7 +418,8 @@ output tensors are aligned, preserving the values at the corner pixels. Defaults to false . \n *@par Outputs: -*y: 4-D with shape [batch, new_height, new_width, channels] . \n +*y: 4-D with shape [batch, new_height, new_width, channels]. The format must +be NHWC. \n *@attention Constraints: *Input images can be of different types but output images are always float . \n @@ -414,10 +442,10 @@ REG_OP(ResizeArea) *@par Inputs: *Input grads must be a 4-D tensor. Inputs include: *@li grads: A Tensor of type float. 4-D with shape [batch, height, width, -channels]. +channels]. The format must be NHWC. *@li original_image: A Tensor. Must be one of the following types: float, double. 4-D with shape [batch, orig_height, orig_width, channels], The image -tensor that was resized . 
\n
+tensor that was resized. The format must be NHWC. \n
 
 *@par Attributes:
 *@li align_corners: An optional bool. Defaults to False. If true, the centers
 of the 4 corner pixels of the input and grad tensors are aligned. Defaults to
 false.
 *@li half_pixel_centers: An optional bool. Defaults to False . \n
 
 *@par Outputs:
-*y: A Tensor. Has the same type as original_image . \n
+*y: A Tensor. Has the same type as original_image. The format must be NHWC. \n
 
 *@attention Constraints:
-*Input images can be of different types but output images are always float . \n
+*Input images can be of different types but output images are always float .
 
 *@par Third-party framework compatibility
 *Compatible with tensorflow ResizeBicubicGrad operator.
@@ -448,7 +476,8 @@ REG_OP(ResizeBicubicGrad)
 
 *@par Inputs:
 *Input images must be a 4-D tensor. Inputs include:
-*@li images: 4-D with shape [batch, height, width, channels].
+*@li images: 4-D with shape [batch, height, width, channels]. The format
+must be NHWC.
 *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
 size for the images . \n
 
@@ -459,10 +488,11 @@ Defaults to false.
 *@li half_pixel_centers: An optional bool. Defaults to False . \n
 
 *@par Outputs:
-*y: 4-D with shape [batch, new_height, new_width, channels] . \n
+*y: 4-D with shape [batch, new_height, new_width, channels]. The format
+must be NHWC. \n
 
 *@attention Constraints:
-*Input images can be of different types but output images are always float . \n
+*Input images can be of different types but output images are always float .
 
 *@par Third-party framework compatibility
 *Compatible with tensorflow ResizeBicubic operator.
@@ -483,7 +513,7 @@ REG_OP(ResizeBicubic)
 
 *@par Inputs:
 *Input grads must be a 4-D tensor. Inputs include:
 *@li grads: A Tensor. Must be one of the following types: uint8, int8, int32,
-float16, float, double. 4-D with shape [batch, height, width, channels].
+float16, float, double. Must set the format; the supported formats are "NCHW" and "NHWC".
 *@li size: A 1-D int32 Tensor of 2 elements: orig_height, orig_width.
 The original input size . \n
 
@@ -550,9 +580,8 @@ REG_OP(ResizeNearestNeighborV2GradD)
 
 *@par Inputs:
 *Input grads must be a 4-D tensor. Inputs include:
-*@li grads: A Tensor of type float32. 4-D with shape [batch, height, width,
-channels].
-*@li original_image: A Tensor. 4-D with shape [batch, orig_height, orig_width,
+*@li grads: A Tensor of type float32. Must set the format; the supported formats are "NCHW" and "NHWC".
+*@li original_image: A 4-D Tensor. Must set the format; the supported formats are "NCHW" and "NHWC". Shape is [batch, orig_height, orig_width,
 channels], The image tensor that was resized . \n
 
 *@par Attributes:
@@ -583,7 +612,7 @@ REG_OP(ResizeBilinearV2Grad)
 
 *@par Inputs:
 *Input images must be a 4-D tensor. Inputs include:
-*@li x: 4-D with shape [batch, height, width, channels].
+*@li x: A 4-D tensor. Must set the format; the supported formats are "NCHW" and "NHWC".
 *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
 size for the images . \n
 
@@ -639,6 +668,62 @@ REG_OP(RGBToHSV)
 
 /**
 *@brief Generate a single randomly distorted bounding box for an image . \n
 
+*@par Inputs:
+*Input images must be a 4-D tensor. Inputs include:
+*@li image_size: 1-D, containing [height, width, channels].
+*@li bounding_boxes: 3-D with shape [batch, N, 4] describing the N bounding
+boxes associated with the image. \n
+
+*@par Attributes:
+*@li seed: If either seed or seed2 is set to non-zero, the random number
+generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
+*@li seed2: A second seed to avoid seed collision.
+*@li min_object_covered: The cropped area of the image must contain at least
+this fraction of any bounding box supplied. The value of this parameter should
+be non-negative. In the case of 0, the cropped area does not need to overlap
+any of the bounding boxes supplied .
+*@li aspect_ratio_range: The cropped area of the image must have an aspect
+ratio = width / height within this range.
+*@li max_attempts: Number of attempts at generating a cropped region of the
+image of the specified constraints. After max_attempts failures, return the
+entire image.
+*@li use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes
+supplied. If true, assume an implicit bounding box covering the whole input.
+If false, raise an error . \n
+
+*@par Outputs:
+*@li begin: 1-D, containing [offset_height, offset_width, 0].
+*@li size: 1-D, containing [target_height, target_width, -1].
+*@li bboxes: 3-D with shape [1, 1, 4] containing the distorted bounding box . \n
+
+*@attention Constraints:
+*Input images can be of different types but output images are always float . \n
+
+*@par Third-party framework compatibility
+*Compatible with tensorflow SampleDistortedBoundingBox operator.
+*/
+
+REG_OP(SampleDistortedBoundingBox)
+    .INPUT(image_size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
+        DT_INT32, DT_INT64 }))
+    .INPUT(bounding_boxes, TensorType({ DT_FLOAT }))
+    .OUTPUT(begin, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
+        DT_INT32, DT_INT64 }))
+    .OUTPUT(size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
+        DT_INT32, DT_INT64 }))
+    .OUTPUT(bboxes, TensorType({ DT_FLOAT }))
+    .ATTR(seed, Int, 0)
+    .ATTR(seed2, Int, 0)
+    .ATTR(min_object_covered, Float, 0.1f)
+    .ATTR(aspect_ratio_range, ListFloat, { 0.75f, 1.33f })
+    .ATTR(area_range, ListFloat, { 0.05f, 1.0f })
+    .ATTR(max_attempts, Int, 100)
+    .ATTR(use_image_if_no_bounding_boxes, Bool, false)
+    .OP_END_FACTORY_REG(SampleDistortedBoundingBox)
+
+/**
+*@brief Generate a single randomly distorted bounding box for an image . \n
+
 *@par Inputs:
 *Input images must be a 4-D tensor. Inputs include:
 *@li image_size: 1-D, containing [height, width, channels].
@@ -697,7 +782,7 @@ REG_OP(SampleDistortedBoundingBoxExt2)
 
 *@par Inputs:
 *Input x must be a 4-D tensor. Inputs include:
-*@li x: 4-D with shape [batch, height, width, channels].
+*@li x: A 4-D tensor. Must set the format; the supported formats are "NCHW" and "NHWC".
 *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
 size for the images . \n
 
@@ -729,12 +814,12 @@ REG_OP(ResizeNearestNeighborV2)
 
 *@par Inputs:
 *Input images must be a 4-D tensor. Inputs include:
 *@li images: A Tensor. Must be one of the following types: float. 4-D with
-shape [batch, height, width, depth]. A batch of images.
+shape [batch, height, width, depth]. A batch of images. The format must be NHWC.
 *@li boxes: A Tensor of type float32. 3-D with shape [batch,
 num_bounding_boxes, 4] containing bounding boxes . \n
 
 *@par Outputs:
-*A Tensor. Has the same type as images . \n
+*A Tensor. Has the same type as images. The format must be NHWC. \n
 
 *@attention Constraints:
 *Input images must be a 4-D tensor . \n
@@ -1002,6 +1087,88 @@ REG_OP(EncodePng)
     .ATTR(compression, Int, -1)
     .OP_END_FACTORY_REG(EncodePng)
 
+
+/**
+*@brief PNG-decode an image.
+*@par Inputs:
+*contents: 0-D. The PNG-encoded image .
+
+*@par Attributes:
+*channels: An optional int. Defaults to 0. Number of color channels for the
+decoded image. \n
+*dtype: The data type of the decoded image.
+
+*@par Outputs:
+*image: is a 3-D uint8 or uint16 Tensor of shape [height, width, channels]
+where channels is: 1: for grayscale; 2: for grayscale + alpha; 3: for RGB;
+4: for RGBA . \n
+
+*@par Third-party framework compatibility
+*Compatible with tensorflow DecodePng operator.
+*/
+REG_OP(DecodePng)
+    .INPUT(contents, TensorType({DT_STRING}))
+    .OUTPUT(image, TensorType({DT_UINT8, DT_UINT16}))
+    .ATTR(dtype, Type, DT_UINT8)
+    .ATTR(channels, Int, 0)
+    .OP_END_FACTORY_REG(DecodePng)
+
+/**
+*@brief Bmp-decode an image. \n
+
+*@par Inputs:
+*@li contents: A Tensor of type string. 0-D. The BMP-encoded image. \n
+
+*@par Attributes:
+*@li channels: Decode the desired number of color channels of the image. \n
+
+*@par Outputs:
+*image: A Tensor dtype of uint8.
+
+* @par Third-party framework compatibility
+* Compatible with tensorflow DecodeBmp operator.
+*/
+
+REG_OP(DecodeBmp)
+    .INPUT(contents, TensorType({DT_STRING}))
+    .OUTPUT(image, TensorType({DT_UINT8}))
+    .ATTR(channels, Int, 0)
+    .OP_END_FACTORY_REG(DecodeBmp)
+
+/**
+*@brief Decode and crop a JPEG-encoded image to a uint8 tensor. \n
+
+*@par Inputs:
+*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n
+*@li crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. \n
+
+*@par Attributes:
+*@li channels: An optional int. Defaults to 0. Number of color channels for the
+*decoded image.
+*@li ratio: An optional int. Defaults to 1. Downscaling ratio.
+*@li fancy_upscaling: An optional bool. Defaults to True. If true use a slower
+*but nicer upscaling of the chroma planes.
+*@li try_recover_truncated: An optional bool. Defaults to False. If true try to
+*recover an image from truncated input.
+*@li acceptable_fraction: An optional float. Defaults to 1. The minimum required
+fraction of lines before a truncated input is accepted.
+*@li dct_method: An optional string. Defaults to "". A string specifying a hint
+*about the algorithm used for decompression. \n
+
+*@par Outputs:
+*image: A Tensor dtype of uint8.
+*/
+REG_OP(DecodeAndCropJpeg)
+    .INPUT(contents, TensorType({DT_STRING}))
+    .INPUT(crop_window, TensorType({DT_INT32}))
+    .OUTPUT(image, TensorType({DT_UINT8}))
+    .ATTR(channels, Int, 0)
+    .ATTR(ratio, Int, 1)
+    .ATTR(fancy_upscaling, Bool, true)
+    .ATTR(try_recover_truncated, Bool, false)
+    .ATTR(acceptable_fraction, Float, 1.0)
+    .ATTR(dct_method, String, "")
+    .OP_END_FACTORY_REG(DecodeAndCropJpeg)
+
 /**
 *@brief Resizes "images" to "size" using bilinear interpolation . \n
@@ -1316,6 +1483,55 @@ REG_OP(CombinedNonMaxSuppression)
     .ATTR(clip_boxes, Bool, true)
     .OP_END_FACTORY_REG(CombinedNonMaxSuppression)
 
+/**
+*@brief Resizes "images" with "offset" using bilinear interpolation. \n
+
+*@par Inputs:
+*@li img: input image, a 4-D tensor of shape `[n, h, w, c]`.
+*@li warp_offset: the resize offset, a 4-D float tensor of shape `[n, h, w, 2]`; 2 means (x, y) for the offset point.
+
+*@par Outputs:
+*warp_img: A Tensor after resize. \n
+*/
+REG_OP(IMGWarp)
+    .INPUT(img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32}))
+    .INPUT(warp_offset, TensorType({DT_FLOAT32}))
+    .OUTPUT(warp_img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32}))
+    .OP_END_FACTORY_REG(IMGWarp)
+
+/**
+*@brief Resizes "images" with "offset" using bilinear interpolation. \n
+
+*@par Inputs:
+*@li img: input image, a 4-D tensor of shape `[n, h, w, c]`.
+*@li map_offset: the resize offset, a 4-D float tensor of shape `[n, h, w, 2]`; 2 means (x, y) for the resize point.
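+*
+*A sampling sketch (illustrative only; not part of the op interface):
+*  // (fx, fy) = map_offset[n, y, x, :];
+*  // map_img[n, y, x, c] = bilinear sample of img[n, :, :, c] at point (fx, fy).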
+
+*@par Outputs:
+*map_img: A Tensor after resize. \n
+*/
+REG_OP(Remap)
+    .INPUT(img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32}))
+    .INPUT(map_offset, TensorType({DT_FLOAT32}))
+    .OUTPUT(map_img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32}))
+    .OP_END_FACTORY_REG(Remap)
+
+/**
+*@brief Resizes "images" with "offset" using bilinear interpolation. \n
+
+*@par Inputs:
+*@li img: input image, a 5-D tensor of shape `[n, 4, c, h, w]`,
+and 4 means input[(h_top, w_left), (h_top, w_right), (h_bottom, w_left), (h_bottom, w_right)].
+*@li warp_index: the resize offset, a 4-D float tensor of shape `[n, 2, h, w]`; 2 means (x, y) for the resize point.
+
+*@par Outputs:
+*remap_img: A Tensor after ResizeBilinear, a 4-D tensor of shape `[n, c, h, w]`. \n
+*/
+REG_OP(IMGWarpResize)
+    .INPUT(img, TensorType({DT_FLOAT32}))
+    .INPUT(warp_index, TensorType({DT_FLOAT32}))
+    .OUTPUT(warp_img, TensorType({DT_FLOAT32}))
+    .OP_END_FACTORY_REG(IMGWarpResize)
+
 /**
 *@brief Function spatial transformer . \n
@@ -1342,6 +1558,383 @@ REG_OP(SpatialTransformerD)
     .ATTR(use_default_theta, ListBool, {})
     .OP_END_FACTORY_REG(SpatialTransformerD)
 
-} // namespace ge
+/**
+* @brief Resizes the input tensor. \n
+Currently, only image tensors can be resized, using nearest-neighbor and linear interpolation.
+
+* @par Inputs:
+* Input x must be a 4-D tensor. Inputs include: \n
+* @li x: A Tensor. Must be one of the following types: uint8, int8, int16, \n
+int32, int64, float16, float, double. 4-D with shape [batch, height, width, channels] \n
+or shape [batch, channels, height, width].
+* @li roi: A 1-D float Tensor. Only takes effect when attr coordinate_transformation_mode \n
+is "tf_crop_and_resize".
+* @li scales: A 1-D float Tensor, the scale array along each dimension. Only one of \n
+'scales' and 'sizes' can be specified.
+* @li sizes: A 1-D int64 Tensor, the size of the output tensor. Only one of \n
+'scales' and 'sizes' can be specified. If 'sizes' is specified, then set 'scales' \n
+to empty data (zero shape) in this operator's input list.
+
+* @par Attributes:
+* @li coordinate_transformation_mode: String. Defaults to half_pixel. How to transform \n
+the coordinate in the resized tensor to the coordinate in the original tensor. \n
+Other options: pytorch_half_pixel, align_corners, asymmetric, tf_half_pixel_for_nn, \n
+tf_crop_and_resize.
+* @li cubic_coeff_a: Float. Defaults to -0.75, only used in cubic interpolation. \n
+Other option: -0.5.
+* @li exclude_outside: Int. Defaults to 0. If set to 1, the weight of sampling \n
+locations outside the tensor will be set to 0 and the weights will be renormalized \n
+so that their sum is 1.0.
+* @li extrapolation_value: Float. Defaults to 0.0f. When coordinate_transformation_mode \n
+is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], \n
+this value is used as the corresponding output value.
+* @li mode: String. Defaults to nearest. Three interpolation modes: nearest (default), \n
+linear and cubic.
+* @li nearest_mode: String. Defaults to round_prefer_floor. Four modes: round_prefer_floor, \n
+round_prefer_ceil, floor, ceil. Only used by nearest interpolation.
+
+* @par Outputs:
+* y: A Tensor. Has the same type as x.
+
+* @attention Constraints: \n
+* Input x must be a 4-D tensor.
+
+* @par Third-party framework compatibility
+* Compatible with tensorflow ResizeNearestNeighborV2 operator.
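+* A coordinate sketch for the default "half_pixel" mode (illustrative only;
+* SrcCoord is a hypothetical helper, not part of this interface):
+*   float SrcCoord(float x_resized, float scale) {
+*     return (x_resized + 0.5f) / scale - 0.5f;  // output index -> input coordinate
+*   }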
+*/
+
+REG_OP(Resize)
+    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
+                          DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .INPUT(roi, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .INPUT(scales, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(sizes, TensorType({DT_INT64}))
+    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
+                           DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .ATTR(coordinate_transformation_mode, String, "half_pixel")
+    .ATTR(cubic_coeff_a, Float, -0.75)
+    .ATTR(exclude_outside, Int, 0)
+    .ATTR(extrapolation_value, Float, 0)
+    .ATTR(mode, String, "nearest")
+    .ATTR(nearest_mode, String, "round_prefer_floor")
+    .OP_END_FACTORY_REG(Resize)
+
+/**
+*@brief Decode a JPEG-encoded image to a uint8 tensor. \n
+
+*@par Inputs:
+*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n
+
+*@par Attributes:
+*@li channels: An optional int. Defaults to 0. Number of color channels for the decoded image.
+*@li ratio: An optional int. Defaults to 1. Downscaling ratio.
+*@li fancy_upscaling: An optional bool. Defaults to True. If true use a slower but nicer upscaling of the chroma planes.
+*@li try_recover_truncated: An optional bool. Defaults to False. If true try to recover an image from truncated input.
+*@li acceptable_fraction: An optional float. Defaults to 1. The minimum required fraction of lines before a truncated input is accepted.
+*@li dct_method: An optional string. Defaults to "". A string specifying a hint about the algorithm used for decompression. \n
+
+*@par Outputs:
+*image: A Tensor dtype of uint8.
+*/
+REG_OP(DecodeJpeg)
+    .INPUT(contents, TensorType({DT_STRING}))
+    .OUTPUT(image, TensorType({DT_UINT8}))
+    .ATTR(channels, Int, 0)
+    .ATTR(ratio, Int, 1)
+    .ATTR(fancy_upscaling, Bool, true)
+    .ATTR(try_recover_truncated, Bool, false)
+    .ATTR(acceptable_fraction, Float, 1.0)
+    .ATTR(dct_method, String, "")
+    .OP_END_FACTORY_REG(DecodeJpeg)
+
+/**
+*@brief Image warping using per-pixel flow vectors. \n
+
+*@par Inputs:
+*@li image: 4-D Tensor with shape `[batch, height, width, channels]`.
+*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n
+
+*@par Outputs:
+*y: Returns 4-D with the same shape and dtype as `image`. \n
+*/
+REG_OP(DenseImageWarp)
+    .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OP_END_FACTORY_REG(DenseImageWarp)
+
+/**
+*@brief Calculate the resize_d function. \n
+
+*@par Inputs:
+*One input, including:
+* @li x: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+*@par Attributes:
+*@li sizes: A required listInt. \n
+*@li scales: An optional listFloat.
+  Defaults to none. \n
+*@li roi: An optional listInt.
+  Defaults to none. \n
+*@li coordinate_transformation_mode: An optional String.
+  Defaults to "half_pixel". \n
+*@li cubic_coeff_a: An optional float.
+  Defaults to -0.75. \n
+*@li exclude_outside: An optional int.
+  Defaults to 0. \n
+*@li extrapolation_value: An optional float.
+  Defaults to 0.0. \n
+*@li mode: An optional String.
+  Defaults to "nearest". \n
+*@li nearest_mode: An optional String.
+  Defaults to "round_prefer_floor". \n
+
+*@par Outputs:
+*y: A Tensor with the same type as x;
+  its shape depends on x and sizes. \n
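+* A rounding sketch for nearest_mode="round_prefer_floor" (illustrative only;
+* RoundPreferFloor is a hypothetical helper, not part of this interface):
+*   int64_t RoundPreferFloor(float x) {
+*     const float frac = x - std::floor(x);  // needs <cmath>
+*     return static_cast<int64_t>(frac <= 0.5f ? std::floor(x) : std::ceil(x));
+*   }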
+*/
+REG_OP(ResizeD)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .REQUIRED_ATTR(sizes, ListInt)
+    .ATTR(scales, ListFloat, {})
+    .ATTR(roi, ListInt, {})
+    .ATTR(coordinate_transformation_mode, String, "half_pixel")
+    .ATTR(cubic_coeff_a, Float, -0.75)
+    .ATTR(exclude_outside, Int, 0)
+    .ATTR(extrapolation_value, Float, 0.0)
+    .ATTR(mode, String, "nearest")
+    .ATTR(nearest_mode, String, "round_prefer_floor")
+    .OP_END_FACTORY_REG(ResizeD)
+
+/**
+*@brief Calculate the resize_grad_d function. \n
+
+*@par Inputs:
+*One input, including:
+* @li grads: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+*@par Attributes:
+*@li original_size: A required listInt. \n
+*@li roi: An optional listInt.
+  Defaults to none. \n
+*@li scales: An optional listFloat.
+  Defaults to none. \n
+*@li coordinate_transformation_mode: An optional String.
+  Defaults to "half_pixel". \n
+*@li cubic_coeff_a: An optional float.
+  Defaults to -0.75. \n
+*@li exclude_outside: An optional int.
+  Defaults to 0. \n
+*@li extrapolation_value: An optional float.
+  Defaults to 0.0. \n
+*@li mode: An optional String.
+  Defaults to "nearest". \n
+*@li nearest_mode: An optional String.
+  Defaults to "round_prefer_floor". \n
+
+*@par Outputs:
+*y: A Tensor with the same type as grads;
+  its shape depends on grads and original_size. \n
+*/
+REG_OP(ResizeGradD)
+    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .REQUIRED_ATTR(original_size, ListInt)
+    .ATTR(roi, ListInt, {})
+    .ATTR(scales, ListFloat, {})
+    .ATTR(coordinate_transformation_mode, String, "half_pixel")
+    .ATTR(cubic_coeff_a, Float, -0.75)
+    .ATTR(exclude_outside, Int, 0)
+    .ATTR(extrapolation_value, Float, 0.0)
+    .ATTR(mode, String, "nearest")
+    .ATTR(nearest_mode, String, "round_prefer_floor")
+    .OP_END_FACTORY_REG(ResizeGradD)
+
+/**
+*@brief Computes the gradients of DenseImageWarp with respect to image and flow. \n
+
+*@par Inputs:
+*@li grad: gradients with respect to the DenseImageWarp output.
+*@li image: 4-D Tensor with shape `[batch, height, width, channels]`.
+*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n
+
+*@par Outputs:
+*grad_image: Returns 4-D with the same shape and dtype as `image`.
+*grad_flow: Returns 4-D with the same shape and dtype as `flow`. \n
+*/
+REG_OP(DenseImageWarpGrad)
+    .INPUT(grad, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(grad_image, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(grad_flow, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OP_END_FACTORY_REG(DenseImageWarpGrad)
+
+/**
+*@brief This operation samples input X by using interpolation based on the flow field grid,
+ which is usually generated by affine_grid. The grid of shape [N, H, W, 2] is the concatenation of
+ (x, y) coordinates with shape [N, H, W] each, where x indexes the 4th dimension (the width
+ dimension) of input data x and y indexes the 3rd dimension (the height dimension); the result is
+ the interpolation of the 4 nearest corner points. The output tensor shape will be [N, C, H, W].
+
+*@par Inputs:
+*@li x: 4-D Tensor with shape `[batch, channels, height, width]`.
+*@li grid: flow field grid, 4-D Tensor with shape `[batch, height, width, 2]`.
+
+*@par Attributes:
+*@li interpolation_mode: An optional string specifying the interpolation method. Only 'bilinear' is
+ supported for now .
+*@li padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now .
+*@li align_corners: An optional bool. If "true", the centers of the corner
+ pixels of the input and output tensors are aligned. Defaults to "false" .
+
+*@par Outputs:
+*y: Returns 4-D Tensor with the same dtype as `X`.
+
+*@par Third-party framework compatibility
+*Compatible with pytorch GridSampler2D operator.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(GridSampler2D)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(interpolation_mode, String, "bilinear")
+    .ATTR(padding_mode, String, "zeros")
+    .ATTR(align_corners, Bool, false)
+    .OP_END_FACTORY_REG(GridSampler2D)
+
+/**
+*@brief This operation unnormalizes the input Grid, which is usually generated by affine_grid.
+
+*@par Inputs:
+*@li grid: flow field grid, 4-D Tensor with shape `[batch, height, width, 2]`.
+*@li assist: Assist matrix, a 4-D tensor of type float16.
+
+*@par Attributes:
+*@li align_corners: An optional bool. If "true", the centers of the corner
+ pixels of the input and output tensors are aligned. Defaults to "false" .
+
+*@par Outputs:
+*diff: Returns 4-D Tensor with the same shape and dtype as `grid`.
+*position: Returns 4-D Tensor with the same shape as `grid`.
+*/
+REG_OP(GridUnnormal)
+    .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(assist, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(diff, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(position, TensorType({DT_INT32}))
+    .ATTR(align_corners, Bool, false)
+    .OP_END_FACTORY_REG(GridUnnormal)
+
+/**
+*@brief This operation unfolds input X based on the unnormalized grid, which is generated by GridUnnormal.
+
+*@par Inputs:
+*@li x: 4-D Tensor with shape `[batch, channels, height, width]`.
+*@li position: 4-D Tensor with shape `[batch, output_height, output_width, 2]`.
+
+*@par Attributes:
+*@li padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now .
+
+*@par Outputs:
+*y: Returns 4-D Tensor with the same dtype as `x`.
+*/
+REG_OP(ImageUnfold)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(position, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(padding_mode, String, "zeros")
+    .OP_END_FACTORY_REG(ImageUnfold)
+
+/**
+*@brief This operation selects images to warp_images according to offsets.
+
+*@par Inputs:
+*@li images: 4-D Tensor with shape `[batch, height, width, 3]`.
+*@li offsets: 4-D Tensor with shape `[batch, 4, new_height, new_width]`.
+
+*@par Outputs:
+*warp_images: Returns 5-D Tensor with shape
+`[batch, 4, new_height, new_width, 3]` and the same dtype as `images`.
+*/
+REG_OP(IMGWarpOffsets)
+    .INPUT(images, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT}))
+    .INPUT(offsets, TensorType({DT_FLOAT, DT_INT32}))
+    .OUTPUT(warp_images, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(IMGWarpOffsets)
+
+/**
+*@brief This operation samples 3d input x by using interpolation based on the flow field grid,
+ which is usually generated by affine_grid.
+
+*@par Inputs:
+*@li x: 5-D Tensor with shape `[batch, channels, depth, height, width]`.
+*@li grid: flow field grid, 5-D Tensor with shape `[batch, depth, height, width, 2]`.
+
+*@par Attributes:
+*@li interpolation_mode: An optional string specifying the interpolation method.
+*@li padding_mode: An optional string specifying the pad method.
+*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*y: Returns 5-D Tensor with the same dtype as `x`. + +*@par Third-party framework compatibility +*Compatible with pytorch GridSampler3D operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(GridSampler3D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(interpolation_mode, String, "bilinear") + .ATTR(padding_mode, String, "zeros") + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridSampler3D) +/** +*@brief Computes the gradients of GridSampler3D. + +*@par Inputs: +*@li grad: 5-D Tensor with shape `[batch, channels, depth, height, width]`. +*@li x: 5-D Tensor with shape `[batch, channels, depth, height, width]`. +*@li grid: flow field grid, 5-D Tensor with shape `[batch, depth, height, width, 2]`. + +*@par Attributes: +*@li interpolation_mode: An optional string specifying the interpolation method. +*@li padding_mode: An optional string specifying the pad method. +*@li align_corners: An optional bool. If "true", the centers of the corner + pixels of the input and output tensors are aligned. Defaults to "false" . + +*@par Outputs: +*dx: Returns 5-D Tensor with the same dtype and shape as `x`. +*dgrid: Returns 5-D Tensor with the same dtype and shape as `grid`. + +*@par Third-party framework compatibility +*Compatible with pytorch GridSampler3DGrad operator. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(GridSampler3DGrad) + .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(dx, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(dgrid, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(interpolation_mode, String, "bilinear") + .ATTR(padding_mode, String, "zeros") + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(GridSampler3DGrad) + +} // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/internal_ops.h b/third_party/fwkacllib/inc/ops/internal_ops.h index 9dde14a5..bcc3f1c3 100644 --- a/third_party/fwkacllib/inc/ops/internal_ops.h +++ b/third_party/fwkacllib/inc/ops/internal_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/linalg_ops.h b/third_party/fwkacllib/inc/ops/linalg_ops.h index 7a6fbc59..69c77bf6 100644 --- a/third_party/fwkacllib/inc/ops/linalg_ops.h +++ b/third_party/fwkacllib/inc/ops/linalg_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -61,8 +61,8 @@ REG_OP(CholeskyGrad) *@par Inputs: *The input x has to be symmetric and positive definite.Inputs include: -*x:A Tensor. Must be one of the following types: double, float32. Shape -is [..., M, M] . \n +*x:A Tensor. 
Must be one of the following types: double, float32, float16,
+complex64, complex128. Shape is [..., M, M] . \n
 
 *@par Outputs:
 *y:A Tensor. Has the same type as x . \n
@@ -76,10 +76,31 @@ form square matrices.
 */
 
 REG_OP(Cholesky)
-    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE}))
-    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, \
+        DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, \
+        DT_FLOAT16, DT_COMPLEX64, DT_COMPLEX128}))
     .OP_END_FACTORY_REG(Cholesky)
 
+/**
+*@brief Computes the outer product of two 1D vectors . \n
+
+*@par Inputs:
+*The inputs x1 and x2 have to be 1-D vectors. Inputs include:
+*@li x1:A Tensor. Must be one of the following types: float16, float32.
+Shape is [N] . \n
+*@li x2:A Tensor. Must have the same type as x1. Shape is [M] . \n
+
+*@par Outputs:
+*y:A Tensor. Has the same type as x1 . \n
+*/
+
+REG_OP(Ger)
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(Ger)
+
 /**
 *@brief Computes the sign and the log of the absolute value of the determinant
 of one or more square matrices . \n
 
 *@par Inputs:
 *The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions
 form square matrices. Inputs include:
-*x:A Tensor. Must be one of the following types: double, float32. Shape is
-[..., M, M] . \n
+*x:A Tensor. Must be one of the following types: double, float32,
+complex64, complex128. Shape is [..., M, M] . \n
 
 *@par Outputs:
 *@li y:A Tensor. Has the same type as x.
@@ -103,9 +124,9 @@ form square matrices. \n
 */
 
 REG_OP(LogMatrixDeterminant)
-    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE}))
-    .OUTPUT(sign, TensorType({DT_FLOAT, DT_DOUBLE}))
-    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(sign, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
     .OP_END_FACTORY_REG(LogMatrixDeterminant)
 
 /**
@@ -114,8 +135,8 @@ REG_OP(LogMatrixDeterminant)
 
 *@par Inputs:
 *The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions
 form square matrices. Inputs include:
-*x:A Tensor. Must be one of the following types: double, float32. Shape is
-[..., M, M] . \n
+*x:A Tensor. Must be one of the following types: double, float32, complex64,
+complex128. Shape is [..., M, M] . \n
 
 *@par Outputs:
 *y:A Tensor. Has the same type as x . \n
@@ -129,8 +150,8 @@ form square matrices.
 */
 
 REG_OP(MatrixDeterminant)
-    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE}))
-    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(MatrixDeterminant)
 
 /**
@@ -140,8 +161,7 @@ their adjoints (conjugate transposes) . \n
 
 *@par Inputs:
 *The input x is a tensor of shape [..., M, M] whose inner-most 2 dimensions
 form square matrices. Inputs include:
-*x:A Tensor. Must be one of the following types: double, float. Shape is
-[..., M, M] . \n
+*x:An input tensor. Shape is [..., M, M] . \n
 
 *@par Attributes:
 *adjoint:An optional bool. Defaults to False.Boolean indicating whether to
@@ -159,8 +179,8 @@ form square matrices.
\n */ REG_OP(MatrixInverse) - .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixInverse) @@ -169,8 +189,7 @@ REG_OP(MatrixInverse) *@par Inputs: *The input rhs must have the same type as matrix. Inputs include: -*@li matrix:A Tensor. Must be one of the following types: double, float. -Shape is [..., M, M]. +*@li matrix:A Tensor of input. Shape is [..., M, M]. *@li rhs:A Tensor. Must have the same type as matrix. Shape is [..., M, K] . \n *@par Attributes: @@ -189,9 +208,9 @@ dimensions form square matrices. \n */ REG_OP(MatrixSolve) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixSolve) @@ -221,8 +240,8 @@ dimensions form square matrices. \n */ REG_OP(MatrixSolveLs) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .INPUT(l2, TensorType({DT_DOUBLE})) .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) .ATTR(fast, Bool, true) @@ -234,8 +253,7 @@ matrices by backsubstitution . \n *@par Inputs: *The input rhs must have the same type as matrix. Inputs include: -*@li matrix: A Tensor. Must be one of the following types: double, float. -Shape is [..., M, M]. +*@li matrix: A Tensor. Shape is [..., M, M]. *@li rhs:A Tensor. Must have the same type as matrix. Shape is [..., M, K] . \n *@par Attributes: @@ -256,9 +274,9 @@ dimensions form square matrices. \n */ REG_OP(MatrixTriangularSolve) - .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(matrix, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(lower, Bool, true) .ATTR(adjoint, Bool, false) .OP_END_FACTORY_REG(MatrixTriangularSolve) @@ -268,8 +286,7 @@ REG_OP(MatrixTriangularSolve) *@par Inputs: *The input shape of x must be [..., M, N]. Inputs include: -*x:A Tensor whose shape is [..., M, N]. Must be one of the following types: -double, float . \n +*x:A Tensor whose shape is [..., M, N]. \n *@par Attributes: *full_matrices: An optional bool. Defaults to False. If true, compute @@ -289,9 +306,12 @@ dimensions form matrices of size [M, N]. 
\n */ REG_OP(Qr) - .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) - .OUTPUT(q, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) - .OUTPUT(r, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE })) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(q, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(r, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \ + DT_COMPLEX64, DT_COMPLEX128 })) .ATTR(full_matrices, Bool, false) .OP_END_FACTORY_REG(Qr) @@ -320,12 +340,40 @@ form square matrices. \n */ REG_OP(SelfAdjointEig) - .INPUT(x, TensorType({ DT_DOUBLE, DT_FLOAT })) - .OUTPUT(eigen_value, TensorType({ DT_DOUBLE, DT_FLOAT })) - .OUTPUT(eigen_vector, TensorType({ DT_DOUBLE, DT_FLOAT })) + .INPUT(x, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(eigen_value, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) + .OUTPUT(eigen_vector, TensorType({ DT_DOUBLE, DT_FLOAT, DT_COMPLEX64, DT_COMPLEX128 })) .ATTR(compute_v, Bool, true) .OP_END_FACTORY_REG(SelfAdjointEig) +/** +*@brief Computes the sign and the log of the absolute value of the determinant +of one or more square matrices . \n + +*@par Inputs: +*The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions +form square matrices. Inputs include: +*x:A Tensor. Must be one of the following types: double, float32, float16 +Shape is [..., M, M] . \n + +*@par Outputs: +*@li y:A Tensor. Has the same type as x. +*@li sign:A Tensor. Has the same type as x . \n + +*@attention Constraints: +*The input x is a tensor of shape [N, M, M] whose inner-most 2 dimensions +form square matrices. \n + +*@par Third-party framework compatibility +*Compatible with tensorflow LogMatrixDeterminant operator. +*/ + +REG_OP(Slogdet) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(sign, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OP_END_FACTORY_REG(Slogdet) + /** *@brief Computes the singular value decompositions of one or more matrices . \n @@ -384,8 +432,8 @@ of the rows encoded as a list of indices in `0..M-1`. Shape is `[..., M]` . \n */ REG_OP(Lu) - .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(lu, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(lu, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OUTPUT(p, TensorType({DT_INT32, DT_INT64})) .REQUIRED_ATTR(output_idx_type, Type) .OP_END_FACTORY_REG(Lu) @@ -404,8 +452,8 @@ y: Shape is `[..., M, M]` . 
\n */ REG_OP(MatrixSquareRoot) - .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(input, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .OP_END_FACTORY_REG(MatrixSquareRoot) /** @@ -424,9 +472,9 @@ y: Tensor of shape `[..., M, K]` containing the solutions \n */ REG_OP(TridiagonalSolve) - .INPUT(diagonals, TensorType({DT_FLOAT, DT_DOUBLE})) - .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(diagonals, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .INPUT(rhs, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128})) .ATTR(partial_pivoting, Bool, true) .OP_END_FACTORY_REG(TridiagonalSolve) diff --git a/third_party/fwkacllib/inc/ops/list_ops.h b/third_party/fwkacllib/inc/ops/list_ops.h new file mode 100644 index 00000000..a1b622e9 --- /dev/null +++ b/third_party/fwkacllib/inc/ops/list_ops.h @@ -0,0 +1,504 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! + * \file list_ops.h + * \brief + */ +#ifndef OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ +#define OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ + +#include +#include "graph/operator_reg.h" +#include "graph/operator.h" + +namespace ge { + +/** +*@brief Creates and returns an empty tensor list. \n + +*@par Inputs: +*@li element_shape: A shape compatible with that of elements in the list. +*@li max_num_elements: The maximum number of elements. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li handle: An empty tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow EmptyTensorList operator. +*/ +REG_OP(EmptyTensorList) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(max_num_elements, TensorType({DT_INT32})) + .OUTPUT(handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(EmptyTensorList) + +/** +*@brief Returns a list which has the passed-in `Tensor` as last element +and the other elements of the given list in `input_handle`. \n + +*@par Inputs: +*@li input_handle: The old list. +*@li tensor: The tensor to put on the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle:A list with the elements of old list followed by tensor. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPushBack operator. 
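+*
+*A semantics sketch (illustrative only): if input_handle holds [t0, t1], the
+*returned output_handle holds [t0, t1, tensor]; the input list is not mutated.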
+*/ +REG_OP(TensorListPushBack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPushBack) + +/** +*@brief The last element of the input list as well as a +list with all but that element. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li element_shape: A shape compatible with that of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle:A list with the elements of the old list followed by tensor. +*@li tensor:The withdrawn last element of the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPopBack operator. +*/ +REG_OP(TensorListPopBack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPopBack) + +/** +*@brief The number of tensors in the input tensor list. \n + +*@par Inputs: +*@li input_handle: The input list. \n + +*@par Outputs: +*@li length:The number of tensors in the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListLength operator. +*/ +REG_OP(TensorListLength) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .OUTPUT(length, TensorType({DT_INT32})) + .OP_END_FACTORY_REG(TensorListLength) + +/** +*@brief The shape of elements in the input tensor list. \n + +*@par Inputs: +*@li input_handle: The input list. \n + +*@par Attributes: +*@li shape_type: The type of shape in the list. \n + +*@par Outputs: +*@li element_shape:A shape compatible with that of elements in the list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListElementShape operator. +*/ +REG_OP(TensorListElementShape) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .OUTPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .ATTR(shape_type, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListElementShape) + +/** +*@brief List of the given size with empty elements. \n + +*@par Inputs: +*@li element_shape: A shape compatible with that of elements in the list. +*@li num_elements: The number of elements to reserve. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. +*@li shape_type: The type of shape in the list. \n + +*@par Outputs: +*@li handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListReserve operator. +*/ +REG_OP(TensorListReserve) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(num_elements, TensorType({DT_INT32})) + .OUTPUT(handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .ATTR(shape_type, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListReserve) + +/** +*@brief Get input tensor list elements of index position. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li index: A tensor of position. 
+*@li element_shape: A shape compatible with that of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li item: An output tensor value of index position . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListGetItem operator. +*/ +REG_OP(TensorListGetItem) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(index, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(item, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListGetItem) + +/** +*@brief Sets the index-th position of the list to contain the given tensor. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li index: The position in the list to which the tensor will be assigned. +*@li item: The element to be assigned to that position. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListSetItem operator. +*/ +REG_OP(TensorListSetItem) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(index, TensorType({DT_INT32})) + .INPUT(item, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL,DT_RESOURCE, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListSetItem) + +/** +*@brief Push tensor to list. \n + +*@par Inputs: +*@li input_handles: The input tensor lists. +*@li tensor: The tensor push into tensor list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handles: The output tensor lists. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListPushBackBatch operator. +*/ +REG_OP(TensorListPushBackBatch) + .INPUT(input_handles, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(output_handles, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListPushBackBatch) + +/** +*@brief Stacks all tensors in the list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li element_shape: A shape compatible with that of elements in the tensor. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. +*@li num_elements: The number of elements in the list. \n + +*@par Outputs: +*@li tensor: The tensor of list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListStack operator. 
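+*
+*A shape sketch (illustrative only): stacking a list of N elements, each of
+*shape element_shape, yields a tensor of shape [N] + element_shape; all
+*elements must share that shape.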
+*/ +REG_OP(TensorListStack) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .ATTR(num_elements, Int, -1) + .OP_END_FACTORY_REG(TensorListStack) + +/** +*@brief Concats all tensors in the list along the 0th dimension. +Requires that all tensors have the same shape except the first dimension. \n + +*@par Inputs: +*@li input_handle: The input list. +*@li element_shape: The shape of the uninitialized elements in the list. +If the first dimension is not -1, it is assumed that all list elements have +the same leading dim. +*@li leading_dims: The list of leading dims of uninitialized list elements. Used if +the leading dim of input_handle.element_shape or the element_shape input arg +is not already set. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li tensor: The concated result. +*@li lengths: Output tensor containing sizes of the 0th dimension of tensors +in the list, used for computing the gradient. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListConcatV2 operator. +*/ +REG_OP(TensorListConcatV2) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(leading_dims, TensorType({DT_INT64})) + .OUTPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .OUTPUT(lengths, TensorType({DT_INT64})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListConcatV2) + +/** +*@brief Splits a tensor into a list. \n + +*@par Inputs: +*@li tensor: The input tensor. +*@li element_shape: A shape compatible with that of elements in the tensor. +*@li lengths: Vector of sizes of the 0th dimension of tensors in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListSplit operator. +*/ +REG_OP(TensorListSplit) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(lengths, TensorType({DT_INT64})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListSplit) + +/** +*@brief Creates a TensorList which, when stacked, has the value of `tensor`. \n + +*@par Inputs: +*@li tensor: The input tensor. +*@li element_shape: The shape of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: An output tensor list . \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListFromTensor operator. 
+*/ +REG_OP(TensorListFromTensor) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListFromTensor) + +/** +*@brief Resizes the list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li size: size of the output list. \n + +*@par Outputs: +*@li output_handle: The output tensor list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListResize operator. +*/ +REG_OP(TensorListResize) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(size, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .OP_END_FACTORY_REG(TensorListResize) + +/** +*@brief Creates a Tensor by indexing into the TensorList. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li indices: The indices used to index into the list. +*@li element_shape: The shape of elements in the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li values: The tensor. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListGather operator. +*/ +REG_OP(TensorListGather) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32})) + .OUTPUT(values, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListGather) + +/** +*@brief Creates a TensorList by indexing into a Tensor. \n + +*@par Inputs: +*@li tensor: The input tensor. +*@li indices: The indices used to index into the list. +*@li element_shape: The shape of the elements in the list (can be less specified than +the shape of the tensor). +*@li num_elements: The size of the output list. Must be large enough to accommodate +the largest index in indices. If -1, the list is just large enough to include +the largest index in indices. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The TensorList. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListScatterV2 operator. +*/ +REG_OP(TensorListScatterV2) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(indices, TensorType({DT_INT32})) + .INPUT(element_shape, TensorType({DT_INT32,DT_INT64})) + .INPUT(num_elements, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListScatterV2) + +/** +*@brief Scatters tensor at indices in an input list. \n + +*@par Inputs: +*@li input_handle: The input tensor list. +*@li tensor: The input tensor. +*@li indices: The indices used to index into the list. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output_handle: The TensorList. 
\n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListScatterIntoExistingList operator. +*/ +REG_OP(TensorListScatterIntoExistingList) + .INPUT(input_handle, TensorType({DT_VARIANT})) + .INPUT(tensor, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,DT_INT8, + DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_QINT8,DT_QUINT8, + DT_QINT16,DT_QUINT16,DT_QINT32,DT_BOOL, + DT_STRING,DT_COMPLEX64,DT_COMPLEX128})) + .INPUT(indices, TensorType({DT_INT32})) + .OUTPUT(output_handle, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListScatterIntoExistingList) + +/** +*@brief Concat two tensor lists to a new tensor list. \n + +*@par Inputs: +*@li input_a: The input tensor list A. +*@li input_b: The input tensor list B. \n + +*@par Attributes: +*@li element_dtype: The type of elements in the list. \n + +*@par Outputs: +*@li output: The output list. \n + +*@par Third-party framework compatibility. +*Compatible with tensorflow TensorListConcatLists operator. +*/ +REG_OP(TensorListConcatLists) + .INPUT(input_a, TensorType({DT_VARIANT})) + .INPUT(input_b, TensorType({DT_VARIANT})) + .OUTPUT(output, TensorType({DT_VARIANT})) + .ATTR(element_dtype, Type, DT_INT32) + .OP_END_FACTORY_REG(TensorListConcatLists) +} // namespace ge + +#endif // OPS_BUILT_IN_OP_PROTO_INC_LIST_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/logging_ops.h b/third_party/fwkacllib/inc/ops/logging_ops.h index bc8ae2b8..03be7757 100644 --- a/third_party/fwkacllib/inc/ops/logging_ops.h +++ b/third_party/fwkacllib/inc/ops/logging_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/lookup_ops.h b/third_party/fwkacllib/inc/ops/lookup_ops.h index b37ab048..5d928e5a 100644 --- a/third_party/fwkacllib/inc/ops/lookup_ops.h +++ b/third_party/fwkacllib/inc/ops/lookup_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/math_ops.h b/third_party/fwkacllib/inc/ops/math_ops.h index 149e0e37..319bcf70 100644 --- a/third_party/fwkacllib/inc/ops/math_ops.h +++ b/third_party/fwkacllib/inc/ops/math_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -222,6 +222,24 @@ REG_OP(Bucketize) .REQUIRED_ATTR(boundaries, ListFloat) .OP_END_FACTORY_REG(Bucketize) +/** +*@brief Returns a new tensor with the truncated integer values of the elements of input. \n + +*@par Inputs: +*One inputs, including: +* @li input_x: A tensor. Must be one of the following types: float16, float32, int8, uint8, int32. \n + +*@par Outputs: +*y: A tensor with the same type and shape of input_x \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Trunc. 
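\n
+* For example (editor's illustration): trunc(1.7) = 1.0 and trunc(-1.7) = -1.0;
+* the fractional part is discarded, rounding toward zero. \n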
+*/
+REG_OP(Trunc)
+    .INPUT(input_x, TensorType({DT_FLOAT16,DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8}))
+    .OUTPUT(output_y, TensorType({DT_FLOAT16,DT_FLOAT, DT_INT8, DT_INT32, DT_UINT8}))
+    .OP_END_FACTORY_REG(Trunc)
+
 /**
 *@brief Computes the sum along sparse segments of a tensor . \n
 
@@ -365,6 +383,27 @@ REG_OP(GetNext)
     .ATTR(channel_name, String, "")
     .OP_END_FACTORY_REG(GetNext)
 
+/**
+*@brief Gets the dynamic dims after GetNext. \n
+
+*@par Inputs:
+*input: A nested structure of Tensor objects, from GetNext's output. \n
+
+*@par Attributes:
+*@li shape_info: GE shape_info for each input; -1 means unknown dim.
+*@li N: The number of inputs. \n
+
+*@par Outputs:
+*dims: GE unknown dims, a vector of int64. \n
+*/
+
+REG_OP(GetDynamicDims)
+    .DYNAMIC_INPUT(input, TensorType({DT_INT32, DT_INT64}))
+    .OUTPUT(dims, TensorType({DT_INT32, DT_INT64}))
+    .REQUIRED_ATTR(shape_info, ListInt)
+    .REQUIRED_ATTR(N, Int)
+    .OP_END_FACTORY_REG(GetDynamicDims)
+
 /**
 *@brief End of sequence . \n
 
@@ -494,6 +533,29 @@ REG_OP(NextAfter)
     .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE}))
     .OP_END_FACTORY_REG(NextAfter)
 
+/**
+*@brief Computes the p-norm distance between every pair of row vectors in the input. \n
+
+*@par Inputs:
+*One input, including:
+* @li x: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+*@par Attributes:
+*@li p: An optional float. Defaults to 2.0. \n
+
+*@par Outputs:
+*y: A Tensor of the same type as "x". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator Pdist. \n
+*/
+REG_OP(Pdist)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(p, Float, 2.0)
+    .OP_END_FACTORY_REG(Pdist)
+
 /**
 *@brief Compute element-wise finiteness, return a boolean tensor.
 
@@ -624,6 +686,7 @@ REG_OP(NLLLoss)
     .OUTPUT(y, TensorType({DT_FLOAT}))
     .OUTPUT(total_weight, TensorType({DT_FLOAT}))
     .ATTR(reduction, String, "mean")
+    .ATTR(ignore_index, Int, -100)
     .OP_END_FACTORY_REG(NLLLoss)
 
 /**
@@ -653,6 +716,7 @@ REG_OP(NLLLossGrad)
     .INPUT(total_weight, TensorType({DT_FLOAT}))
     .OUTPUT(x_grad, TensorType({DT_FLOAT}))
     .ATTR(reduction, String, "mean")
+    .ATTR(ignore_index, Int, -100)
     .OP_END_FACTORY_REG(NLLLossGrad)
 
 /**
@@ -710,6 +774,9 @@ REG_OP(IFMR)
 
 *@par Third-party framework compatibility
 *Compatible with mindspore
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
 
 REG_OP(WtsARQ)
@@ -741,6 +808,9 @@ REG_OP(WtsARQ)
 
 *@par Third-party framework compatibility
 *Compatible with mindspore
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
 
 REG_OP(ActsULQ)
@@ -748,8 +818,8 @@ REG_OP(ActsULQ)
     .INPUT(clamp_min, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(clamp_max, TensorType({DT_FLOAT16, DT_FLOAT}))
     .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
-    .OUTPUT(clamp_min_mask, TensorType({DT_BOOL}))
-    .OUTPUT(clamp_max_mask, TensorType({DT_BOOL}))
+    .OUTPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT}))
     .OUTPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT}))
     .ATTR(fixed_min, Bool, false)
     .ATTR(num_bits, Int, 8)
@@ -768,12 +838,15 @@ REG_OP(ActsULQ)
 
 *@par Third-party framework compatibility
 *Compatible with mindspore
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/ REG_OP(ActsULQInputGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_min_mask, TensorType({DT_BOOL})) - .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .OUTPUT(x_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActsULQInputGrad) @@ -790,11 +863,14 @@ REG_OP(ActsULQInputGrad) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(ActULQClampMaxGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_max_mask, TensorType({DT_BOOL})) + .INPUT(clamp_max_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(clamp_max_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActULQClampMaxGrad) @@ -812,15 +888,208 @@ REG_OP(ActULQClampMaxGrad) *@par Third-party framework compatibility *Compatible with mindspore + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(ActULQClampMinGrad) .INPUT(y_grad, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(clamp_min_mask, TensorType({DT_BOOL})) + .INPUT(clamp_min_mask, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT})) .INPUT(x_clamped_loss, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(clamp_min_grad, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(ActULQClampMinGrad) +/** +* @brief Computes Lp norm. + +* @par Inputs: +* @li x: An ND tensor of type float16, float32. \n +* +* @par Attributes: +* @li p: Int, "inf" or "-inf", default value is 2. +* @li axes: ListInt, {} means all axes will be computed. +* @li keepdim: Bool, default is false. +* @li epsilon: Float, default is 1e-12. \n + +* @par Outputs: +* @li y: An ND tensor of type float16, float32. The shape of y is depending +* on axes and keepdim. \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator LpNorm. +*/ +REG_OP(LpNorm) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(p, Int, 2) + .ATTR(axes, ListInt, {}) + .ATTR(keepdim, Bool, false) + .ATTR(epsilon, Float, 1e-12) + .OP_END_FACTORY_REG(LpNorm) + +/** +* @brief get complex. + +* @par Inputs: +* @li real: An ND tensor of type float32. double +* @li imag: An ND tensor of type float32. double \n +* +* @par Outputs: +* @li out: An ND tensor of type complex64, complex128 \n +*/ +REG_OP(Complex) + .INPUT(real, TensorType({DT_FLOAT, DT_DOUBLE})) + .INPUT(imag, TensorType({DT_FLOAT, DT_DOUBLE})) + .OUTPUT(out, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .ATTR(Tout, Type, DT_COMPLEX64) + .OP_END_FACTORY_REG(Complex) + +/** +* @brief deal complex. + +* @par Inputs: +* @li input: An ND tensor of type complex64, complex128 \n +* +* @par Outputs: +* @li output: An ND tensor of type float32. double \n +*/ +REG_OP(Imag) + .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128})) + .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE})) + .ATTR(Tout, Type, DT_FLOAT) + .OP_END_FACTORY_REG(Imag) + +/** +* @brief deal complex. + +* @par Inputs: +* @li input: An ND tensor of type complex64, complex128 \n +* +* @par Outputs: +* @li output: An ND tensor of type float32. 
double \n
+*/
+REG_OP(Angle)
+    .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE}))
+    .ATTR(Tout, Type, DT_FLOAT)
+    .OP_END_FACTORY_REG(Angle)
+
+/**
+*@brief Computes the gradient of SoftMarginLoss. \n
+
+*@par Inputs:
+*Three inputs, including:
+* @li predict: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li label: A tensor with the same shape as "predict". Must be one of the following types:
+* float16, float32. \n
+* @li dout: A tensor with the same shape as "predict". Must be one of the following types:
+* float16, float32. \n
+
+*@par Attributes:
+* @li reduction: Specifies the reduction to apply to the output:
+* 'none' | 'mean' | 'sum'. Default: 'mean'. \n
+
+*@par Outputs:
+* gradient: A Tensor with the same type as "predict". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator SoftMarginLoss Backward. \n
+*/
+REG_OP(SoftMarginLossGrad)
+    .INPUT(predict, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(label, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(dout, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(gradient, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .ATTR(reduction, String, "mean")
+    .OP_END_FACTORY_REG(SoftMarginLossGrad)
+
+/**
+*@brief Calculates the cross product of two tensors. \n
+
+*@par Inputs:
+*Two inputs, including:
+* @li x1: A tensor. Must be one of the following types:
+* float16, float32, int32, int8, uint8, int16. \n
+* @li x2: A tensor. Must be one of the following types:
+* float16, float32, int32, int8, uint8, int16. \n
+
+*@par Attributes:
+*@li dim: The dimension along which to compute the cross product. Defaults to -65530. \n
+
+*@par Outputs:
+*y: A Tensor with the same type and shape as "x1". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator cross. \n
+*/
+REG_OP(Cross)
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_INT16}))
+    .ATTR(dim, Int, -65530)
+    .OP_END_FACTORY_REG(Cross)
+
+/**
+ *@brief Computes, in batch, the p-norm distance between each pair of
+ *the two collections of row vectors. \n
+
+ *@par Inputs:
+ *Two inputs, including:
+ * @li x1: A tensor with shape: BxPxM. Must be one of the following types:
+ * float16, float32. \n
+ * @li x2: A tensor with shape: BxRxM. Must be one of the following types:
+ * float16, float32. \n
+
+ *@par Attributes:
+ * @li p: An optional float >= 0 or inf. Defaults to 2.0. \n
+
+ *@par Outputs:
+ * y: A Tensor with the same type as "x1", with shape BxPxR. \n
+
+ *@par Third-party framework compatibility
+ *Compatible with the Pytorch operator Cdist. \n
+ */
+REG_OP(Cdist)
+    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(p, Float, 2.0)
+    .OP_END_FACTORY_REG(Cdist)
+
+/**
+*@brief Computes the gradient of x1 in Cdist. \n
+
+*@par Inputs:
+*Four inputs, including:
+* @li grad: Grad with shape BxPxR. Must be one of the following types:
+* float16, float32. \n
+* @li x1: A tensor with shape: BxPxM. Must be one of the following types:
+* float16, float32. \n
+* @li x2: A tensor with shape: BxRxM. Must be one of the following types:
+* float16, float32. \n
+* @li cdist: Output tensor of the Cdist forward pass, with shape BxPxR.
+* Must be one of the following types: float16, float32. \n
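+* Editor's worked example for the shapes above: with B=2, P=3, R=5 and M=4,
+* "x1" is [2, 3, 4], "x2" is [2, 5, 4], "grad" and "cdist" are [2, 3, 5],
+* and the output "y" matches "x1" at [2, 3, 4].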
+
+*@par Attributes:
+* @li p: An optional float >= 0 or inf. Defaults to 2.0. \n
+
+*@par Outputs:
+* y: A Tensor with the same type and shape as "x1". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator Cdist Backward. \n
+*/
+REG_OP(CdistGrad)
+    .INPUT(grad, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(x1, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(x2, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(cdist, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .ATTR(p, Float, 2.0)
+    .OP_END_FACTORY_REG(CdistGrad)
+
 }  // namespace ge
 
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_MATH_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
index ed23d3f6..b317be37 100644
--- a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
+++ b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -38,8 +38,8 @@ namespace ge {
 * float32, int32. Has format [ND, NHWC] . \n
 
 *@par Attributes:
-*@li transpose_a: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
-*@li transpose_b: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
+*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
+*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
 
 *@par Outputs:
 *y: The result matrix Tensor. 2D. Must be one of the following types: float16,
@@ -70,8 +70,8 @@ REG_OP(MatMul)
 * float32, int32. Has format [ND, NHWC] . \n
 
 *@par Attributes:
-*@li transpose_a: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
-*@li transpose_b: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
+*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
+*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
 
 *@par Outputs:
 *y: The result matrix Tensor. 2D. Must be one of the following types: float16,
@@ -91,6 +91,36 @@ REG_OP(MatMulV2)
     .ATTR(offset_x, Int, 0)
     .OP_END_FACTORY_REG(MatMulV2)
 
+/**
+*@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
+
+*@par Inputs:
+*Four inputs, including:
+* @li x1: A matrix Tensor. 2D. Must be one of the following types: int8.
+* @li x2: A matrix Tensor. 2D. Must be one of the following types: int8.
+* @li compress_index: A compress index matrix of type int8.
+* @li bias: A 1D Tensor. Must be one of the following types: int32, float16.
+
+*@par Attributes:
+*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
+*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
+
+*@par Outputs:
+*y: The result matrix Tensor. 2D. Must be one of the following types: float16,
+* int32. \n
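+* Editor's note on the transpose attributes (applies to the whole MatMul
+* family): with transpose_x1 = false and transpose_x2 = false, "x1" is [M, K],
+* "x2" is [K, N] and "y" is [M, N]; setting either flag transposes the
+* corresponding input before the multiply.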
\n + +*/ +REG_OP(MatMulV2Compress) + .INPUT(x1, TensorType({DT_INT8})) + .INPUT(x2, TensorType({DT_INT8})) + .INPUT(compress_index, TensorType({DT_INT8})) + .OPTIONAL_INPUT(bias, TensorType({DT_INT32, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_INT32, DT_FLOAT16})) + .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) + .ATTR(transpose_x1, Bool, false) + .ATTR(transpose_x2, Bool, false) + .ATTR(offset_x, Int, 0) + .OP_END_FACTORY_REG(MatMulV2Compress) /** *@brief Performs Matrix-to-matrix Multiply, producing c=alpha[0]*a*b+beta[0]*c . \n @@ -149,15 +179,15 @@ REG_OP(GEMM) *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n *@par Inputs: -*Three inputs, including: +*Two inputs, including: * @li x1: A matrix Tensor. Must be one of the following types: float16, * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. * @li x2: A matrix Tensor. Must be one of the following types: float16, * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n *@par Attributes: -*@li adj_x: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. -*@li adj_y: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n +*@li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. +*@li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n *@par Outputs: *y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16, @@ -175,6 +205,42 @@ REG_OP(BatchMatMul) .ATTR(adj_x2, Bool, false) .OP_END_FACTORY_REG(BatchMatMul) + +/** +* @brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n + +* @par Inputs: +* Three inputs, including: +* @li x1: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. +* @li x2: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n +* @li bias: A matrix Tensor. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n + +* @par Attributes: +* @li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M]. +* @li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n + +* @par Outputs: +* y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16, +* float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. Has the same shape length as "x1" and "x2" . \n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator BatchMatmul. +*/ + +REG_OP(BatchMatMulV2) + .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) + .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32})) + .ATTR(adj_x1, Bool, false) + .ATTR(adj_x2, Bool, false) + .ATTR(offset_x, Int, 0) + .OP_END_FACTORY_REG(BatchMatMulV2) + /** *@brief Computes half the L2 norm of a tensor without the sqrt . \n @@ -334,7 +400,7 @@ REG_OP(MatrixSetDiagD) * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32, * uint64 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. 
*Must be one of the following types: float16, float32, int8, uint8, double, * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32, @@ -378,6 +444,9 @@ REG_OP(ScatterNdUpdate) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterUpdate. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(TensorScatterUpdate) .INPUT(x, TensorType::BasicType()) @@ -386,6 +455,34 @@ REG_OP(TensorScatterUpdate) .OUTPUT(y, TensorType::BasicType()) .OP_END_FACTORY_REG(TensorScatterUpdate) +/** +*@brief Uses "updates" to update tensor "data" by "indices". \n + +*@par Inputs: +* Three inputs, including: +*@li data: An ND Tensor . \n +*Must be one of the following types: float16, float32, int32, int8, uint8 +*@li indices: An ND Tensor of type int32 or int64 +*@li updates: An Tensor. Same shape as indices. format:NCHW, NHWC . \n +*Must be one of the following types: float16, float32, int32, int8, uint8 + +*@par Attributes: +*@li axis: An optional attribute. Defaults to 0. + +*@par Outputs: +*y: A Tensor. Has the same type and format as input "data" . \n + +*@par Third-party framework compatibility +* Compatible with the ONNX operator ScatterElements. +*/ +REG_OP(ScatterElements) + .INPUT(data, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .ATTR(axis, Int, 0) + .OP_END_FACTORY_REG(ScatterElements) + /** *@brief Adds sparse "updates" to a variable reference . \n @@ -394,7 +491,7 @@ REG_OP(TensorScatterUpdate) *@li var: An ND Tensor . \n *Must be one of the following types: float16, float32, int32, int8, uint8 -*@li indices: An ND Tensor of type int32 or int64. +*@li indices: An ND Tensor of type int32 or int64 *@li updates: An Tensor. format:NCHW, NHWC . \n @@ -412,10 +509,10 @@ REG_OP(TensorScatterUpdate) * Compatible with the TensorFlow operator ScatterAdd. */ REG_OP(ScatterAdd) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterAdd) @@ -428,7 +525,7 @@ REG_OP(ScatterAdd) *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 @@ -443,10 +540,10 @@ REG_OP(ScatterAdd) * Compatible with the TensorFlow operator ScatterDiv. 
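+
+*@par Example:
+* A host-side sketch of the update rule (editor's illustration in plain C++;
+* it mirrors the semantics only and is not the device implementation):
+*@verbatim
  #include <cstdint>
  #include <vector>
  // var[indices[i]] /= updates[i] for every i; duplicate indices divide twice.
  void ScatterDivRef(std::vector<float> &var, const std::vector<int32_t> &indices,
                     const std::vector<float> &updates) {
    for (size_t i = 0; i < indices.size(); ++i) {
      var[static_cast<size_t>(indices[i])] /= updates[i];
    }
  }
+@endverbatim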
*/ REG_OP(ScatterDiv) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterDiv) @@ -458,7 +555,7 @@ REG_OP(ScatterDiv) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: @@ -472,10 +569,10 @@ REG_OP(ScatterDiv) * Compatible with the TensorFlow operator ScatterNdAdd. */ REG_OP(ScatterNdAdd) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterNdAdd) @@ -499,6 +596,9 @@ REG_OP(ScatterNdAdd) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterAdd. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(TensorScatterAdd) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) @@ -515,7 +615,7 @@ REG_OP(TensorScatterAdd) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 @@ -530,10 +630,10 @@ REG_OP(TensorScatterAdd) * Compatible with the TensorFlow operator ScatterNdSub. */ REG_OP(ScatterNdSub) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterNdSub) @@ -557,6 +657,9 @@ REG_OP(ScatterNdSub) *@par Third-party framework compatibility * Compatible with the TensorFlow operator TensorScatterSub. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(TensorScatterSub) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) @@ -573,7 +676,7 @@ REG_OP(TensorScatterSub) *@li var: An ND Tensor. 
*Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32, int64 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: @@ -587,10 +690,10 @@ REG_OP(TensorScatterSub) * Compatible with the TensorFlow operator ScatterSub. */ REG_OP(ScatterSub) - .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .INPUT(indices, TensorType::IndexNumberType()) - .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterSub) @@ -761,7 +864,7 @@ REG_OP(ConfusionMatrix) *@li var: An ND Tensor. *Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor . \n *Must be one of the following types: float16, float, int32, int8, uint8 @@ -778,7 +881,7 @@ REG_OP(ConfusionMatrix) */ REG_OP(ScatterMul) .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) - .INPUT(indices, TensorType({DT_INT32})) + .INPUT(indices, TensorType::IndexNumberType()) .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) @@ -791,13 +894,13 @@ REG_OP(ScatterMul) *@par Inputs: * Three inputs, including: *@li var: An ND Tensor. -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An ND Tensor. -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An ND Tensor. -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: *use_locking: An optional bool. Defaults to "False". If "True", the operation @@ -810,10 +913,10 @@ REG_OP(ScatterMul) * Compatible with the TensorFlow operator ScatterMin. */ REG_OP(ScatterMin) - .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .INPUT(indices, TensorType({DT_INT32})) - .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) - .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32})) + .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(indices, TensorType::IndexNumberType()) + .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterMin) @@ -824,13 +927,13 @@ REG_OP(ScatterMin) * Three inputs, including: *@li var: An ND Tensor . \n -*Must be one of the following types: float16, float, int32 +*Must be one of the following types: float16, float, int32, int8, uint8 *@li indices: An NCHW, NHWC, or ND Tensor . \n -*Must be one of the following types: int32 +*Must be one of the following types: int32 or int64 *@li updates: An NCHW, NHWC, or ND Tensor . 
\n
-*Must be one of the following types: float16, float, int32
+*Must be one of the following types: float16, float, int32, int8, uint8
 
 *@par Attributes:
 *use_locking: An optional bool. Defaults to "False".
@@ -843,10 +946,10 @@ REG_OP(ScatterMin)
 * Compatible with the TensorFlow operator ScatterMax.
 */
 REG_OP(ScatterMax)
-    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
-    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterMax)
 
@@ -860,7 +963,7 @@ REG_OP(ScatterMax)
 *Must be one of the following types: float16, float, int32, int8, uint8
 
 *@li indices: An ND Tensor . \n
 
-*Must be one of the following types: int32
+*Must be one of the following types: int32 or int64
 
 *@li updates: An ND Tensor . \n
 
 *Must be one of the following types: float16, float, int32, int8, uint8
 
@@ -876,10 +979,10 @@ REG_OP(ScatterMax)
 * Compatible with the TensorFlow operator ScatterUpdate.
 */
 REG_OP(ScatterUpdate)
-    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-    .INPUT(indices, TensorType({DT_INT32}))
-    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
-    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8}))
+    .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .INPUT(indices, TensorType::IndexNumberType())
+    .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(ScatterUpdate)
 
@@ -979,6 +1082,137 @@ REG_OP(MatrixDiagV2)
     .OUTPUT(output, TensorType::BasicType())
     .OP_END_FACTORY_REG(MatrixDiagV2)
 
+/**
+* @brief Adds "updates" to "var_out" according to "axis" and "indices".
+
+* @par Inputs:
+* Three inputs, including:
+* @li var: A Tensor. Must be one of the following types:
+* float16, float32, int32, int8, uint8.
+* @li indices: A Tensor of the indices, type should be int32.
+* @li updates: A Tensor of the same type as "var".
+
+* @par Attributes:
+* @li axis: A required int specifying the axis along which to perform the index add.
+
+* @par Outputs:
+* @li var_out: A Tensor. Same as input "var".
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator index_add.
+
+* @par Restrictions:
+* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(IndexAdd)
+    .INPUT(var, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .INPUT(indices, TensorType({DT_INT32}))
+    .INPUT(updates, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .OUTPUT(var_out, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .ATTR(axis, Int, 0)
+    .OP_END_FACTORY_REG(IndexAdd)
+
+/**
+*@brief Returns the upper triangular part of a matrix (2-D tensor) or a batch of matrices "input". \n
+
+*@par Inputs:
+* Two inputs, including:
+*@li x: A Tensor. Must be one of the following types:
+* float16, float32, double, int32, uint8, int16, int8, complex64, int64,
+* qint8, quint8, qint32, uint16, complex128, uint32, uint64.
+*@li diagonal: (int, optional) - the diagonal to consider. \n
+
+*@par Outputs:
+*y: A Tensor. Has the same type as "x" . \n
+
+*@par Third-party framework compatibility
+* Compatible with the Pytorch operator Triu.
+*/
+REG_OP(Triu)
+    .INPUT(x, TensorType::BasicType())
+    .ATTR(diagonal, Int, 0)
+    .OUTPUT(y, TensorType::BasicType())
+    .OP_END_FACTORY_REG(Triu)
+
+/**
+*@brief Returns the lower triangular part of a matrix (2-D tensor) or a batch of matrices "input". \n
+
+*@par Inputs:
+* Two inputs, including:
+*@li x: A Tensor. Must be one of the following types:
+* float16, float32, double, int32, uint8, int16, int8, complex64, int64,
+* qint8, quint8, qint32, uint16, complex128, uint32, uint64.
+*@li diagonal: (int, optional) - the diagonal to consider. \n
+
+*@par Outputs:
+*y: A Tensor. Has the same type as "x" . \n
+
+*@par Third-party framework compatibility
+* Compatible with the Pytorch operator Tril.
+*/
+REG_OP(Tril)
+    .INPUT(x, TensorType::BasicType())
+    .ATTR(diagonal, Int, 0)
+    .OUTPUT(y, TensorType::BasicType())
+    .OP_END_FACTORY_REG(Tril)
+/**
+*@brief Sums the product of the elements of the input operands along dimensions
+* specified using a notation based on the Einstein summation convention. \n
+
+*@par Inputs:
+*x: A dynamic list of N input tensors. Must be one of the following types:
+* int32, float16, float32. \n
+
+*@par Attributes:
+*@li equation: The subscripts for the Einstein summation. \n
+*@li N: The tensor size of input "x". \n
+
+*@par Outputs:
+*y: The result of the Einstein summation. \n
+
+*@attention Constraints:
+*Input N must be Int. \n
+
+*@par Third-party framework compatibility
+*Compatible with Pytorch einsum operator.
+*/
+REG_OP(Einsum)
+    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
+    .REQUIRED_ATTR(equation, String)
+    .REQUIRED_ATTR(N, Int)
+    .OP_END_FACTORY_REG(Einsum)
+
+/**
+*@brief Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. \n
+
+*@par Inputs:
+*No inputs
+
+*@par Attributes:
+*@li num_rows: A required int. \n
+*@li num_columns: An optional int. Defaults to 0. \n
+*@li batch_shape: An optional ListInt. Defaults to []. \n
+*@li dtype: An optional int. Defaults to 0. \n
+
+*@par Outputs:
+*y: A Tensor with the targeted type and shape. \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator Eye. \n
+*/
+REG_OP(Eye)
+    .OUTPUT(y, TensorType::BasicType())    /* "Result, has targeted element type" */
+    .REQUIRED_ATTR(num_rows, Int)
+    .ATTR(num_columns, Int, 0)
+    .ATTR(batch_shape, ListInt, {})
+    .ATTR(dtype, Int, 0)
+    .OP_END_FACTORY_REG(Eye)
+
 }  // namespace ge
 
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
index 0c6a5dff..9629976e 100644
--- a/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_batch_norm_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -144,6 +144,64 @@ REG_OP(BatchNorm)
 /**
 *@brief Performs batch normalization . \n
 
+*@par Inputs:
+* Five inputs, including: (NDHWC, NCDHW, or NDC1HWC0 supported)
+*@li x: A 5D or 6D Tensor of type float16 or float32, with format NDHWC or NCDHW for 5D, or NDC1HWC0 for 6D.
+*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be 6D
+if input "x" is with format NDC1HWC0. Specifies the scaling factor.
+*@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be 6D
+if input "x" is with format NDC1HWC0. Specifies the offset.
+*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be 6D
+if input "x" is with format NDC1HWC0. Specifies the mean used for inference. Must be "None" if the
+operation is used for training.
+*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be
+6D if input "x" is with format NDC1HWC0. Specifies the variance used for inference. Must be "None"
+if the operation is used for training . \n
+
+*@par Attributes:
+*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001".
+*@li data_format: An optional string, specifying the format of "x". Defaults to "NCDHW".
+*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n
+
+*@par Outputs:
+* Five outputs, including: (NDHWC, NCDHW, or NDC1HWC0 supported)
+*@li y: A 5D or 6D Tensor of type float16 or float32 for the normalized "x", with format NDHWC or NCDHW for 5D, or NDC1HWC0 for 6D.
+*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW. Must be 6D
+if input "x" is with format NDC1HWC0. Specifies the mean of "x".
+*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW.
+Must be 6D if input "x" is with format NDC1HWC0. Specifies the variance of "x".
+*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW.
+Must be 6D if input "x" is with format NDC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output.
+*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NDHWC or NCDHW.
+Must be 6D if input "x" is with format NDC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n
+
+*@attention Constraints:
+*@li If the operation is used for inference and the outputs "reserve_space_1" and "reserve_space_2" are available,
+then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance".
+*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n
+
+*@par Third-party framework compatibility
+*@li Compatible with the TensorFlow operator fused_batch_norm.
+*@li Compatible with the TensorFlow operator fused_batch_norm_v2.
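+
+*@par Example:
+* A worked shape example (editor's illustration): with data_format "NCDHW",
+* "x" is [N, C, D, H, W], e.g. [8, 16, 4, 32, 32]; "scale", "offset",
+* "batch_mean" and "batch_variance" are then 1D tensors of shape [16] (the C
+* dimension).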
+*/
+REG_OP(BatchNorm3D)
+    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(scale, TensorType({DT_FLOAT}))
+    .INPUT(offset, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
+    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
+    .OUTPUT(reserve_space_1, TensorType({DT_FLOAT}))
+    .OUTPUT(reserve_space_2, TensorType({DT_FLOAT}))
+    .ATTR(epsilon, Float, 0.0001)
+    .ATTR(data_format, String, "NCDHW")
+    .ATTR(is_training, Bool, true)
+    .OP_END_FACTORY_REG(BatchNorm3D)
+/**
+*@brief Performs batch normalization . \n
+
 *@par Inputs:
 * Five inputs, including: (NHWC or NCHW supported)
 *@li x: A 4D Tensor of type float16 or float32.
@@ -242,6 +300,52 @@ REG_OP(BatchNormGrad)
 /**
 *@brief Performs the backpropagation of BatchNorm . \n
 
+*@par Inputs:
+* Five inputs, including:
+*@li y_backprop: A 5D or 6D Tensor of type float16 or float32, with format NDHWC, NCDHW, or NDC1HWC0, for the gradient.
+*@li x: A 5D or 6D Tensor of type float16 or float32, with format NDHWC, NCDHW, or NDC1HWC0.
+*@li scale: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0.
+*@li reserve_space_1: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0. It is an output of BatchNorm3D.
+*@li reserve_space_2: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0. It is an output of BatchNorm3D . \n
+
+*@par Attributes:
+*@li epsilon: An optional float32. Defaults to "0.0001". A small float number added to the variance of "x".
+*@li data_format: An optional string. Defaults to "NCDHW".
+*@li is_training: An optional bool. Defaults to "true". Specifies the operation is for training (default) or inference . \n
+
+*@par Outputs:
+*@li x_backprop: A Tensor of type float16 or float32, with format NDHWC, NCDHW, or NDC1HWC0, for the gradient of "x".
+*@li scale_backprop: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0, for the gradient of "scale".
+*@li offset_backprop: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0, for the gradient of "offset".
+*@li reserve_space_4: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0. Pass "None" to skip this output.
+*@li reserve_space_5: A Tensor of type float32, with format NDHWC, NCDHW, or NDC1HWC0. Pass "None" to skip this output . \n
+
+*@attention Constraints:
+* The preceding layer of this operator must be operator BatchNorm3D . \n
+
+*@see BatchNorm3D
+*@par Third-party framework compatibility
+* Compatible with the TensorFlow operators FusedBatchNormGradV2 and FusedBatchNorm3DGrad.
+*/
+REG_OP(BatchNorm3DGrad)
+    .INPUT(y_backprop, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(scale, TensorType({DT_FLOAT}))
+    .INPUT(reserve_space_1, TensorType({DT_FLOAT}))
+    .INPUT(reserve_space_2, TensorType({DT_FLOAT}))
+    .OUTPUT(x_backprop, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(scale_backprop, TensorType({DT_FLOAT}))
+    .OUTPUT(offset_backprop, TensorType({DT_FLOAT}))
+    .OUTPUT(reserve_space_4, TensorType({DT_FLOAT}))
+    .OUTPUT(reserve_space_5, TensorType({DT_FLOAT}))
+    .ATTR(epsilon, Float, 0.0001)
+    .ATTR(data_format, String, "NCDHW")
+    .ATTR(is_training, Bool, true)
+    .OP_END_FACTORY_REG(BatchNorm3DGrad)
+
+/**
+*@brief Performs the backpropagation of BatchNorm . 
\n + *@par Inputs: * Five inputs, including: *@li y_backprop: A 4D Tensor of type float16 or float32, with format NHWC or NCHW, for the gradient. @@ -315,35 +419,7 @@ REG_OP(BNInference) .ATTR(use_global_stats, Bool,true) .ATTR(mode, Int,1) .OP_END_FACTORY_REG(BNInference) -/** -*@brief aicpu batch normalization host . \n -*@par Inputs: - -*@li mean: A Tensor of type float32 or float16. Must be 1D if input "x" Specifies the mean used for inference. -*@li variance: A Tensor of type float32 or float16 . Must be 1D if input "x" Specifies the variance used for inference. -*@li momentum: An optional float, mean and variance's Scale factor -*@par Attributes: -*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001". -*@li use_global_stats: mean inference mode , only can be "True". -*@li mode: An optional attr, not use -*@par Outputs: -*@li alpha: A Tensor of type float16 or float32 for the cpu calculate mean -*@li beta: A Tensor of type float16 or float32 for the cpu calculate variance -*/ -REG_OP(BnHost) - .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) - .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) - .INPUT(momentum, TensorType({DT_FLOAT16,DT_FLOAT})) - .OPTIONAL_INPUT(scale, TensorType({DT_FLOAT16,DT_FLOAT})) - .OPTIONAL_INPUT(offset, TensorType({DT_FLOAT16,DT_FLOAT})) - .ATTR(epsilon, Float, 0.00001) - .ATTR(mode, Int, 1) - .ATTR(use_global_stats, Bool, true) - .OUTPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16})) - .OUTPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16})) - .OUTPUT(mu, TensorType({DT_FLOAT16,DT_FLOAT})) - .OP_END_FACTORY_REG(BnHost) /** *@brief Performs batch normalization . \n diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h index 35296870..98473c65 100644 --- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -365,6 +365,25 @@ REG_OP(BiasAddGrad) * 4-D with shape [batch, out_height, out_width, out_channels] * or [batch, out_channels, out_height, out_width]. * Gradients with respect to the output of the convolution. + *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | out_bckprop | filter | y + ------------|-------------|---------|-------- + | Data Type | float16 | float16 | float16 + | |-------------|---------|-------- + | | float32 | float32 | float32 + | |-------------|---------|-------- + | | float64 | float64 | float64 + ------------|-------------|---------|-------- + | Format | NCHW | NCHW | NCHW + | | NHWC | HWCN | NHWC +@endverbatim + * For float32 and float64 type, the actual calculation on the chip is based on + * float16. + *\n + * *@par Attributes: * Five attributes: * @li strides: A tuple/list of 4 integers. The stride of the sliding window @@ -377,8 +396,53 @@ REG_OP(BiasAddGrad) * channels. * @li data_format: An optional string from: "NHWC", "NCHW". Defaults to * "NHWC". Specify the data format of the input and output data. 
+ *\n + *\n + * The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | input_size | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Filter | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | out_backprop | H*strideH| [1, 4096] + | | W*strideW| [1, 4096] + -------------------|----------|-------------- + | y(fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + +@endverbatim + * In Ascend910, fmap or out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 + *\n + * *@par Outputs: * y: A Tensor. Has the same type as filter,and has same format as input_size. + *\n + * out_backprop_height = (fmap_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (fmap_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * *@par Third-party framework compatibility * Compatible with Tensorflow's conv2d_backprop_input */ @@ -454,6 +518,21 @@ REG_OP(Conv2DBackpropInputD) * @li bias: An optional tensor. Must have the same type as "y". * @li offset_w: An optional 1D tensor for quantized deconvolution. * Type is int8. Reserved.\n + *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | x | filter | bias | y + ------------|---------|---------|---------|-------- + | Data Type | float16 | float16 | float16 | float16 + | |---------|---------|---------|-------- + | | int8 | int8 | int32 | int32 + ------------|---------|---------|---------|-------- + | Format | NCHW | NCHW | ND | NCHW +@endverbatim + * For int8, a dequant or requant operator must be followed. + *\n + * *@par Attributes: * Six attributes: * @li strides: A tuple or list of 2 integers. The stride of the sliding window @@ -467,9 +546,54 @@ REG_OP(Conv2DBackpropInputD) * @li data_format: An optional string from: "NCHW". Defaults to "NCHW". \n Specify the data format of the input and output data. * @li offset_x: An optional integer for quantized deconvolution. - * Defaults to "0". + * The negative offset added to the input image for int8 type. Ensure offset_x + * within the effective range of int8 [-128, 127]. Defaults to "0". 
+ *\n + *\n + * The following value range restrictions must be met: +*@verbatim + | Name | Field | Scope + -------------------|----------|-------------- + | x (out_backprop) | H*strideH| [1, 4096] + | | W*strideW| [1, 4096] + -------------------|----------|-------------- + | Filter | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | y (fmap) | H | [1, 4096] + | | W | [1, 4096] + -------------------|----------|-------------- + | Stride | H | [1, 63] + | | W | [1, 63] + -------------------|----------|-------------- + | Padding | Top | [0, 255] + | | Bottom | [0, 255] + | | Left | [0, 255] + | | Right | [0, 255] + -------------------|----------|-------------- + | Dilation | H | [1, 255] + | | W | [1, 255] + -------------------|----------|-------------- + | Offset_x | | [-128, 127] + +@endverbatim + * In Ascend910, fmap or out_backprop's H and W not support 1 when + * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 + *\n + * *@par Outputs: * y: A Tensor. 4D tensor with shape [batch, channels, height, width]. + *\n + * out_backprop_height = (fmap_height + pad_top + pad_bottom - + * (dilation_h * (filter_height - 1) + 1)) + * / stride_h + 1 + *\n + * out_backprop_width = (fmap_width + pad_left + pad_right - + * (dilation_w * (filter_width - 1) + 1)) + * / stride_w + 1 + *\n + * * When type of x is float16, the type of y must be float16. * When type of x is int8, the type of y must be int32. */ @@ -502,6 +626,25 @@ REG_OP(Deconvolution) * [batch, out_height, out_width, out_channels] or [batch, out_channels, * out_height, out_width]. Gradients with respect to the output of the * convolution. + *\n + *\n + * The following are the supported data types and data formats: +*@verbatim + | Tensor | x | out_backprop | y + ------------|---------|--------------|--------- + | Data Type | float16 | float16 | float16 + | |---------|--------------|--------- + | | float32 | float32 | float32 + | |---------|--------------|--------- + | | float64 | float64 | float64 + |-----------|---------|--------------|--------- + | Format | NCHW | NCHW | NCHW + | | NHWC | NHWC | HWCN +@endverbatim + * For float32 and float64 type of x and outbackprop, the actual calculation on the chip + * is based on float16. + *\n + * *@par Attributes: * Five attributes: * @li strides: A tuple/list of 4 integers. The stride of the sliding window @@ -514,8 +657,52 @@ REG_OP(Deconvolution) * channels. * @li data_format: An optional string from: "NHWC", "NCHW". Defaults to * "NHWC". Specify the data format of the input and output data. 
+ *\n
+*\n
+* The following value range restrictions must be met:
+*@verbatim
+ | Name             | Field    | Scope
+ -------------------|----------|--------------
+ | x(fmap)          | H        | [1, 4096]
+ |                  | W        | [1, 4096]
+ -------------------|----------|--------------
+ | Filter Size      | H        | [1, 255]
+ |                  | W        | [1, 255]
+ -------------------|----------|--------------
+ | out_backprop     | H        | [1, 4096]
+ |                  | W        | [1, 4096]
+ -------------------|----------|--------------
+ | y                | H        | [1, 4096]
+ |                  | W        | [1, 4096]
+ -------------------|----------|--------------
+ | Stride           | H        | [1, 63]
+ |                  | W        | [1, 63]
+ -------------------|----------|--------------
+ | Padding          | Top      | [0, 255]
+ |                  | Bottom   | [0, 255]
+ |                  | Left     | [0, 255]
+ |                  | Right    | [0, 255]
+ -------------------|----------|--------------
+ | Dilation         | H        | [1, 255]
+ |                  | W        | [1, 255]
+
+@endverbatim
+ * On Ascend910, the H and W dimensions of out_backprop do not support the
+ * value 1 when
+ * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1.
+ *\n
+ *
 *@par Outputs:
 * y: A Tensor. Has the same type as x, has the same format as filter_size.
+ *\n
+ * out_backprop_height = (in_height + pad_top + pad_bottom -
+ *                       (dilation_h * (filter_height - 1) + 1))
+ *                       / stride_h + 1
+ *\n
+ * out_backprop_width = (in_width + pad_left + pad_right -
+ *                      (dilation_w * (filter_width - 1) + 1))
+ *                      / stride_w + 1
+ *\n
+ *
 *@par Third-party framework compatibility
 * Compatible with Tensorflow's conv2d_backprop_filter
 */
@@ -597,16 +784,14 @@ REG_OP(Conv2DBackpropFilterD)
  | Tensor    | x       | filter  | bias    | y
  ------------|---------|---------|---------|--------
  | Data Type | float16 | float16 | float16 | float16
- |           |---------|---------|---------|--------
  |           | float32 | float32 | float32 | float32
- |           |---------|---------|---------|--------
  |           | int8    | int8    | int32   | int32
  ------------|---------|---------|---------|--------
  | Format    | NCHW    | NCHW    | ND      | NCHW
  |           | NHWC    | HWCN    |         | NHWC
 @endverbatim
 * For float32 type, the actual calculation on the chip is based on
-* float16. For int8, a dequant or requant operator must be followed.
+* float16.
 *\n
 *
 *@par Attributes:
@@ -617,8 +802,7 @@ REG_OP(Conv2DBackpropFilterD)
 * (top, bottom, left, right) side of the input.
 *@li dilations: Optional. A list of 4 integers. The dilation factor for each
 * dimension of input. The dimension order is determined by the data format of
-* "x". The N and C dimensions must be set to 1. The H and W dimensions must be
-* set to 1 for int8 type. Defaults to [1, 1, 1, 1].
+* "x". The N and C dimensions must be set to 1. Defaults to [1, 1, 1, 1].
 *@li groups: Optional. An integer of type int32. The number of blocked
 * connections from input channels to output channels. In_channels and
 * out_channels must both be divisible by "groups". Defaults to 1.
@@ -652,6 +836,8 @@ REG_OP(Conv2DBackpropFilterD)
  | Offset_x |        | [-128, 127]
 
 @endverbatim
+* The W dimension of the input image may exceed 4096, but doing so may
+* cause compilation errors.
*\n * *@par Outputs: @@ -666,21 +852,6 @@ REG_OP(Conv2DBackpropFilterD) * out_width = (in_width + pad_left + pad_right - * (dilation_w * (filter_width - 1) + 1)) * / stride_w + 1 -* -*@attention Constraints: -*@li The following restrictions on the output must be met: -*@verbatim - | Output | Restrictions - ----------|-------------------------------- - | H == 1 | H * W(input) == H * W(filter) - | W == 1 | - ----------|-------------------------------- - | H != 1 | W(input) == W(filter) - | W == 1 | Only for Ascend310 Hi3796V300CS -@endverbatim -* "H * W (input)" indicates the image size after padding and "H * W (filter)" -* indicates the filter size after dilation."W(input)" and W(filter) indicate -* the same rule on the W dimension. *\n * *@par Quantization supported or not @@ -778,7 +949,7 @@ REG_OP(Conv2DCompress) * With the format "HWCN" , the data is stored in the order of: [filter_height, * filter_width, in_channels / groups, out_channels]. *@li offsets: A 4D tensor of x-y coordinates offset and mask. With the format -* "NHWC", the data is stored in the order of: [batch, in_height, in_width, +* "NHWC", the data is stored in the order of: [batch, out_height, out_width, * deformable_groups * filter_height * filter_width * 3]. *@li bias: An optional 1D tensor of additive biases to the filter outputs. * The data is stored in the order of: [out_channels]. @@ -816,31 +987,20 @@ REG_OP(Conv2DCompress) *@li deformable_groups: Optional. An integer of type int32. The number of * deformable group partitions. In_channels must be divisible by * "deformable_groups". Defaults to 1. +*@li modulated: Optional. Specify version of DeformableConv2D, true means v2, +* false means v1, currently only support v2. *\n *\n * The following value range restrictions must be met: *@verbatim | Name | Field | Scope --------------------|--------|---------------------------- - | Input Image Size | H | [1, 100000] - | | W | [1, 4096] - --------------------|--------|---------------------------- - | Filter Size | H | [1, 255] - | | W | [1, 255] + | Input Image Size | H | [1, 100000 / filter_height] + | | W | [1, 4096 / filter_width] --------------------|--------|---------------------------- - | Stride | H | [1, 63] + | Filter Size | H | [1, 63] | | W | [1, 63] - --------------------|--------|---------------------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - ------------ -------|--------|---------------------------- - | Dilation | H | [1, 255] - | | W | [1, 255] @endverbatim -* "W(input)" indicate the image width after padding and W(filter) indicates the -* filter width after dilation. *\n * *@par Outputs: @@ -855,21 +1015,7 @@ REG_OP(Conv2DCompress) * out_width = (in_width + pad_left + pad_right - * (dilation_w * (filter_width - 1) + 1)) * / stride_w + 1 -* -*@attention Constraints: -*@li The following restrictions on the output must be met: -*@verbatim - | Output | Restrictions - ----------|-------------------------------- - | H == 1 | H * W(input) == H * W(filter) - | W == 1 | - ----------|-------------------------------- - | H != 1 | W(input) == W(filter) - | W == 1 | Only for Ascend310 Hi3796V300CS -@endverbatim -* "H * W(input)" indicates the image size after padding and "H * W(filter)" -* indicates the filter size after dilation. "W(input)" and W(filter) indicate -* the same rule on the W dimension. 
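+*\n
+* For example, with in_height = in_width = 224, a 3x3 filter, stride 2,
+* dilation 1 and one pixel of padding on each side (values chosen only for
+* illustration):
+* out_height = out_width = (224 + 1 + 1 - (1 * (3 - 1) + 1)) / 2 + 1 = 112
+* (integer division).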
+*\n * *@par Quantization supported or not *@li No @@ -891,6 +1037,7 @@ REG_OP(DeformableConv2D) .ATTR(groups, Int, 1) .ATTR(data_format, String, "NHWC") .ATTR(deformable_groups, Int, 1) + .ATTR(modulated, Bool, true) .OP_END_FACTORY_REG(DeformableConv2D) /** @@ -916,12 +1063,12 @@ REG_OP(DeformableConv2D) *@par Attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A list of 5 integers. Specifies the dilation factor for each - * dimension of "x", now only support [1,1,1,1,1] - * The N and C dimensions must be 1. Has the same format as "x". + * dimension of "x". + * The N, C and D dimensions must be 1. Has the same format as "x". * @li offset_x: An optional int. Input offset, used for quantized inference. * Defaults to 0. Reserved . \n @@ -967,8 +1114,8 @@ REG_OP(Conv3D) *@par Required Attributes: * @li strides: A list of 5 integers. Specifies the stride of the sliding window - * for each dimension of "x". - * The N and C dimensions must be 1. Has the same format as "x". + * for each dimension of "out_backprop". + * The N and C dimensions must be 1. Has the same format as "out_backprop". * @li pads: A list of 6 integers. * Supports only padding along the D, H and W dimensions in sequence of head, * tail, top, bottom, left and right . \n @@ -976,14 +1123,15 @@ REG_OP(Conv3D) *@par Attributes: * Three attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of the input, now only support [1,1,1,1,1] + * dimension of the input. + * The N, C and D dimensions must be 1. Has the same format as "out_backprop". *@par Outputs: - * y: A Tensor. Has the same type as filter,and has same format as input_size + * y: A Tensor. Has the same type as filter,and has same format as "input_size" *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_input @@ -1011,8 +1159,8 @@ REG_OP(Conv3DBackpropInput) *@par Required Attributes: * @li strides: A list of 5 integers. Specifies the stride of the sliding window - * for each dimension of "x". - * The N and C dimensions must be 1. Has the same format as "x". + * for each dimension of "out_backprop". + * The N and C dimensions must be 1. Has the same format as "out_backprop". * @li pads: A list of 6 integers. Supports only padding along the D, H and W * dimensions in sequence of head, tail, top, bottom, left and right. * @li input_size: A tuple/list of type int32, int64. An integer vector @@ -1023,13 +1171,14 @@ REG_OP(Conv3DBackpropInput) *@par Attributes: * Three attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1] + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "out_backprop". *@par Outputs: - * y: A Tensor. 
Has the same type and data format as out_backprop. + * y: A Tensor. Has the same type and data format as "out_backprop". *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_input @@ -1072,9 +1221,7 @@ REG_OP(Conv3DBackpropInputD) * @li c_t: A optinal Tensor dtype of float16, float32. The cell state at time t . \n *@par Third-party framework compatibility: -* Compatible with the Pytorch operator adds. -*@par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +* Compatible with the Caffe operator LSTM. */ REG_OP(LSTM) .INPUT(x, TensorType({DT_FLOAT16})) @@ -1121,14 +1268,15 @@ REG_OP(LSTM) *@par Attributes: * Three attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1]. + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. *@par Outputs: - * y: A Tensor that has the same type as x + * y: A Tensor that has the same type as "x" * and the format is NDHWC, NCDHW or DHWCN. *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_filter @@ -1172,9 +1320,10 @@ REG_OP(Conv3DBackpropFilter) *@par Attributes: * Three attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1]. + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. @@ -1224,15 +1373,16 @@ REG_OP(Conv3DBackpropFilterD) *@par Attributes: * Five attributes: * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li dilations: A tuple/list of 5 integers, - * The dilation factor for each dimension of input, now only support [1,1,1,1,1] + * The dilation factor for each dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li output_padding: The size will be added in the output shape. * @li offset_x: Input offset_x value. Reserved. *@par Outputs: - * y: A Tensor. Has the same type and format as x. + * y: A Tensor. Has the same type and format as "x". */ REG_OP(Conv3DTranspose) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) @@ -1273,15 +1423,16 @@ REG_OP(Conv3DTranspose) *@par Attributes: * Five attributes: * @li dilations: A tuple/list of 5 integers, The dilation factor for each - * dimension of input, now only support [1,1,1,1,1] + * dimension of input. + * The N, C and D dimensions must be 1. Has the same format as "x". * @li groups: Number of blocked connections from input channels to output - * channels. Reserved. + * channels. * @li data_format: An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. * @li output_padding: The size will be added in the output shape. * @li offset_x: Input offset_x value. Reserved. 
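 * For reference: with ksize = [3, 3] and deformable_groups = 1, the offsets
 * tensor carries 3 * 3 * 3 = 27 values per output position (an x offset, a
 * y offset and a mask for each kernel element) -- an inference that mirrors
 * the "offsets" layout documented for DeformableConv2D above.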
*@par Outputs:
- * y: A Tensor. Has the same type and format as x.
+ * y: A Tensor. Has the same type and format as "x".
 *@par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DTranspose instead.
 */
@@ -1316,6 +1467,22 @@ REG_OP(Conv3DTransposeD)
 * or [out_channels, in_channel, filter_height, filter_width].
 * @li bias: An optional 1D tensor of type float16 or int32. Format is "ND".
 * @li offset_w: An optional 1D tensor for quantized inference. Reserved.
+ *\n
+ *\n
+ * The following are the supported data types and data formats:
+*@verbatim
+ | Tensor    | x       | filter  | bias    | y
+ ------------|---------|---------|---------|--------
+ | Data Type | float16 | float16 | float16 | float16
+ |           |---------|---------|---------|--------
+ |           | int8    | int8    | int32   | int32
+ ------------|---------|---------|---------|--------
+ | Format    | NCHW    | NCHW    | ND      | NCHW
+ |           | NHWC    | HWCN    |         | NHWC
+@endverbatim
+ * For int8, a dequant or requant operator must follow.
+ *\n
+ *
 *@par Required Attributes:
 * @li strides: A required tuple/list of 4 integers. The stride of the sliding
 * window for H/W dimension. The index of H/W is same as data_format.
@@ -1333,10 +1500,58 @@ REG_OP(Conv3DTransposeD)
 * @li output_padding: The size will be added in the output shape. Defaults
 * to [0, 0, 0, 0].
 * @li offset_x: An optional int. Input offset, used for quantized inference.
- * Defaults to "0".
+ * The negative offset added to the input image for int8 type. Ensure that
+ * offset_x is within the effective int8 range [-128, 127]. Defaults to "0".
+ *\n
+ *\n
+ * The following value range restrictions must be met:
+*@verbatim
+ | Name             | Field    | Scope
+ -------------------|----------|--------------
+ | input_size       | H        | [1, 4096]
+ |                  | W        | [1, 4096]
+ -------------------|----------|--------------
+ | x (out_backprop) | H*strideH| [1, 4096]
+ |                  | W*strideW| [1, 4096]
+ -------------------|----------|--------------
+ | filter           | H        | [1, 255]
+ |                  | W        | [1, 255]
+ -------------------|----------|--------------
+ | y (fmap)         | H        | [1, 4096]
+ |                  | W        | [1, 4096]
+ -------------------|----------|--------------
+ | Stride           | H        | [1, 63]
+ |                  | W        | [1, 63]
+ -------------------|----------|--------------
+ | Padding          | Top      | [0, 255]
+ |                  | Bottom   | [0, 255]
+ |                  | Left     | [0, 255]
+ |                  | Right    | [0, 255]
+ -------------------|----------|--------------
+ | Dilation         | H        | [1, 255]
+ |                  | W        | [1, 255]
+ -------------------|----------|--------------
+ | Offset_x         |          | [-128, 127]
+
+@endverbatim
+ * On Ascend910, the H and W dimensions of fmap or out_backprop do not
+ * support the value 1 when
+ * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1.
+ * If filter_h = 1 and filter_w = 1, then
+ * out_backprop_w * stride_h * stride_w must be < 4096.
+ *\n
+ *
 *@par Outputs:
 * y: A Tensor. A Tensor of type float16 or int32, and has same format as
 * input_size.
+ *\n
+ * out_backprop_height = (fmap_height + pad_top + pad_bottom -
+ *                       (dilation_h * (filter_height - 1) + 1))
+ *                       / stride_h + 1
+ *\n
+ * out_backprop_width = (fmap_width + pad_left + pad_right -
+ *                      (dilation_w * (filter_width - 1) + 1))
+ *                      / stride_w + 1
+ *\n
+ *
 */
REG_OP(Conv2DTranspose)
    .INPUT(input_size, TensorType({DT_INT32, DT_INT64}))
@@ -1405,21 +1620,22 @@ REG_OP(Conv2DTransposeD)
/**
*@brief Computes the deformed convolution output with the expected input
*@par Inputs:
- * Four inputs:
+ * Two inputs:
 * @li x: A Tensor of type float16,float32
 * @li offsets: A Tensor of type float16,float32.Deformation offset parameter.
*@par Required Attributes:
 * @li strides: A tuple/list of 4 integers.The stride of the sliding window for
 * height and width for H/W dimension.
- * @li pads: A tuple/list of 4 integers.Padding added to each dimension
+ * @li pads: A tuple/list of 4 integers. Padding added to the H/W dimensions
 * of the input.
 * @li ksize: A tuple/list of 2 integers.kernel size.
*@par Attributes:
- * Three attributes:
+ * Four attributes:
 * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension
 * of input. Defaults to [1, 1, 1, 1]
 * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x.
 * @li deformable_groups: Specify the c-axis grouping number of input x.
+ * @li modulated: Specifies the DeformableConv2D version: true means v2, false means v1.
*@par Outputs:
 * y: A Tensor. A Tensor of type float16, float32.
*/
@@ -1433,7 +1649,69 @@ REG_OP(DeformableOffsets)
    .ATTR(dilations, ListInt, {1, 1, 1, 1})
    .ATTR(data_format, String, "NCHW")
    .ATTR(deformable_groups, Int, 1)
+    .ATTR(modulated, Bool, true)
    .OP_END_FACTORY_REG(DeformableOffsets)
 
+/**
+*@brief Computes the gradients of DeformableOffsets with respect to input and offsets
+*@par Inputs:
+ * Three inputs:
+ * @li grad: A Tensor of type float16, float32. Gradients with respect to the DeformableOffsets output
+ * @li x: A Tensor of type float16, float32.
+ * @li offsets: A Tensor of type float16, float32. Deformation offset parameter.
+*@par Required Attributes:
+ * @li strides: A tuple/list of 4 integers. The stride of the sliding window for
+ * height and width for H/W dimension.
+ * @li pads: A tuple/list of 4 integers. Padding added to the H/W dimensions
+ * of the input.
+ * @li ksize: A tuple/list of 2 integers. Kernel size.
+*@par Attributes:
+ * Four attributes:
+ * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension
+ * of input. Defaults to [1, 1, 1, 1]
+ * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x.
+ * @li deformable_groups: Specify the c-axis grouping number of input x.
+ * @li modulated: Specifies the DeformableConv2D version: true means v2, false means v1.
+*@par Outputs:
+ * grad_x: A Tensor of type float16, float32. Gradients with respect to input_x
+ * grad_offsets: A Tensor of type float16, float32. Gradients with respect to input_offsets
+*/
+REG_OP(DeformableOffsetsGrad)
+    .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(offsets, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(grad_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(grad_offsets, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .REQUIRED_ATTR(strides, ListInt)
+    .REQUIRED_ATTR(pads, ListInt)
+    .REQUIRED_ATTR(ksize, ListInt)
+    .ATTR(dilations, ListInt, {1, 1, 1, 1})
+    .ATTR(data_format, String, "NCHW")
+    .ATTR(deformable_groups, Int, 1)
+    .ATTR(modulated, Bool, true)
+    .OP_END_FACTORY_REG(DeformableOffsetsGrad)
+
+/**
+*@brief Computes the deformed dilation output with the expected input
+*@par Inputs:
+ * One input:
+ * @li x: A Tensor of type int8, float16, float32
+*@par Required Attributes:
+ * @li dilations: A tuple/list of integers.
+*@par Attributes:
+ * Two attributes:
+ * @li padding_value: The value used to fill the blanks introduced by dilation. Defaults to 0.0.
+ * @li pads: A tuple/list of integers.
+*@par Outputs:
+ * y: A Tensor of type int8, float16, float32.
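+ *\n
+ * For example, assuming the usual dilation expansion, where
+ * output_dim = (input_dim - 1) * dilation + 1 along each dilated axis,
+ * a 4-element row with dilation 2 and padding_value 0.0 becomes
+ * [a, b, c, d] -> [a, 0, b, 0, c, 0, d], i.e. 7 elements.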
+*/ +REG_OP(Dilation) + .INPUT(x, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_INT8, DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(dilations, ListInt) + .ATTR(pads, ListInt, {}) + .ATTR(padding_value, Float, 0.0) + .OP_END_FACTORY_REG(Dilation) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_detect_ops.h b/third_party/fwkacllib/inc/ops/nn_detect_ops.h index a013fb33..5fa40ad6 100644 --- a/third_party/fwkacllib/inc/ops/nn_detect_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_detect_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -254,22 +254,22 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. */ - REG_OP(PriorBox) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .REQUIRED_ATTR(aspect_ratio, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBox); +REG_OP(PriorBox) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .REQUIRED_ATTR(aspect_ratio, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBox); /** *@brief Performs SSD prior box detection, with four additional matrices and the "aspect_ratio" attribute deleted compared to PriorBox . \n @@ -306,25 +306,25 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Restrictions: *Warning: THIS FUNCTION IS DEPRECATED. Please use PriorBox instead. 
*/ - REG_OP(PriorBoxD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(data_h, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(data_w, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(box_height, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(box_width, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBoxD); +REG_OP(PriorBoxD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(data_h, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(data_w, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(box_height, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(box_width, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBoxD); /** *@brief Performs SSD prior box detection, with four additional matrices and the "aspect_ratio" attribute deleted compared to PriorBox . \n @@ -358,22 +358,22 @@ is min_size/sqrt(aspect_ratio), the width is min_size*sqrt(aspect_ratio). Defaul *@par Restrictions: *Warning: THIS FUNCTION IS DEPRECATED. Please use PriorBox instead. */ - REG_OP(PriorBoxDV2) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) - .REQUIRED_ATTR(min_size, ListFloat) - .REQUIRED_ATTR(max_size, ListFloat) - .ATTR(img_h, Int, 0) - .ATTR(img_w, Int, 0) - .ATTR(step_h, Float, 0.0) - .ATTR(step_w, Float, 0.0) - .ATTR(flip, Bool, true) - .ATTR(clip, Bool, false) - .ATTR(offset, Float, 0.5) - .ATTR(variance, ListFloat, {0.1}) - .OP_END_FACTORY_REG(PriorBoxDV2); +REG_OP(PriorBoxDV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(min_size, ListFloat) + .REQUIRED_ATTR(max_size, ListFloat) + .ATTR(img_h, Int, 0) + .ATTR(img_w, Int, 0) + .ATTR(step_h, Float, 0.0) + .ATTR(step_w, Float, 0.0) + .ATTR(flip, Bool, true) + .ATTR(clip, Bool, false) + .ATTR(offset, Float, 0.5) + .ATTR(variance, ListFloat, {0.1}) + .OP_END_FACTORY_REG(PriorBoxDV2); /** *@brief Performs Position Sensitive ROI Pooling . \n @@ -531,10 +531,10 @@ as xx...xyy...yww...whh...hbb...bc0c0..c0c1c1...c1......cncn...cn . \n * It is a custom operator. It has no corresponding operator in Caffe. 
*/ REG_OP(Yolo) - .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .OUTPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) .ATTR(boxes, Int, 3) .ATTR(coords, Int, 4) .ATTR(classes, Int, 80) @@ -584,10 +584,10 @@ REG_OP(Yolo) * It is a custom operator. It has no corresponding operator in Caffe. */ REG_OP(YoloV2DetectionOutput) - .INPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 5) .ATTR(coords, Int, 4) @@ -598,7 +598,7 @@ REG_OP(YoloV2DetectionOutput) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV2DetectionOutput) @@ -647,12 +647,12 @@ REG_OP(YoloV2DetectionOutput) *Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV2DetectionOutput instead. */ REG_OP(YoloV2DetectionOutputD) - .INPUT(coord_data, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 5) .ATTR(coords, Int, 4) @@ -663,7 +663,7 @@ REG_OP(YoloV2DetectionOutputD) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV2DetectionOutputD) @@ -707,16 +707,16 @@ REG_OP(YoloV2DetectionOutputD) * It is a custom operator. It has no corresponding operator in Caffe. 
*/ REG_OP(YoloV3DetectionOutput) - .INPUT(coord_data_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases_low, ListFloat) .REQUIRED_ATTR(biases_mid, ListFloat) .REQUIRED_ATTR(biases_high, ListFloat) @@ -729,7 +729,7 @@ REG_OP(YoloV3DetectionOutput) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutput) @@ -776,22 +776,22 @@ s *Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV3DetectionOutput instead. 
*/ REG_OP(YoloV3DetectionOutputD) - .INPUT(coord_data_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(coord_data_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(obj_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_low, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_mid, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(classes_prob_high, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(img_info, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex1, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex2, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(windex3, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex1, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex2, TensorType({DT_FLOAT16,DT_FLOAT})) - .INPUT(hindex3, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(coord_data_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(coord_data_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(obj_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_low, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_mid, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(classes_prob_high, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(img_info, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex2, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(windex3, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex1, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex2, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(hindex3, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases_low, ListFloat) .REQUIRED_ATTR(biases_mid, ListFloat) .REQUIRED_ATTR(biases_high, ListFloat) @@ -804,7 +804,7 @@ REG_OP(YoloV3DetectionOutputD) .ATTR(score_threshold, Float, 0.5) .ATTR(iou_threshold, Float, 0.45) .ATTR(pre_nms_topn, Int, 512) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutputD) @@ -848,7 +848,7 @@ There are three Yolo operators at Yolov3DetectionOutput's preceding layer on Yol * It is a custom operator. It has no corresponding operator in Caffe. */ REG_OP(YoloV3DetectionOutputV2) - .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) .REQUIRED_ATTR(biases, ListFloat) .ATTR(boxes, Int, 3) .ATTR(coords, Int, 4) @@ -862,7 +862,7 @@ REG_OP(YoloV3DetectionOutputV2) .ATTR(N, Int, 10) .ATTR(resize_origin_img_to_net, Bool, false) .ATTR(out_box_dim, Int, 3) - .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(box_out_num, TensorType({DT_INT32})) .OP_END_FACTORY_REG(YoloV3DetectionOutputV2) @@ -910,9 +910,9 @@ REG_OP(YoloV3DetectionOutputV2) * Warning: THIS FUNCTION IS DEPRECATED. Please use YoloV3DetectionOutputV2 instead. 
*/
REG_OP(YoloV3DetectionOutputV2D)
-    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .DYNAMIC_INPUT(windex, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .DYNAMIC_INPUT(hindex, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .DYNAMIC_INPUT(windex, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .DYNAMIC_INPUT(hindex, TensorType({DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(biases, ListFloat)
    .ATTR(boxes, Int, 3)
    .ATTR(coords, Int, 4)
@@ -926,7 +926,7 @@ REG_OP(YoloV3DetectionOutputV2D)
    .ATTR(N, Int, 10)
    .ATTR(resize_origin_img_to_net, Bool, false)
    .ATTR(out_box_dim, Int, 3)
-    .OUTPUT(box_out, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .OUTPUT(box_out, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(box_out_num, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(YoloV3DetectionOutputV2D)
 
@@ -968,8 +968,9 @@ REG_OP(SPP)
 * Three inputs, including:
*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature
* map.
-*@li rois: A tensor of type float16 or float32, with shape
+*@li rois: A tensor of type float16 or float32, with 3D shape
* [batch, 5, roi_max_num], describing the RIOs.
+* roi_max_num must be less than or equal to 6000 and must be divisible by 16.
*@li roi_actual_num: A optional tensor of type int32, with shape [batch, 8], specifying
* the number of ROIs per batch . \n
@@ -1201,35 +1202,6 @@ REG_OP(RpnProposalsD)
    .OUTPUT(sorted_box, TensorType({DT_FLOAT16}))
    .OP_END_FACTORY_REG(RpnProposalsD)
 
-/**
-*@brief Computes Score Filte Pre-Sort function.
-
-*@par Inputs:
-*Inputs include:
-* @li rois: A Tensor. Must be float16. N-D with shape [N, 4].
-* @li cls_bg_prob: A Tensor. Must be float16. N-D with shape [N, 1].
-
-*@par Attributes:
-* @li score_threshold: required, float, threahold of topk process.
-* @li k: required, Int, threahold of topk process.
-* @li score_filter: bool, mark of score_filter. Defaults to "true"
-* @li core_max_num: int, max number of core. Defaults to "8"
-*@par Outputs:
-* @li sorted_proposal: A Tensor. Must be float16.
-* N-D with shape [8*6002, 8].
-* @li proposal_num: A Tensor. Must be uint32. N-D with shape [8, 8].
-*/
-
-REG_OP(ScoreFiltePreSort)
-    .INPUT(rois, TensorType({DT_FLOAT16}))
-    .INPUT(cls_bg_prob, TensorType({DT_FLOAT16}))
-    .OUTPUT(sorted_proposal, TensorType({ DT_FLOAT16}))
-    .OUTPUT(proposal_num, TensorType({ DT_UINT32}))
-    .REQUIRED_ATTR(score_threshold, Float)
-    .REQUIRED_ATTR(k, Int)
-    .ATTR(score_filter, Bool, true)
-    .ATTR(core_max_num, Int, 8)
-    .OP_END_FACTORY_REG(ScoreFiltePreSort)
 
 /**
*@brief Computes Score Filte Pre-Sort function.
@@ -1383,6 +1355,7 @@ REG_OP(DecodeWheelsTarget)
 
 *@attention Constraints:
 * Only computation of float16 data is supported.
+* Note: when the class num per image * max_size_per_class is too large, compilation fails with an insufficient-memory error.
 */
 REG_OP(BatchMultiClassNonMaxSuppression)
    .INPUT(boxes, TensorType({DT_FLOAT16}))
@@ -1464,9 +1437,9 @@ REG_OP(NormalizeBBox)
 * y: A Tensor. Must have the same type as box_predictions.
 */
 REG_OP(DecodeBboxV2)
-    .INPUT(boxes, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .INPUT(anchors, TensorType({DT_FLOAT16,DT_FLOAT}))
-    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
+    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(anchors, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(scales, ListFloat, {1.0, 1.0, 1.0, 1.0})
    .ATTR(decode_clip, Float, 0.0)
    .ATTR(reversed_box, Bool, false)
@@ -1477,7 +1450,8 @@ REG_OP(DecodeBboxV2)
 *
 *@par Inputs:
 *Inputs include:
-* x: A Tensor. Must be float16 or float32.
+* x: A Tensor. Dtype support: float16, float, int16, int8,
+     uint8, int32, int64.
 *
 *@par Attributes:
 * @li axis: optional, int.
@@ -1485,16 +1459,364 @@ REG_OP(DecodeBboxV2)
 *
 *@par Outputs:
 * @li y1: A Tensor. Must have the same type as x.
-* @li y2: A Tensor. Indices of y1 in x.Dtype must be int32.
+* @li y2: A Tensor. Indices of y1 in x. Dtype must be int32.
+*
 */
 REG_OP(Sort)
-    .INPUT(x, TensorType({ DT_FLOAT16 }))
-    .OUTPUT(y1, TensorType({ DT_FLOAT16 }))
-    .OUTPUT(y2, TensorType({ DT_INT32 }))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT16, DT_INT8,
+                          DT_UINT8, DT_INT32, DT_INT64}))
+    .OUTPUT(y1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT16, DT_INT8,
+                            DT_UINT8, DT_INT32, DT_INT64}))
+    .OUTPUT(y2, TensorType({DT_INT32}))
    .ATTR(axis, Int, -1)
    .ATTR(descending, Bool, false)
    .OP_END_FACTORY_REG(Sort)
 
+/**
+*@brief Computes iou for input bboxes and gtboxes.
+
+*@par Inputs:
+* Two inputs, including:
+*@li bboxes: Bounding boxes, a 2D Tensor of type float16 with shape (N, 4),
+* where each box is (x0, x1, y0, y1),
+*@li gtboxes: Ground-truth boxes, a 2D Tensor of type float16 with shape
+* (M, 4), where each box is (x0, x1, y0, y1).\n
+
+*@par Attributes:
+*@li mode: An optional string attribute specifying the IoU computation mode. \n
+
+*@par Outputs:
+*@li overlap: A 2D Tensor of type float16 with shape [n, m]. \n
+
+*@attention Constraints:
+* Only computation of float16 data is supported.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS DEPRECATED. Please use Iou instead.
+*/
+REG_OP(PtIou)
+    .INPUT(bboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(gtboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(overlap, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(mode, String, "iou")
+    .OP_END_FACTORY_REG(PtIou)
+
+/**
+*@brief Greedily selects a subset of bounding boxes in descending order of
+score . \n
+
+*@par Inputs:
+*Input boxes and scores must be of type float16. Inputs include:
+*@li boxes: An input tensor with shape [num_batches,spatial_dimension,4].
+The single box data format is indicated by center_point_box.
+*@li scores: An input tensor with shape [num_batches,num_classes,spatial_dimension]
+*@li max_output_size: A scalar integer tensor representing the maximum number
+of boxes to be selected by non max suppression.
+*@li iou_threshold: A 0-D float tensor representing the threshold for deciding
+whether boxes overlap too much with respect to IOU.
+*@li score_threshold: A 0-D float tensor representing the threshold for
+deciding when to remove boxes based on score . \n

+*@par Attributes:
+*center_point_box: An integer indicating the format of the box data.
+The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2]
+where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair
+of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models.
+1 - the box data is supplied as [x_center, y_center, width, height].
+ Mostly used for PyTorch models. \n
+
+*@par Outputs:
+*@li selected_indices: A 2-D integer tensor of shape [M] representing the
+selected indices from the boxes tensor, where M <= max_output_size. \n
+
+*@attention Constraints:
+*Input boxes and scores must be of type float16 . \n
+
+*@par Third-party framework compatibility
+*Compatible with onnx NonMaxSuppression operator.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
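+*
+*For reference, the two box encodings accepted via "center_point_box" are
+*related by standard geometry:
+*@verbatim
+  x_center = (x1 + x2) / 2        width  = x2 - x1
+  y_center = (y1 + y2) / 2        height = y2 - y1
+@endverbatim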
+*/
+
+REG_OP(NonMaxSuppressionV6)
+    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(max_output_size, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(iou_threshold, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(score_threshold, TensorType({DT_FLOAT}))
+    .OUTPUT(selected_indices, TensorType({DT_INT32}))
+    .ATTR(center_point_box, Int, 0)
+    .ATTR(max_boxes_size, Int, 0)
+    .OP_END_FACTORY_REG(NonMaxSuppressionV6)
+
+/**
+*@brief Greedily selects a subset of bounding boxes in descending order of
+score . \n
+
+*@par Inputs:
+*Input boxes and scores must be of type float16. Inputs include:
+*@li boxes: An input tensor with shape [num_batches,spatial_dimension,4].
+The single box data format is indicated by center_point_box.
+*@li scores: An input tensor with shape [num_batches,num_classes,spatial_dimension]
+*@li max_output_size: A scalar integer tensor representing the maximum number
+of boxes to be selected by non max suppression.
+*@li iou_threshold: A 0-D float tensor representing the threshold for deciding
+whether boxes overlap too much with respect to IOU.
+*@li score_threshold: A 0-D float tensor representing the threshold for
+deciding when to remove boxes based on score . \n
+*@li index_id: An input tensor with shape [num_batches,num_classes,spatial_dimension,3]
+the last dim representing (batch_id,class_id,index_id) . \n
+
+*@par Attributes:
+*center_point_box: An integer indicating the format of the box data.
+The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2]
+where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair
+of box corners and the coordinates can be provided as normalized
+(i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models.
+1 - the box data is supplied as [x_center, y_center, width, height].
+ Mostly used for PyTorch models. \n
+
+*@par Outputs:
+*@li selected_indices: A 2-D integer tensor of shape [M] representing the
+selected indices from the boxes tensor, where M <= max_output_size. \n
+
+*@attention Constraints:
+*Input boxes and scores must be of type float16 . \n
+
+*@par Third-party framework compatibility
+*Compatible with onnx NonMaxSuppression operator.
+*/
+
+REG_OP(NonMaxSuppressionV7)
+    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(max_output_size, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(iou_threshold, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(score_threshold, TensorType({DT_FLOAT}))
+    .OPTIONAL_INPUT(index_id, TensorType({DT_FLOAT16}))
+    .OUTPUT(selected_indices, TensorType({DT_INT32}))
+    .ATTR(center_point_box, Int, 0)
+    .ATTR(max_boxes_size, Int, 0)
+    .OP_END_FACTORY_REG(NonMaxSuppressionV7)
+
+/**
+*@brief Obtains the ROI feature matrix from the feature map list. It is a customized fused operator for mmdetection. \n
+
+*@par Inputs:
+* Two inputs, including:
+*@li features: A 5HD Tensor list of type float32 or float16.
+*@li rois: ROI position. A 2D Tensor of float32 or float16 with shape (N, 5). "N" indicates the number of ROIs,
+* and the value "5" corresponds to the index of the image where the ROI is located plus "x0", "y0", "x1", and "y1".
+
+*@par Attributes:
+*@li finest_scale: An optional attribute of type int, specifying the scale for calculating the levels of "rois".
+*@li roi_scale_factor: An optional attribute of type float32, specifying the rescaling of "rois" coordinates.
+*@li spatial_scale: An optional attribute of type float32 list, specifying the scaling ratio of "features"
+* to the original image.
+*@li pooled_height: An optional attribute of type int32, specifying the H dimension.
+*@li pooled_width: An optional attribute of type int32, specifying the W dimension.
+*@li sample_num: An optional attribute of type int32, specifying the horizontal and vertical sampling frequency
+* of each output. If this attribute is set to "0", the sampling frequency is equal to the rounded up value of "rois",
+* which is a floating point number. Defaults to "0".
+*@li pool_mode: An optional attribute of type string to indicate the pooling mode. Defaults to "avg" . \n
+*@li aligned: An optional attribute of type bool, specifying whether to align to the corner. Defaults to true . \n
+
+*@par Outputs:
+* output: Outputs the feature sample of each ROI position. The format is 5HD Tensor of type float32 or float16.
+* The axis N is the number of input ROIs. Axes H, W, and C are consistent with the values of "pooled_height",
+* "pooled_width", and "features", respectively.
+
+*@par Third-party framework compatibility
+*Compatible with mmdetection SingleRoIExtractor operator.
+*/
+REG_OP(RoiExtractor)
+    .DYNAMIC_INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(finest_scale, Int, 56)
+    .ATTR(roi_scale_factor, Float, 0)
+    .ATTR(spatial_scale, ListFloat, {1.f / 4, 1.f / 8, 1.f / 16, 1.f / 32})
+    .ATTR(pooled_height, Int, 7)
+    .ATTR(pooled_width, Int, 7)
+    .ATTR(sample_num, Int, 0)
+    .ATTR(pool_mode, String, "avg")
+    .ATTR(aligned, Bool, true)
+    .OP_END_FACTORY_REG(RoiExtractor)
+
+/**
+*@brief Performs Position Sensitive PS ROI Pooling . \n
+
+*@par Inputs:
+* Two inputs, including:
+*@li x: An NC1HWC0 tensor of type float16 or float32, describing the feature
+* map, dimension C1 must be equal to
+* (int((output_dim + 15) / C0)) * group_size * group_size.
+*@li rois: A tensor of type float16 or float32, with shape
+* [batch, 5, rois_num], describing the ROIs, each ROI consists of five
+* elements: "batch_id", "x1", "y1", "x2", and "y2", where "batch_id" indicates
+* the index of the input feature map, "x1", "y1", "x2", or "y2" must be
+* greater than or equal to "0.0" . \n
+
+*@par Attributes:
+*@li output_dim: A required int32, specifying the number of output channels,
+* must be greater than 0.
+*@li group_size: A required int32, specifying the number of groups to encode
+* position-sensitive score maps, must be within the range (0, 128).
+*@li spatial_scale: A required float32, scaling factor for mapping the input
+* coordinates to the ROI coordinates . \n
+
+*@par Outputs:
+*y: An NC1HWC0 tensor of type float16 or float32, describing the result
+* feature map . \n
+
+*@attention Constraints:
+* NC1HWC0: the channel dimension must be group_size squared; rois_num must be a multiple of 16
+*/
+REG_OP(PSROIPoolingV2)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .REQUIRED_ATTR(spatial_scale, Float)
+    .REQUIRED_ATTR(output_dim, Int)
+    .REQUIRED_ATTR(group_size, Int)
+    .OP_END_FACTORY_REG(PSROIPoolingV2)
+
+/**
+*@brief Performs Position Sensitive PS ROI Pooling Grad . \n
+
+*@par Inputs:
+* Two inputs, including:
+*@li x: An NC1HWC0 tensor of type float16 or float32, describing the result
+* feature map .
\n
+*@li rois: A tensor of type float16 or float32, with shape
+* [batch, 5, rois_num], describing the ROIs, each ROI consists of five
+* elements: "batch_id", "x1", "y1", "x2", and "y2", where "batch_id" indicates
+* the index of the input feature map, "x1", "y1", "x2", or "y2" must be
+* greater than or equal to "0.0" . \n
+
+*@par Attributes:
+*@li output_dim: A required int32, specifying the number of output channels,
+* must be greater than 0.
+*@li group_size: A required int32, specifying the number of groups to encode
+* position-sensitive score maps, must be within the range (0, 128).
+*@li spatial_scale: A required float32, scaling factor for mapping the input
+* coordinates to the ROI coordinates . \n
+*@li input_size: A required listInt, specifying the size (H, W) of the gradient input.
+
+*@par Outputs:
+*y: An NC1HWC0 tensor of type float16 or float32, describing the feature
+* map, dimension C1 must be equal to
+* (int((output_dim + 15) / C0)) * group_size * group_size.
+
+*@attention Constraints:
+* NC1HWC0: the channel dimension must be group_size squared; rois_num must be a multiple of 16
+*/
+REG_OP(PSROIPoolingGradV2D)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .REQUIRED_ATTR(spatial_scale, Float)
+    .REQUIRED_ATTR(output_dim, Int)
+    .REQUIRED_ATTR(group_size, Int)
+    .REQUIRED_ATTR(input_size, ListInt)
+    .OP_END_FACTORY_REG(PSROIPoolingGradV2D)
+
+/**
+*@brief Generates the responsible flags of anchors in a single feature map.
+
+*@par Inputs:
+*@li gt_bboxes: Ground truth box, 2-D Tensor with shape `[batch, 4]`.
+
+*@par Attributes:
+*@li featmap_size: A list of integers, the size of the feature map.
+*@li strides: A list of integers, the stride of the current level.
+*@li num_base_anchors: The number of base anchors.
+
+*@par Outputs:
+*flags: The valid flags of each anchor in a single level.
+*/
+REG_OP(AnchorResponseFlags)
+    .INPUT(gt_bboxes, TensorType({DT_FLOAT}))
+    .OUTPUT(flags, TensorType({DT_UINT8}))
+    .REQUIRED_ATTR(featmap_size, ListInt)
+    .REQUIRED_ATTR(strides, ListInt)
+    .REQUIRED_ATTR(num_base_anchors, Int)
+    .OP_END_FACTORY_REG(AnchorResponseFlags)
+
+/**
+*@brief Generates bounding boxes based on yolo's "anchor" and "ground-truth" boxes.
+* It is a customized mmdetection operator . \n
+
+*@par Inputs:
+* Three inputs, including:
+*@li anchor_boxes: anchor boxes generated by the yolo training set.
+* A 2D Tensor of type float32 or float16 with shape (N, 4). "N" indicates the number
+* of ROIs, and the value "4" refers to (tx, ty, tw, th).
+*@li gt_bboxes: target of the transformation, e.g., ground-truth boxes.
+* A 2D Tensor of type float32 or float16 with shape (N, 4).
+* "N" indicates the number of ROIs, and 4 indicates "dx", "dy", "dw", and "dh" .
+*@li stride: Scale for each box.
+* A 1D Tensor of type int32 with shape (N,).
+* "N" indicates the number of ROIs. \n
+
+*@par Attributes:
+*@li performance_mode: Selects the performance mode, "high_precision" or
+* "high_performance". For float32 input, "high_precision" keeps the output
+* error below 0.0001, while "high_performance" gives the best performance,
+* with an error only below 0.005.
+
+*@par Outputs:
+*encoded_bboxes: Bboxes generated based on "anchor_boxes" and "gt_bboxes". Have the
+* same format and type as "anchor_boxes".
+*
+*@attention Constraints:
+* the input anchor boxes support a maximum N of 20480. \n
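+*
+*A minimal construction sketch (assuming the set_input_*/set_attr_* accessors
+*that REG_OP-generated GE IR classes conventionally expose; tensor
+*descriptions are omitted for brevity):
+*@verbatim
  auto encode = ge::op::YoloBoxesEncode("encode")
                    .set_input_anchor_boxes(anchor_boxes)
                    .set_input_gt_bboxes(gt_bboxes)
                    .set_input_stride(stride)
                    .set_attr_performance_mode("high_precision");
+@endverbatim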
*/
REG_OP(YoloBoxesEncode)
    .INPUT(anchor_boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(gt_bboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(stride, TensorType({DT_INT32}))
    .ATTR(performance_mode, String, "high_precision")
    .OUTPUT(encoded_bboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(YoloBoxesEncode)

/**
*@brief Assigns positive ground-truth indices to bboxes. It is a customized
* operator for the mmdetection grid assigner.

*@par Inputs:
* Eight inputs, including:
*@li assigned_gt_inds: Tensor of type float16 or float32, shape (n, )
*@li overlaps: A Tensor. Datatype is same as assigned_gt_inds. IOU between gt_bboxes and bboxes. shape (k, n)
*@li box_responsible_flags: A Tensor. Support uint8. Flag to indicate whether box is responsible.
*@li max_overlaps: A Tensor. Datatype is same as assigned_gt_inds. overlaps.max(axis=0).
*@li argmax_overlaps: A Tensor. Support int32. overlaps.argmax(axis=0).
*@li gt_max_overlaps: A Tensor. Datatype is same as assigned_gt_inds. overlaps.max(axis=1).
*@li gt_argmax_overlaps: A Tensor. Support int32. overlaps.argmax(axis=1).
*@li num_gts: A Tensor. Support int32. The real k. Shape (1, ).

*@par Attributes:
*@li pos_iou_thr: float. IOU threshold for positive bboxes.
*@li min_pos_iou: float. Minimum IOU for a bbox to be considered as a positive bbox.
*@li gt_max_assign_all: bool. Whether to assign all bboxes with the same highest overlap with some gt to that gt.

*@par Outputs:
*@li assigned_gt_inds_pos: A Tensor. Support float16/float32. shape (n, ).
*/
REG_OP(GridAssignPositive)
    .INPUT(assigned_gt_inds, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(box_responsible_flags, TensorType({ DT_UINT8 }))
    .INPUT(max_overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(argmax_overlaps, TensorType({ DT_INT32 }))
    .INPUT(gt_max_overlaps, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(gt_argmax_overlaps, TensorType({ DT_INT32 }))
    .INPUT(num_gts, TensorType({ DT_INT32 }))
    .OUTPUT(assigned_gt_inds_pos, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(pos_iou_thr, Float)
    .REQUIRED_ATTR(min_pos_iou, Float)
    .REQUIRED_ATTR(gt_max_assign_all, Bool)
    .OP_END_FACTORY_REG(GridAssignPositive)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_DETECT_OPS_H_
+
diff --git a/third_party/fwkacllib/inc/ops/nn_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_norm_ops.h
index 35c4c7d4..b44c0780 100644
--- a/third_party/fwkacllib/inc/ops/nn_norm_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_norm_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -55,7 +55,9 @@ REG_OP(LogSoftmaxGrad)
 *Two inputs, including:
 * @li features: A Tensor. Must be one of the following types: half, float32, double.
 * A "batch_size * num_classes" matrix.
-* @li labels: A Tensor of the same type as "features". batch_size vector with values in [0, num_classes).
+* @li labels: A Tensor. Must be one of the following types: int32, int64.
+* batch_size vector with values in [0, num_classes).
+* This is the label for the given minibatch entry.
 
*@par Outputs:
@@ -105,6 +107,9 @@ REG_OP(SoftmaxCrossEntropyWithLogits)
 * @li grad_softmax: A Tensor. Has the same shape and type as "softmax".
 * The format is NC1HWC0 or DN . \n
 
+*@par Attributes:
+* axes: An optional list of ints. Defaults to "{-1}" . \n
+
 *@par Outputs:
 *grad_x: A Tensor.
Has the same shape and type as "softmax" . \n
 
@@ -115,6 +120,7 @@ REG_OP(SoftmaxGrad)
    .INPUT(softmax, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
    .INPUT(grad_softmax, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
    .OUTPUT(grad_x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
+    .ATTR(axes, ListInt, {-1})
    .OP_END_FACTORY_REG(SoftmaxGrad)
 
 /**
@@ -160,20 +166,20 @@ REG_OP(SigmoidCrossEntropyWithLogits)
    .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogits)
 
 /**
-*@brief Computes the sigmoid cross entropy loss of "predict" and "target" . \n
+*@brief Computes the sigmoid cross entropy loss of "predict" and "target".
 
 *@par Inputs:
 * four inputs, including:
 *@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value.
-*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value . \n
-*@li weight: An multi-dimensional Tensor, specifying the weight value. \n
+*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value.
+*@li weight: A multi-dimensional Tensor, specifying the weight value.
 *@li pos_weight: An multi-dimensional Tensor, specifying the pos weight value. \n
 
 *@par Attributes:
-*reduction: A character string from "none", "mean", and "sum", specifying the reduction type to be applied to the output. Defaults to "mean" . \n
+*reduction: A character string from "none", "mean", and "sum", specifying the reduction type to be applied to the output. Defaults to "mean". \n
 
 *@par Outputs:
-*loss: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict" . \n
+*loss: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict". \n
 
 *@par Third-party framework compatibility
 * Compatible with PyTorch operator BCEWithLogitsLoss.
@@ -330,6 +336,41 @@ REG_OP(SoftmaxV2)
    .ATTR(axes, ListInt, {-1})
    .OP_END_FACTORY_REG(SoftmaxV2)
 
+/**
+*@brief Computes softmax fused with DropOutDoMaskV3D.
+
+*@par Inputs:
+*Two inputs, including:
+* @li x: A mutable Tensor. Only type float16 is supported.
+* @li mask: A mutable Tensor. Must meet all of the following rules:
+*     shape of mask should be 1D.
+*     dtype of mask should be uint8.
+*     the shape value must meet the following formula:
+*     value = (size(x) + 128 - 1) // 128 * 128
+
+*@par Attributes:
+* @li keep_prob: A required float, specifying the probability of keeping
+* each element . \n
+* @li axes: A list of int. The dimension softmax would be performed on. Defaults
+* to "[-1]" . \n
+
+*@par Outputs:
+*y1: A mutable Tensor. Has the same type as "x".
+*y2: A mutable Tensor. Has the same type as "x". \n
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(SoftmaxV2WithDropOutDoMaskV3D)
+    .INPUT(x, TensorType({DT_FLOAT16}))
+    .INPUT(mask, TensorType({DT_UINT8}))
+    .OUTPUT(y1, TensorType({DT_FLOAT16}))
+    .OUTPUT(y2, TensorType({DT_FLOAT16}))
+    .REQUIRED_ATTR(keep_prob, Float)
+    .ATTR(axes, ListInt, {-1})
+    .OP_END_FACTORY_REG(SoftmaxV2WithDropOutDoMaskV3D)
+
 /**
 *@brief Computes log softmax activations . \n
@@ -427,6 +468,33 @@ REG_OP(MVN)
    .ATTR(eps, Float, 1e-9)
    .OP_END_FACTORY_REG(MVN)
 
+/**
+*@brief Normalizes the input . \n
+
+*@par Inputs:
+* One input:
+*x: An NCHW tensor of type float16 or float32 . \n
+
+*@par Attributes:
+*@li eps: An optional float32 epsilon used to avoid division by zero. Defaults to "1e-9" .
\n +*@li axes: A list of Intefers, along which axis to reduce. Defaults to "[0, 2, 3]" . \n + +*@par Outputs: +*y: An NCHW tensor of type float16 or float32 . \n + +*@attention Constraints: +* The input tensor must have the NCHW format, whose shape length must be 4. +*@par Third-party framework compatibility +* Compatible with the ONNX operator MeanVarianceNormalization. +*/ + +REG_OP(MVNV2) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) /* "First operand." */ + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) /* "Result, has same element type as inputs" */ + .ATTR(eps, Float, 1e-9) + .ATTR(axes, ListInt, {0, 2, 3}) + .OP_END_FACTORY_REG(MVNV2) + /** *@brief Normalizes the input "x1" . \n @@ -498,6 +566,31 @@ REG_OP(LayerNorm) .ATTR(epsilon, Float, 0.0000001) .OP_END_FACTORY_REG(LayerNorm) +/** +*@brief Returns a tensor where each sub-tensor of input along dimension +* dim is normalized such that the p-norm of the sub-tensor is lower than the value maxnorm. \n + +*@par Inputs: +*One input, including: +* @li x: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Attributes: +* @li p: Specify L_p norm, the type is float. +* @li dim: The processed dim, the type is int. +* @li maxnorm: Threshold for comparison, the type is float. \n + +*@par Outputs: +*One outputs, including: +* @li y: shape and dtype of output, should be same shape and type as input. +*/ +REG_OP(Renorm) + .INPUT(x, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) + .REQUIRED_ATTR(p, Float) + .REQUIRED_ATTR(dim, Int) + .REQUIRED_ATTR(maxnorm, Float) + .OP_END_FACTORY_REG(Renorm) + /** *@brief LayerNormGrad operator interface implementation * calculating: dy, x, variance, mean, gamma @@ -586,6 +679,48 @@ REG_OP(LayerNormXBackprop) .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) .OP_END_FACTORY_REG(LayerNormXBackprop) +/** +*@brief LayerNormXBackpropV2 operator interface implementation +* calculating: dy, x, variance, mean, gamma +* pd_xl = data_dy*data_gamma +* pd_var = np.sum(((-0.5)*pd_xl*(data_x - data_mean) +* np.power((data_variance + EPSLON), (-1.5))), +* reduce_axis, keepdims=True) +* pd_mean = np.sum(((-1.0)*pd_xl +* np.power((data_variance + EPSLON), (-0.5))), +* reduce_axis, keepdims=True) +* + pd_var*(1.0/m) +* np.sum(((-2.0)*(data_x - data_mean)), reduce_axis, keepdims=True) +* pd_x = pd_xl*np.power((data_variance + EPSLON), (-0.5)) + +* pd_var*(2.0/m)*(data_x - data_mean) + pd_mean*(1.0/m) +* res_for_gamma = (data_x - data_mean) * np.power((data_variance + EPSLON), (-0.5)) + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. Must be one of the following types: float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
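+
+* For reference, the formulas above can be collected into a small numpy
+* sketch (illustrative only, not the kernel implementation; "reduce_axis"
+* and "eps" are assumptions based on this description):
+*
+*     import numpy as np
+*
+*     def layer_norm_x_backprop_v2(dy, x, variance, mean, gamma,
+*                                  reduce_axis=-1, eps=1e-7):
+*         m = x.shape[reduce_axis]
+*         rstd = np.power(variance + eps, -0.5)   # 1 / sqrt(var + eps)
+*         pd_xl = dy * gamma
+*         pd_var = np.sum((-0.5) * pd_xl * (x - mean)
+*                         * np.power(variance + eps, -1.5),
+*                         reduce_axis, keepdims=True)
+*         pd_mean = (np.sum((-1.0) * pd_xl * rstd, reduce_axis, keepdims=True)
+*                    + pd_var * (1.0 / m)
+*                    * np.sum((-2.0) * (x - mean), reduce_axis, keepdims=True))
+*         pd_x = (pd_xl * rstd + pd_var * (2.0 / m) * (x - mean)
+*                 + pd_mean * (1.0 / m))
+*         res_for_gamma = (x - mean) * rstd
+*         return pd_x, res_for_gamma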
+*/ +REG_OP(LayerNormXBackpropV2) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(LayerNormXBackpropV2) + /** *@brief LayerNormBetaGammaBackprop operator interface implementation * calculating: dy, x, variance, mean @@ -629,6 +764,35 @@ REG_OP(LayerNormBetaGammaBackprop) .REQUIRED_ATTR(shape_gamma, ListInt) .OP_END_FACTORY_REG(LayerNormBetaGammaBackprop) +/** +*@brief LayerNormBetaGammaBackpropV2 operator interface implementation +* calculating: dy, x, variance, mean +* pd_gamma = np.sum((data_dy*res_for_gamma), param_axis, keepdims=True) +* pd_beta = np.sum(data_dy, param_axis, keepdims=True) + +*@par Inputs: +*Three inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(LayerNormBetaGammaBackpropV2) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(shape_gamma, ListInt) + .OP_END_FACTORY_REG(LayerNormBetaGammaBackpropV2) + /** *@brief Return "output" according to the algorithm of dropout_do_mask: * scale_x = x *(1 / keep_prob) @@ -656,7 +820,68 @@ REG_OP(DropOutDoMask) .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16})) .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) .OP_END_FACTORY_REG(DropOutDoMask) - + +/** +*@brief Return "output" according to the algorithm of dropout_do_mask: +* scale_x = x *(1 / keep_prob) +* output = select(mask == 1, scale_x, 0) + +*@par Inputs: +*Three inputs, including: +* @li x: A mutable Tensor. Must be one of the following types: +* float16, float32 +* @li mask: A mutable Tensor. Must met all of the following rules: +* shape of mask should be 1D. +* dtype of mask should be uint8. +* value of shape should met the following algorithm: +* value = (size(x) + 128 - 1) // 128 * 128 +* @li keep_prob: A mutable Tensor. Must met all of the following rules: +* shape of "keep_prob" should be (1,) or [1,]. +* Has the same type as "x" . \n + +*@par Output: +*y: A mutable Tensor. Has the same type as "x". +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DropOutDoMaskV3) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mask, TensorType({DT_UINT8})) + .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(DropOutDoMaskV3) + +/** +*@brief Return "output" according to the algorithm of dropout_do_mask: +* scale_x = x *(1 / keep_prob) +* output = select(mask == 1, scale_x, 0) + +*@par Inputs: +*Two inputs, including: +* @li x: A mutable Tensor. 
Must be one of the following types: +* float16, float32 +* @li mask: A mutable Tensor. Must met all of the following rules: +* shape of mask should be 1D. +* dtype of mask should be uint8. +* value of shape should met the following algorithm: +* value = (size(x) + 128 - 1) // 128 * 128 +*@par Attributes: +* @li keep_prob: A mutable Tensor. Must met all of the following rules: +* shape of "keep_prob" should be (1,) or [1,]. +* Has the same type as "x" . \n + +*@par Output: +*y: A mutable Tensor. Has the same type as "x". +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(DropOutDoMaskV3D) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mask, TensorType({DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(keep_prob, Float) + .OP_END_FACTORY_REG(DropOutDoMaskV3D) + /** *@brief Scales the input . \n @@ -703,7 +928,7 @@ REG_OP(Scale) *@par Inputs: *One input, including: -*@li x: A Tensor. Must be 4-D shape, and only support the following types: float16, float32 . \n +*x: A Tensor. Must be 4-D shape, and only support the following types: float16, float32 . \n *@par Attributes: *@li depth_radius: An optional int32, specifying the half-width of the normalization window. Defaults to "5". @@ -960,24 +1185,532 @@ REG_OP(INInferV2D) .OP_END_FACTORY_REG(INInferV2D) /** -*@brief Performs instance normalization for inference of InHost part. +* @brief InstanceNorm operator interface implementation. -*@par Inputs:\n -* One input, including: (NC1HWC0 supported) -* variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance. +* @par Inputs: +* Three inputs, including: +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32. +* @li beta: A Tensor. Must be one of the following types: float16, float32. + +* @par Attributes: +* @li data_format: An attribute of type String \n +* @li epsilon: An attribute of type Float. \n + +* @par Outputs: +*Three outputs, including: +* @li y: A Tensor. Has the same type as "x". \n +* @li mean: A Tensor. Has the same type as "x". \n +* @li variance: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* Can be used by onnx InstanceNormalization +*/ +REG_OP(InstanceNorm) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(gamma, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(beta, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(mean, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(variance, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(data_format, String, "NDHWC") + .ATTR(epsilon, Float, 1e-6) + .OP_END_FACTORY_REG(InstanceNorm) + +/** +*@brief InstanceNormGrad operator interface implementation. + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Three outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. 
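+
+* For orientation, the forward computation this gradient corresponds to can
+* be sketched in numpy as follows (illustrative only; the NCHW layout, the
+* broadcastable gamma/beta shapes and "eps" are assumptions):
+*
+*     import numpy as np
+*
+*     def instance_norm_forward(x, gamma, beta, eps=1e-6):
+*         # statistics are computed per sample and per channel (over H, W)
+*         mean = x.mean(axis=(2, 3), keepdims=True)
+*         variance = x.var(axis=(2, 3), keepdims=True)
+*         y = gamma * (x - mean) / np.sqrt(variance + eps) + beta
+*         return y, mean, variance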
+ +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(InstanceNormGrad) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(InstanceNormGrad) + +/** +*@brief InstanceNormXBackprop operator interface implementation. + +*@par Inputs: +*Five inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li x: A Tensor. Must be one of the following types: float16, float32. +* @li variance: A Tensor. Must be one of the following types: float16, float32. +* @li mean: A Tensor. Must be one of the following types: float16, float32. +* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n + +*@par Outputs: +*Two outputs, including: +* @li pd_x: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. Must be one of the following types: float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(InstanceNormXBackprop) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(InstanceNormXBackprop) + +/** +*@brief InstanceNormBetaGammaBackprop operator interface implementation. + +*@par Inputs: +*Two inputs, including: +* @li dy: A Tensor. Must be one of the following types: float16, float32. +* @li res_for_gamma: A Tensor. Must be one of the following types: float32.\n + +*@par Outputs: +*Two outputs, including: +* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. +* @li pd_beta: A Tensor. Must be one of the following types: float16, float32. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(InstanceNormBetaGammaBackprop) + .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(res_for_gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(InstanceNormBetaGammaBackprop) + +/** +* @brief Computes Kl_div_loss_grad or Kl_div_loss_backward. \n + +* @par Inputs: +* Three inputs, including: +* @li grad: A Tensor. Must be one of the following types: float16, float32. +* Required. +* @li input: A Tensor. Has the same type as "grad". Required. +* @li target: A Tensor. Has the same type as "grad". Required. \n + +* @par Attributes: +* @li reduction: An optional attribute of type String. Defaults to "mean". \n +* @li log_target: An optional attribute of type Bool. Defaults to false. \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "grad". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator KlDivLossGrad. 
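+
+* For the semantics, a numpy sketch of this backward pass (illustrative
+* only; it assumes the PyTorch kl_div convention in which "input" holds
+* log-probabilities):
+*
+*     import numpy as np
+*
+*     def kl_div_loss_grad(grad, input_, target,
+*                          reduction="mean", log_target=False):
+*         t = np.exp(target) if log_target else target
+*         grad_input = -t * grad   # d/d(input) of target * (log(target) - input)
+*         if reduction == "mean":
+*             grad_input = grad_input / input_.size
+*         return grad_input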
+*/ +REG_OP(KlDivLossGrad) + .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(input, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .ATTR(log_target, Bool, false) + .OP_END_FACTORY_REG(KlDivLossGrad) + +/** +* @brief Computes l1_loss_grad or l1_loss_backward. \n + +* @par Inputs: +* Three inputs, including: +* @li grads: A Tensor. Must be one of the following types: float16, float32. +* Required. +* @li predict: A Tensor. Has the same type as "grads". Required. +* @li label: A Tensor. Has the same type as "grads". Required. \n + +* @par Attributes: +* @li reduction: An optional attribute of type String. Defaults to "mean". \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator L1LossGrad. +*/ +REG_OP(L1LossGrad) + .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(L1LossGrad) + +/** +* @brief Computes loss of lp, p=1,2,3.... + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li label: An ND tensor of type float16, float32. \n + +* @par Attributes: +* @li p: A required int attribute that decides which loss to compute, now the p only can be 1 to compute l1_loss. +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator LpLoss. +*/ +REG_OP(LpLoss) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .REQUIRED_ATTR(p, Int) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(LpLoss) + +/** +* @brief Computes gradients of mse loss. + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li label: An ND tensor of type float16, float32. +* @li dout: An ND tensor of type float16, float32. \n + +* @par Attributes: +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li y: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator MseLossGrad. +*/ +REG_OP(MseLossGrad) + .INPUT(predict, TensorType({DT_FLOAT32, DT_FLOAT16})) + .INPUT(label, TensorType({DT_FLOAT32, DT_FLOAT16})) + .INPUT(dout, TensorType({DT_FLOAT32, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT32, DT_FLOAT16})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(MseLossGrad) + +/** +* @brief Computes mse loss. +* @par Inputs: +* two inputs, including: +* @li predict: An ND Tensor of dtype float16 or float32. +* @li label: An ND Tensor of dtype float16 or float32.\n +* +* @par Attributes: +* @li reduction:An optional str from sum, none, mean, Defaults to "mean".\n +* +* @par Outputs: +* @li y: when reduction=sum/mean, y is scale. 
when reduction=none, y has +* same type and shape as "predict".\n +*/ +REG_OP(MseLoss) + .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(MseLoss) + +/** +* @brief Calculates the reversed outputs of the function "smooth_l1_loss_v2". \n + +* @par Inputs: +* Three Inputs, including: +* @li predict: A Tensor. Must be one of the following types: +* float16, float32. +* @li label: A Tensor. Has the same type as "predict". +* @li dout: A Tensor. Has the same type as "predict". \n + +* @par Attributes: +* Two Attributes, including: +* @li sigma: An optional float. Defaults to 1.0. \n + +* @li reduction: An optional string. Defaults to "mean", +* Must be one of the following: "none", "mean", "sum". \n + +* @par Outputs: +* @li gradient: A Tensor. Has the same type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SmoothL1LossBackward. +*/ +REG_OP(SmoothL1LossGradV2) + .INPUT(predict, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(label, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(dout, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(gradient, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(sigma, Float, 1.0) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SmoothL1LossGradV2) + +/** +* @brief Creates a criterion that uses a squared term if the absolute +* element-wise error falls below beta and an L1 term otherwise. It is +* less sensitive to outliers than the MSELoss and in some cases prevents +* exploding gradients. + +* @par Inputs: +* @li predict: A multi-dimensional Tensor of type float16 or float32, +* specifying the predictive value. \n +* @li label: A multi-dimensional Tensor of type float16 or float32, +* specifying the target value. \n + +* @par Attributes: +* @li sigma: An optional int. Specifies the threshold of loss. Defaults +* to "1.0". \n +* @li reduction: An optional str. Specifies the reduction to apply to +* the output: 'none' | 'mean' | 'sum'. 'none': no reduction will be applied, +* 'mean': the sum of the output will be divided by the number of elements in +* the output,'sum': the output will be summed. Default: 'mean'. \n + +* @par Outputs: +* @li loss: Indicates the loss between the predictive value and target value. +* Has the same dimensions as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator smooth_l1_loss. \n +*/ +REG_OP(SmoothL1LossV2) + .INPUT(predict, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(label, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(loss, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(sigma, Float, 1.0) + .ATTR(reduction, String, "mean") + .OP_END_FACTORY_REG(SmoothL1LossV2) + +/** +* @brief Computes Centralization. result = x - mean(x, axes) + +* @par Inputs: +* @li x: An ND tensor of type float16, float32. +* @par Attributes: +* @li axes: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType. +* Must be in the range [-rank(x), rank(x)). +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* custom operator \n +*/ +REG_OP(Centralization) + .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .ATTR(axes, ListInt, {-1}) + .OP_END_FACTORY_REG(Centralization) + +/** +*@brief Roll the tensor along the given dimension(s). 
+* Elements that are shifted beyond the last position are re-introduced at the first position. +* If a dimension is not specified, the tensor will be flattened before rolling and then restored to the original shape. \n + +*@par Inputs: +*One inputs, including: +* @li x: A tensor . Must be one of the following types: +* float16, float32, int32, uint32, int8, uint8. \n *@par Attributes: -* epsilon: An optional float32, specifying the small value added to -variance to avoid dividing by zero. Defaults to "0.00001" . \n +* @li shifts: The number of places by which the elements of the tensor are shifted. \n +* @li dims: Axis along which to roll. \n -*@par Outputs:\n -* variance_sqrt: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance_sqrt. +*@par Outputs: +* y: A Tensor with the same type and shape of x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator Roll. \n */ -REG_OP(InHost) - .INPUT(variance, TensorType({DT_FLOAT})) - .OUTPUT(variance_sqrt, TensorType({DT_FLOAT})) - .ATTR(epsilon, Float, 0.00001) - .OP_END_FACTORY_REG(InHost) +REG_OP(Roll) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_UINT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_UINT32,DT_INT8,DT_UINT8})) + .REQUIRED_ATTR(shifts, ListInt) + .ATTR(dims, ListInt, {}) + .OP_END_FACTORY_REG(Roll) + +/** + *@brief Calculate the loss. Creates a criterion that optimizes a two-class classification + logistic loss between input_x and input_y (containing 1 or -1). \n + + *@par Inputs: + *One inputs, including: + * @li input_x: A tensor. Must be one of the following types: + * float16, float32. \n + * @li input_y: A tensor. Must be one of the following types: + * float16, float32. \n + + *@par Attributes: + *@li lambd: An optional string.Defaults to "mean". \n + + *@par Outputs: + *output_z: while reduction == "none", A Tensor with the same type and shape of input_x's. \n + * while reduction == "sum" or "mean", A Tensor with the same type of input_x , shape of which is (1,) + + *@par Third-party framework compatibility + *Compatible with the Pytorch operator SoftMarginLoss. \n + */ +REG_OP(SoftMarginLoss) + .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16})) + .INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16})) + .ATTR(reduction, String, "mean") + .OUTPUT(output_z, TensorType({DT_FLOAT, DT_FLOAT16})) + .OP_END_FACTORY_REG(SoftMarginLoss) + +/** +* @brief Computes gradients of sigmoid_cross_entropy_with_logits_v2. + +* @par Inputs: +* @li predict: An ND tensor of type float16, float32. +* @li target: An ND tensor of type float16, float32. +* @li dout: An ND tensor of type float16, float32. +* @li weight: An optional ND tensor of type float16, float32. +* @li pos_weight: An optional ND tensor of type float16, float32. \n + +* @par Attributes: +* @li reduction: An optional string.Defaults to "mean". \n + +* @par Outputs: +* @li gradient: An ND tensor tensor with the same shape and type as "predict". \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator SigmoidCrossEntropyWithLogitsGrad. 
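+
+* For reference, a numpy sketch of the gradient with respect to "predict"
+* (illustrative only; derived from the usual BCE-with-logits formulation,
+* not taken from the kernel):
+*
+*     import numpy as np
+*
+*     def sigmoid_ce_with_logits_grad_v2(predict, target, dout, weight=None,
+*                                        pos_weight=None, reduction="mean"):
+*         sig = 1.0 / (1.0 + np.exp(-predict))
+*         pw = 1.0 if pos_weight is None else pos_weight
+*         # d/dx of -(pw * t * log(sig) + (1 - t) * log(1 - sig))
+*         g = (sig * (1.0 - target + pw * target) - pw * target) * dout
+*         if weight is not None:
+*             g = g * weight
+*         if reduction == "mean":
+*             g = g / predict.size
+*         return g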
+*/
+REG_OP(SigmoidCrossEntropyWithLogitsGradV2)
+    .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(dout, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(weight, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(pos_weight, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(reduction, String, "mean")
+    .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogitsGradV2)
+
+/**
+ * @brief Calculate the PoissonNllLoss function.
+ * target ~ Poisson(input); loss(input, target) = input - target * log(input) + log(target!) \n
+
+ * @par Inputs:
+ * Two inputs, including:
+ * @li input_x: A tensor. Must be one of the following types:
+ * float16, float32. \n
+ * @li target: A tensor. Must be one of the following types:
+ * float16, float32. \n
+
+ * @par Attributes:
+ * Four attributes, including:
+ * @li log_input: An optional bool. Defaults to "True". \n
+ * @li full: An optional bool. Defaults to "False". \n
+ * @li eps: An optional float. Defaults to "1e-8". \n
+ * @li reduction: An optional string. Defaults to "mean". \n
+
+ * @par Outputs:
+ * loss: A Tensor with the same element type as the two inputs. \n
+
+ * @par Third-party framework compatibility
+ * Compatible with the Pytorch operator PoissonNllLoss. \n
+ */
+REG_OP(PoissonNllLoss)
+    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(loss, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(log_input, Bool, true)
+    .ATTR(full, Bool, false)
+    .ATTR(eps, Float, 1e-8)
+    .ATTR(reduction, String, "mean")
+    .OP_END_FACTORY_REG(PoissonNllLoss)
+
+/**
+ * @brief Generates the sequence mask for RNN (rnn_gen_mask).
+ * @par Inputs:
+ * @li seq_length: An ND Tensor of type int32. Records the current length of each batch. \n
+ *
+ * @par Attributes:
+ * @li num_step: A required int. \n
+ * @li hidden_size: A required int. \n
+ *
+ * @par Output:
+ * seq_mask: A mutable Tensor of type float16, with the shape of [num_step, batch_size, hidden_size]. \n
+ *
+ */
+REG_OP(RnnGenMask)
+    .INPUT(seq_length, TensorType({DT_INT32}))
+    .OUTPUT(seq_mask, TensorType({DT_FLOAT16}))
+    .REQUIRED_ATTR(num_step, Int)
+    .REQUIRED_ATTR(hidden_size, Int)
+    .OP_END_FACTORY_REG(RnnGenMask)
+
+/**
+* @brief Creates a criterion that optimizes a multi-class multi-classification hinge loss (margin-based loss)
+* between input x (a 2D mini-batch Tensor) and output y (which is a 2D Tensor of target class indices). \n
+
+* @par Inputs:
+* Two inputs, including:
+* @li x: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li target: A tensor of type int32. \n
+
+* @par Attributes:
+* reduction: An optional string. Defaults to "mean". \n
+
+* @par Outputs:
+* @li y: A Tensor with the same element type as input "x". \n
+* @li is_target: A Tensor with the same element type as input "target". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator MultiLabelMarginLoss. \n
+*/
+REG_OP(MultilabelMarginLoss)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(target, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(is_target, TensorType({DT_INT32}))
+    .ATTR(reduction, String, "mean")
+    .OP_END_FACTORY_REG(MultilabelMarginLoss)
+
+/**
+*@brief Performs batch normalization . \n
+*@par Inputs:
+* Two inputs, including:
+*@li input_x: A Tensor. Support float32. Shape (n, c, d).
+*@li seq_len: A Tensor. Each batch normalize data num. Support Int32. Shape (n, ). \n +*@par Attributes: +*@li normalize_type: Str. Support "per_feature" or "all_features". +*@li epsilon: An optional float32, specifying the small value added to +variance to avoid dividing by zero. Defaults to "0.00001" . \n +*@par Outputs: +* One outputs +*@li output_y: A Tensor for the normalized "x".Support float32. shape (n, c, d).\n +*/ +REG_OP(NormalizeBatch) + .INPUT(input_x, TensorType({ DT_FLOAT })) + .INPUT(seq_len, TensorType({ DT_INT32 })) + .OUTPUT(output_y, TensorType({ DT_FLOAT })) + .REQUIRED_ATTR(normalize_type, String) + .ATTR(epsilon, Float, 0.00001) + .OP_END_FACTORY_REG(NormalizeBatch) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_NORM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_ops.h b/third_party/fwkacllib/inc/ops/nn_ops.h index 9edc469a..49fd02fa 100644 --- a/third_party/fwkacllib/inc/ops/nn_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,7 +20,144 @@ */ #ifndef OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ #define OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ - +#include "graph/operator_reg.h" #include "nn_pooling_ops.h" +namespace ge { +/** +* @brief Says whether the targets are in the top "k" predictions . \n + +* @par Inputs: +* Three inputs, including: +* @li predictions: A 2D Tensor of type float32. A "batch_size * classes" tensor. +* @li targets: A 1D Tensor of type IndexNumberType. A batch_size tensor of class ids. +* @li k: A 1D Tensor of the same type as "targets". +* Specifies the number of top elements to look at for computing precision . \n + +* @par Outputs: +* precision: A Tensor of type bool . \n + +* @attention Constraints: +* @li targets must be non-negative tensor. + +* @par Third-party framework compatibility +* @li Compatible with the TensorFlow operator InTopKV2. +*/ +REG_OP(InTopKV2) + .INPUT(predictions, TensorType({DT_FLOAT})) + .INPUT(targets, TensorType(IndexNumberType)) + .INPUT(k, TensorType({IndexNumberType})) + .OUTPUT(precision, TensorType({DT_BOOL})) + .OP_END_FACTORY_REG(InTopKV2) + +/** +*@brief Performs batch normalization . \n + +*@par Inputs: +* Five inputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li x: A 4D or 5D Tensor of type float16 or float32, with format NHWC or NCHW for 4D or NC1HWC0 for 5D. +*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the scaling factor. +*@li offset: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the offset. +*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the mean used for inference. Must be "None" if the +operation is used for training. +*@li variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be +5D if input "x" is with format NC1HWC0. Specifies the variance used for inference. Must be "None" +if the operation is used for training . \n + +*@par Attributes: +*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.0001". 
+*@li data_format: An optional string, specifying the format of "x". Defaults to "NHWC". +*@li is_training: An optional bool, specifying if the operation is used for training or inference. Defaults to "True" . \n + +*@par Outputs: +* Five outputs, including: (NHWC, NCHW, or NC1HWC0 supported) +*@li y: A 4D or 5D Tensor of type float16 or float32 for the normalized "x", with format NHWC or NCHW for 4D or NC1HWC0 for 5D. +*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. Must be 5D +if input "x" is with format NC1HWC0. Specifies the mean of "x". +*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x". +*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the mean of "x" for gradient computation. Pass "None" to skip this output. +*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if input "x" is with format NHWC or NCHW. +Must be 5D if input "x" is with format NC1HWC0. Specifies the variance of "x" for gradient computation. Pass "None" to skip this output . \n + +*@attention Constraints: +*@li If the operation is used for inference and outputs "reserve_space_1" and "reserve_space_2" are available, +then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance". +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction . \n +*/ +REG_OP(FusedBatchNormV2) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(offset, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT})) + .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(batch_mean, TensorType({DT_FLOAT})) + .OUTPUT(batch_variance, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_1, TensorType({DT_FLOAT})) + .OUTPUT(reserve_space_2, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .ATTR(data_format, String, "NHWC") + .ATTR(is_training, Bool, true) + .OP_END_FACTORY_REG(FusedBatchNormV2) + +/** + * @brief: Large amount of data sort.First operator of TopK. + * @par Inputs: + * two input, including: + * @li input_data: A Tensor. Data to be sorted. Support float16 + * @li input_index: A Tensor. Range(0, 2048). Datatype and format is same as input_data. + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 1 output, including: + * @li output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel. + */ +REG_OP(SegmentSort) + .INPUT(input_data, TensorType({DT_FLOAT16})) + .INPUT(input_index, TensorType({DT_FLOAT16})) + .OUTPUT(output_proposal, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(SegmentSort) + +/** + * @brief: Large amount of data sort.Second operator of TopK. + * @par Inputs: + * two input, including: + * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16 + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 1 output, including: + * @li output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel. 
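+ *
+ * For the merge semantics, a host-side sketch (illustrative only; the real
+ * op works on float16 proposal tensors, not Python lists):
+ *
+ *     import heapq, itertools
+ *
+ *     def multi_merge(sorted_segments, k_num):
+ *         # each segment is already sorted in descending order; merge them
+ *         # and keep only the k_num largest entries
+ *         merged = heapq.merge(*sorted_segments, reverse=True)
+ *         return list(itertools.islice(merged, k_num))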
+ */ +REG_OP(MultiMerge) + .INPUT(input_proposal, TensorType({DT_FLOAT16})) + .OUTPUT(output_proposal, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(MultiMerge) + +/** + * @brief: Large amount of data sort.Third operator of TopK. + * @par Inputs: + * two input, including: + * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16 + * @par Attributes: + * @li k_num: Int.Number to be sorted. + * @par Outputs: + * 2 output, including: + * @li output_data: A Tensor. Datatype and format is same as input_data. Data sorted. + * @li output_index: A Tensor. int32. Data index. + */ +REG_OP(SingleMerge) + .INPUT(input_proposal, TensorType({DT_FLOAT16})) + .OUTPUT(output_data, TensorType({DT_FLOAT16})) + .OUTPUT(output_index, TensorType({DT_INT32})) + .REQUIRED_ATTR(k_num, Int) + .OP_END_FACTORY_REG(SingleMerge) +}// namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h index ab35ba47..80a21333 100644 --- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -182,6 +182,128 @@ REG_OP(AvgPool3D) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(AvgPool3D) + +/** +*@brief Performs average pooling on the input. + +*@par Inputs: +*@li x: A 5-D Tensor of shape [batch, depth, height, width, channels] and type float16, float32, double. +*@li filter: An optional tensor of type float16, float32, double, fractal_z_3d layout. +*@li multiplier: An optional tensor of float16, float32, double. + +*@par Attributes: +*@li ksize: List of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor. +*@li strides:List of ints that has length 1, 3 or 5. The stride of the sliding window for each dimension of the input tensor. +*@li pads: List of ints, implicit zero paddings on both sides of the input. +*@li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +*@li count_include_pad: When true, will include the zero-padding in the averaging calculation. +*@li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +*@li data_format: A string, format of input data . \n + +*@par Outputs: +*y: The average pooled output tensor . \n + +*@attention Constraints: +*@li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator AvgPool3D. +*/ +REG_OP(AvgPool3DD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DD) + +/** +* @brief Computes AvgPool3DGrad function. + +* @par Inputs: +* @li orig_input_shape: An NDHWC tensor of type int32. 
+* @li grads: An NDHWC tensor of type float16, float32, or double. + +* @par Attributes: +* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li strides:List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor. +* @li pads: List of ints, implicit zero paddings on both sides of the input. +* @li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +* @li count_include_pad: When true, will include the zero-padding in the averaging calculation. +* @li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +* @li data_format: A string, format of input data. + +* @par Outputs: +* @output: A mutable tensor with the same shape and type as "orig_input_shape". + +* @attention Constraints: +* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] + +* @par Third-party framework compatibility +* @li Compatible with the TensorFlow operator AvgPoolGrad. +*/ + +REG_OP(AvgPool3DGrad) + .INPUT(orig_input_shape, TensorType({DT_INT32})) + .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(output, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DGrad) + +/** +* @brief Performs average pooling on the input. + +* @par Inputs: +* @li grads: An NDHWC tensor of type float16. +* @li filter: An optional tensor of type float16, fractal_z_3d layout. +* @li multiplier: An optional tensor of float16. + +* @par Attributes: +* @li orig_input_shape: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor. +* @li strides:List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor. +* @li pads: List of ints, implicit zero paddings on both sides of the input. +* @li ceil_mode: When true, will use ceil instead of floor in the formula to compute the output shape. +* @li count_include_pad: When true, will include the zero-padding in the averaging calculation. +* @li divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. +* @li data_format: A string, format of input data . \n + +* @par Outputs: +* @output: The average pooled output tensor . \n + +* @attention Constraints: +* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator AvgPool3DGradD. +*/ +REG_OP(AvgPool3DGradD) + .INPUT(grads, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16})) + .OUTPUT(output, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(orig_input_shape, ListInt) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(ceil_mode, Bool, false) + .ATTR(count_include_pad, Bool, true) + .ATTR(divisor_override, Int, 0) + .ATTR(data_format, String, "NDHWC") + .OP_END_FACTORY_REG(AvgPool3DGradD) + /** *@brief Performs max_pool_ext2 on the input . \n @@ -278,8 +400,8 @@ No default value. 
specifying the stride of the sliding window for each dimension of the input tensor. No default value. *@li padding: A required string type of float16. -*@li pads: A list type of int32. Default value {0, 0, 0}. -*@li dilation: A list type of int32. Default value {1, 1, 1}. +*@li pads: A list type of int32. Default value {0,0,0,0,0,0}. +*@li dilation: A list type of int32. Default value {1,1,1,1,1,1}. *@li ceil_mode: A ceil mode number of int32 . Default value 0. *@li data_format: An optional string. Defaults to "NDHWC" . \n @@ -302,12 +424,37 @@ REG_OP(MaxPool3D) .REQUIRED_ATTR(ksize, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(pads, ListInt, {0,0,0}) - .ATTR(dilation, ListInt, {1,1,1}) + .ATTR(pads, ListInt, {0,0,0,0,0,0}) + .ATTR(dilation, ListInt, {1,1,1,1,1,1}) .ATTR(ceil_mode, Int, 0) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(MaxPool3D) +/** +*@brief Applies a 2D adaptive max pooling over an input signal conposed of several input planes. \n +* The output is of size H x W, for any input size. + +* @par Inputs: +* One input, including: +* @li x: A Tensor. Must be one of the following data types: +* float16, float32, float64. \n + +* @par Attributes: +* @li output_size: A required list of 2 ints +* specifying the size (H,W) of the output tensor. \n + +* @par Outputs: +* @li y: A Tensor. Has the same data type as "x" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveMaxPool2d. +*/ +REG_OP(AdaptiveMaxPool2d) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .OUTPUT(argmax, TensorType::IndexNumberType()) + .REQUIRED_ATTR(output_size, ListInt) + .OP_END_FACTORY_REG(AdaptiveMaxPool2d) /** * @brief Computes second-order gradients of the maxpooling3d function . \n @@ -477,8 +624,9 @@ REG_OP(MaxPoolV2) *@par Inputs: * One input: -*x: An NC1HWC0 Tensor. Supported type: float, double, int32, - * uint8, int16, int8, int64, uint16, half, uint32, uint64 . \n +* x: An 4D Tensor. Supported type: float, double, int32, + * uint8, int16, int8, int64, uint16, half, uint32, uint64. + * Must set the format, supported format list ["NCHW, NHWC"]. \n *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, @@ -490,8 +638,8 @@ REG_OP(MaxPoolV2) *@li padding: A required string. No default value . \n *@par Outputs: -*y: A Tensor. Has the same type and format as input "x". -*argmax: A Tensor. Has the same type and format as input "x". +*@li y: A Tensor. Has the same type and format as input "x". +*@li argmax: A Tensor. Has the same type and format as input "x". *@attention Constraints: *@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, * ksize[1] * ksize[2] <= 255. @@ -517,10 +665,12 @@ REG_OP(MaxPoolWithArgmax) *@par Inputs: * Three inputs, including: -*@li x: An NC1HWC0 tensor. Supported type: float, double, int32, +*@li x: An 4d tensor. Supported type: float, double, int32, * uint8, int16, int8, int64, uint16, half, uint32, uint64. -*@li grad: An NC1HWC0 tensor. Supported type: float, double, int32, + * Must set the format, supported format list ["NCHW, NHWC"] +*@li grad: An 4d tensor. Supported type: float, double, int32, * uint8, int16, int8, int64, uint16, half, uint32, uint64. + * Must set the format, supported format list ["NCHW, NHWC"] *@li argmx: An NC1HWC0 tensor of type int32 or int64 . 
\n *@par Attributes: @@ -741,7 +891,7 @@ REG_OP(AvgPoolV2Grad) * @brief Computes gradients of averagev2 pooling function. * @par Inputs: -* @li input_grad: An NHWC tensor of type float16, float32, or double. +*input_grad: An NHWC tensor of type float16, float32, or double. * @par Attributes: * @li orig_input_shape: A required tuple or list of type int32. @@ -759,10 +909,10 @@ REG_OP(AvgPoolV2Grad) * @li data_format: An optional string. Defaults to "NHWC". * @par Outputs: -* @out_grad: A mutable tensor with the same shape and type as "orig_input". +*out_grad: A mutable tensor with the same shape and type as "orig_input". * @par Third-party framework compatibility -* @li Compatible with the TensorFlow operator AvgPoolGrad. +*Compatible with the TensorFlow operator AvgPoolGrad. */ REG_OP(AvgPoolV2GradD) .INPUT(input_grad, TensorType({DT_FLOAT16})) @@ -1037,6 +1187,7 @@ REG_OP(MaxPool3DGrad) .OUTPUT(y, TensorType::RealNumberType()) .REQUIRED_ATTR(ksize, ListInt) .REQUIRED_ATTR(strides, ListInt) + .ATTR(padding, String, "SAME") .REQUIRED_ATTR(pads, ListInt) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(MaxPool3DGrad) @@ -1107,7 +1258,7 @@ REG_OP(AvgPool1DD) *@par Inputs: * One input: -*x: An NC1HWC0 Tensor of type float16. +*x: An 4d Tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"]. *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for * each dimension of the input tensor. No default value. @@ -1148,9 +1299,9 @@ REG_OP(MaxPoolWithArgmaxV2) *@par Inputs: * Three inputs, including: -*@li x: An NC1HWC0 tensor of type float16. -*@li grad: An NC1HWC0 tensor of type float16. -*@li argmx: An NC1HWC0 tensor of type uint16 or int64 . \n +*@li x: An 4d tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"] +*@li grad: An 4d tensor of type float16. Must set the format, supported format list ["NCHW, NHWC"] +*@li argmx: An 4d tensor of type uint16 or int64. Must set the format, supported format list ["NCHW, NHWC"] \n *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for @@ -1291,5 +1442,306 @@ REG_OP(MaxPoolV3Grad) .ATTR(global_pooling, Bool, false) .ATTR(ceil_mode, Bool, false) .OP_END_FACTORY_REG(MaxPoolV3Grad) + +/** +*@brief Performs Dilation2D on the input . \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. \n + +*@par Attributes: +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. +*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: An optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "x" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2D. 
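+
+* For reference, a numpy sketch of the morphological dilation this op
+* performs (illustrative only; NHWC layout, VALID padding and float input
+* are assumptions made for brevity):
+*
+*     import numpy as np
+*
+*     def dilation2d(x, filt, strides, rates):
+*         n, h, w, c = x.shape
+*         fh, fw, _ = filt.shape
+*         sh, sw = strides[1], strides[2]
+*         rh, rw = rates[1], rates[2]
+*         oh = (h - (fh - 1) * rh - 1) // sh + 1
+*         ow = (w - (fw - 1) * rw - 1) // sw + 1
+*         y = np.full((n, oh, ow, c), -np.inf, dtype=x.dtype)
+*         for i in range(oh):
+*             for j in range(ow):
+*                 for di in range(fh):
+*                     for dj in range(fw):
+*                         cand = (x[:, i * sh + di * rh, j * sw + dj * rw, :]
+*                                 + filt[di, dj, :])
+*                         y[:, i, j, :] = np.maximum(y[:, i, j, :], cand)
+*         return y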
+*/ +REG_OP(Dilation2D) + .INPUT(x,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y,TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0,0,0,0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2D) + +/** +*@brief Performs Dilation2DBackpropFilter on the input. \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. +*out_backprop: Has the same type and format as input x and the c dimension is same with x. \n + +*@par Attributes +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimension are 1. +*@li rates: A required list of 4 ints, the rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: A optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "filter" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2DBackpropFilter. +*/ + +REG_OP(Dilation2DBackpropFilter) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(out_backprop, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0, 0, 0, 0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2DBackpropFilter) + +/** +*@brief Performs Dilation2DBackpropInput on the input. \n + +*@par Inputs: +*x: A tensor of shape is 4d, format is support NHWC. +*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. +*out_backprop: Has the same type and format as input x and the c dimension is same with x. \n + +*@par Attributes +*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimension are 1. +*@li rates: A required list of 4 ints, the rates of the N and C dimensions are 1. +*@li padding_mode: A optional string. Defaults to "SAME", it support SAME and VALID. +*@li pads: A optional list of 4 ints. +*@li ceil_mode: An optional bool. Defaults to "false". Use ceil or floor to calculate the output size when padding_mode is "CALCULATED". +*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). 
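+
+* For reference, a numpy sketch of this backward pass (illustrative only;
+* NHWC layout, VALID padding and float input are assumptions): the incoming
+* gradient is routed back to the input position that won the max in the
+* forward pass.
+*
+*     import numpy as np
+*
+*     def dilation2d_backprop_input(x, filt, out_backprop, strides, rates):
+*         n, _, _, c = x.shape
+*         fh, fw, _ = filt.shape
+*         sh, sw = strides[1], strides[2]
+*         rh, rw = rates[1], rates[2]
+*         _, oh, ow, _ = out_backprop.shape
+*         gx = np.zeros_like(x)
+*         for b in range(n):
+*             for i in range(oh):
+*                 for j in range(ow):
+*                     for k in range(c):
+*                         vals = [(x[b, i * sh + di * rh, j * sw + dj * rw, k]
+*                                  + filt[di, dj, k], di, dj)
+*                                 for di in range(fh) for dj in range(fw)]
+*                         _, di, dj = max(vals)
+*                         gx[b, i * sh + di * rh, j * sw + dj * rw, k] += \
+*                             out_backprop[b, i, j, k]
+*         return gx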
\n + +*@par Outputs: +*y: The output tensor. Has the same type and format as input "x" . \n + +*@par Third-party framework compatibility +* Compatible with the TensorFlow operator Dilation2DBackpropInput. +*/ + +REG_OP(Dilation2DBackpropInput) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(filter, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .INPUT(out_backprop, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .OUTPUT(y, + TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16})) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(rates, ListInt) + .ATTR(padding_mode, String, "SAME") + .ATTR(pads, ListInt, {0, 0, 0, 0}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NHWC") + .OP_END_FACTORY_REG(Dilation2DBackpropInput) + +/** +* @brief Applies a 2D adaptive average pooling over +* an input signal composed of several input planes. \n + +* @par Inputs: +* One input, including: +* @li x: A Tensor. Must be one of the following data types: +* float16, float32. \n + +* @par Attributes: +* @li output_size: A required list of 2 ints +* specifying the size (H,W) of the output tensor. \n + +* @par Outputs: +* @li y: A Tensor. Has the same data type as "x" \n + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveAvgPool2d. +*/ +REG_OP(AdaptiveAvgPool2d) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(output_size, ListInt) + .OP_END_FACTORY_REG(AdaptiveAvgPool2d) + +/** +* @brief Compute gradients of adaptive averagev2 pooling function. + +* @par Inputs: +* @li input_grad: A Tensor. Must be one of the following data types: +* float16, float32. + +* @par Attributes: +* @li orig_input_shape: A required tuple or list of type int32. + +* @par Outputs: +* @li output_grad: A tensor with the same type as "input_grad". + +* @par Third-party framework compatibility +* Compatible with the Pytorch operator AdaptiveAvgPool2dGrad. +*/ +REG_OP(AdaptiveAvgPool2dGrad) + .INPUT(input_grad, TensorType({DT_FLOAT, DT_FLOAT16})) + .OUTPUT(output_grad, TensorType({DT_FLOAT, DT_FLOAT16})) + .REQUIRED_ATTR(orig_input_shape, ListInt) + .OP_END_FACTORY_REG(AdaptiveAvgPool2dGrad) + +/** +* @brief Performs the backpropagation of MaxPoolWithGradArgmaxV1. + +* @par Inputs: +* Three inputs, including: +* @li x: An NC1HWC0 tensor of type float16. +* @li grad: An NC1HWC0 tensor of type float16. +* @li argmax: An NC1HWC0 tensor of type uint16 or int64. \n + +* @par Attributes: +* @li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for +* each dimension of the input tensor. No default value. +* @li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for +* each dimension of the input tensor. No default value. +* @li pads: A required listint. \n + +* @par Outputs: +* y: A Tensor. Has the same type and format as input "x". \n + +* @attention Constraints: +* @li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255. +* @li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1 +* @li "pads" is listint. +* @li "ceil_mode" defaults to False. +* @li "data_format" defaults to "NC1HWC0". 
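+
+* For the semantics, a numpy sketch of this backward pass on a plain tensor
+* (illustrative only; the real op consumes the NC1HWC0 layout and a packed
+* uint16 argmax, both omitted here):
+*
+*     import numpy as np
+*
+*     def max_pool_grad_with_argmax(x, grad, argmax):
+*         # argmax holds, per pooled position, the flat index of the input
+*         # element that produced the maximum; scatter the gradient back
+*         gx = np.zeros(x.size, dtype=grad.dtype)
+*         np.add.at(gx, argmax.ravel(), grad.ravel())
+*         return gx.reshape(x.shape)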
\n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator MaxPoolGradWithArgmaxV1. +*/ + +REG_OP(MaxPoolGradWithArgmaxV1) + .INPUT(x, TensorType({DT_FLOAT16})) + .INPUT(grad, TensorType({DT_FLOAT16})) + .INPUT(argmax, TensorType({DT_UINT16})) + .OUTPUT(y, TensorType({DT_FLOAT16})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(dtype, Int, 3) + .ATTR(dilation, ListInt, {1, 1, 1, 1}) + .ATTR(ceil_mode, Bool, false) + .OP_END_FACTORY_REG(MaxPoolGradWithArgmaxV1) + +/** +* @brief Performs max pooling on the input and outputs both max values and indices. + +* @par Inputs: +* One input: +* x: An NC1HWC0 Tensor of type float16. \n + +* @par Attributes: +* @li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for +* each dimension of the input tensor. No default value. +* @li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for +* each dimension of the input tensor. No default value. +* @li pads: A required string. No default value. \n + +* @par Outputs: +* y: A Tensor. Has the same type and format as input "x". +* argmax: A Tensor. type:uint16, format:NC1HWC0. \n + +* @attention Constraints: +* @li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255. +* @li "stride is a list that has length 4: strides[0] = 1 or strides[3] = 1, strides[1] <= 63, strides[0] >= 1, +* strides[2] <= 63, strides[2] >= 1. +* @li "pads" is listint. +* @li "ceil_mode" defaults to False. +* @li "data_format" defaults to "NC1HWC0". \n + +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator MaxPoolWithArgmaxV1. +*/ +REG_OP(MaxPoolWithArgmaxV1) + .INPUT(x, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(argmax, TensorType({DT_UINT16})) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(dtype, Int, 3) + .ATTR(dilation, ListInt, {1, 1, 1, 1}) + .ATTR(ceil_mode, Bool, false) + .OP_END_FACTORY_REG(MaxPoolWithArgmaxV1) + +/** +*@brief Randomly sample a subset of positive and negative examples,and overwrite +the label vector to the ignore value (-1) for all elements that are not +included in the sample.\n + +* @par Inputs: +* One input: +* labels: shape of labels,(N, ) label vector with values. \n + +* @par Attributes: +* @li batch_size_per_images: A require attribute of type int. +* @li positive_fraction: A require attribute of type float. + +*@par Outputs: +*y: The result of subSample. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator SubSample. +*@par Restrictions: +*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly. +*/ +REG_OP(SubSample) + .INPUT(labels, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_INT32})) + .REQUIRED_ATTR(batch_size_per_images, Int) + .REQUIRED_ATTR(positive_fraction, Float) + .OP_END_FACTORY_REG(SubSample) + +/** +*@brief Randomly sample a subset of positive and negative examples,and overwrite +the label vector to the ignore value (-1) for all elements that are not +included in the sample.\n + +* @par Inputs: +* two inputs, including: +* @li labels: shape of labels,(N, ) label vector with values:. +* @li shuffle_matrix: random matrix with shape (N, ). \n + +* @par Attributes: +* @li batch_size_per_images: A require attribute of type int. 
+/**
+*@brief Randomly sample a subset of positive and negative examples, and overwrite
+the label vector to the ignore value (-1) for all elements that are not
+included in the sample.\n
+
+* @par Inputs:
+* Two inputs, including:
+* @li labels: shape of labels, (N, ) label vector with values.
+* @li shuffle_matrix: random matrix with shape (N, ). \n
+
+* @par Attributes:
+* @li batch_size_per_images: A required attribute of type int.
+* @li positive_fraction: A required attribute of type float.
+
+*@par Outputs:
+*y: The result of subSample. \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator SubSampleLabels.
+*@par Restrictions:
+*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly.
+*/
+REG_OP(SubSampleLabels)
+    .INPUT(labels, TensorType({DT_INT32}))
+    .INPUT(shuffle_matrix, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_INT32}))
+    .REQUIRED_ATTR(batch_size_per_images, Int)
+    .REQUIRED_ATTR(positive_fraction, Float)
+    .OP_END_FACTORY_REG(SubSampleLabels)
+
 }  // namespace ge
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H
diff --git a/third_party/fwkacllib/inc/ops/nn_training_ops.h b/third_party/fwkacllib/inc/ops/nn_training_ops.h
index 047fd6da..75e91aee 100644
--- a/third_party/fwkacllib/inc/ops/nn_training_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_training_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -2101,6 +2101,55 @@ REG_OP(FusedMulApplyMomentumExtern)
     .ATTR(use_locking, Bool, false)
     .OP_END_FACTORY_REG(FusedMulApplyMomentumExtern)
 
+/**
+*@brief Updates '*var' according to the momentum scheme.
+* accum = accum * momentum - x1 * x2 * lr
+* if use_nesterov is True:
+* var += accum * momentum - x1 * x2 * lr
+* else:
+* var += accum
+*
+*@par Inputs:
+*@li var: A mutable tensor. Must be one of the data types defined in
+* TensorType::NumberType(). Should be from a Variable().
+*@li accum: A mutable tensor. Has the same type as "var". Should be from a
+* Variable().
+*@li lr: A tensor for the learning rate. Has the same type as "var". Should be
+* from a Variable().
+*@li x1: A Tensor of type TensorType::NumberType().
+*@li momentum: A scalar. Has the same type as "var".
+*@li x2: A scalar with the same type as "var".
+*
+*@par Attributes:
+*@li use_nesterov: An optional bool. Defaults to "False".
+* If "True", var will be updated by using Nesterov momentum.
+*@li use_locking: An optional bool. Defaults to "False".
+* If "True", updating of the "var" tensor is protected by a lock;
+* otherwise the behavior is undefined, but may exhibit less contention.
+*
+*@par Outputs:
+*@li var: A mutable tensor. Has the same type as input "var".
+*@li accum: A mutable tensor. Has the same type as input "accum".
+*
+*@attention Constraints:
+* The input tensors must have the same shape.
+*
+*@par Third-party framework compatibility
+* Compatible with the TensorFlow operator ResourceApplyKerasMomentum.
+*
+*/
+REG_OP(FusedMulApplyKerasMomentum)
+    .INPUT(var, TensorType::NumberType())
+    .INPUT(accum, TensorType::NumberType())
+    .INPUT(lr, TensorType::NumberType())
+    .INPUT(x1, TensorType::NumberType())
+    .INPUT(momentum, TensorType::NumberType())
+    .INPUT(x2, TensorType::NumberType())
+    .OUTPUT(var, TensorType::NumberType())
+    .OUTPUT(accum, TensorType::NumberType())
+    .ATTR(use_locking, Bool, false)
+    .ATTR(use_nesterov, Bool, false)
+    .OP_END_FACTORY_REG(FusedMulApplyKerasMomentum)
+
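The update rule quoted in the comment above can be written out directly. This is a scalar reference sketch of that rule only (a plain loop over flat vectors, with lr simplified to a scalar), not the device implementation:

#include <vector>

// Reference of the documented FusedMulApplyKerasMomentum update:
// accum = accum * momentum - x1 * x2 * lr, then var is advanced either
// with Nesterov look-ahead or by plain accumulation.
void FusedMulApplyKerasMomentumRef(std::vector<float> &var,
                                   std::vector<float> &accum,
                                   const std::vector<float> &x1,
                                   float x2, float lr, float momentum,
                                   bool use_nesterov) {
  for (size_t i = 0; i < var.size(); ++i) {
    accum[i] = accum[i] * momentum - x1[i] * x2 * lr;
    if (use_nesterov) {
      var[i] += accum[i] * momentum - x1[i] * x2 * lr;
    } else {
      var[i] += accum[i];
    }
  }
}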
 /**
 *@brief Update "g" according to the LARS algorithm . \n
diff --git a/third_party/fwkacllib/inc/ops/no_op.h b/third_party/fwkacllib/inc/ops/no_op.h
index 7834591c..b27b1fa0 100644
--- a/third_party/fwkacllib/inc/ops/no_op.h
+++ b/third_party/fwkacllib/inc/ops/no_op.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
index e0e5dfc6..ca1c24eb 100644
--- a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
+++ b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -223,7 +223,29 @@ REG_OP(Relu6Grad)
     .INPUT(features, TensorType::RealNumberType())
     .OUTPUT(backprops, TensorType::RealNumberType())
     .OP_END_FACTORY_REG(Relu6Grad)
-
+/**
+*@brief Calculate the elu_grad_v2 function.
+*Applies the element-wise function:
+* Computes the backward of the elu: 1 if x > 0; otherwise elu(x) + alpha .
+*@par Inputs:
+*Two inputs, including:
+* @li grads: A tensor. Must be one of the following types:
+* float16, float32.
+* @li activations: A tensor. Must be one of the following types:
+* float16, float32.
+*
+*@par Outputs:
+*y: A Tensor with the same type and shape as "grads".
+*
+*@par Attributes:
+*@li alpha: An optional float, scalar parameter. Defaults to 1.0 .
+*/
+REG_OP(EluGradV2)
+    .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(activations, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(alpha, Float, 1.0)
+    .OP_END_FACTORY_REG(EluGradV2)
 /**
 * @brief Compute sigmoid of "x" element-wise . \n
@@ -508,6 +530,42 @@ REG_OP(Elu)
     .ATTR(alpha, Float, 1.0)
     .OP_END_FACTORY_REG(Elu)
 
+/**
+*@brief Continuously Differentiable Exponential Linear Units:
+* Perform the linear unit element-wise on the input tensor X using formula:
+* max(0, x) + min(0, alpha * (exp(x/alpha) - 1)). \n
+
+*@par Inputs:
+*x: A Tensor of type float16 or float32, for the input data . \n
+
+*@par Attributes:
+*@li alpha1: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
+*@li alpha2: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
+*@li alpha3: A float32. Defines at which positive value the ELU saturates. Defaults to "1.0" . \n
+
+*@par Outputs:
+*y: A Tensor of type float16 or float32, for the normalized result . \n
+
+*@attention Constraints:
+*@li The input is of type float16 or float32 . \n
+
+*@par Multiple batches supported or not
+*Supported
+*@par Third-party framework compatibility
+*@li Compatible with ONNX's Celu operator
+*/
+REG_OP(Celu)
+    .INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT,DT_FLOAT16}))
+    .ATTR(alpha1, Float, 1.0)
+    .ATTR(alpha2, Float, 1.0)
+    .ATTR(alpha3, Float, 1.0)
+    .OP_END_FACTORY_REG(Celu)
+
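The Celu formula quoted in the comment above translates directly into an element-wise reference. This sketch uses a single alpha for clarity; how alpha1/alpha2/alpha3 map onto the two branches is not stated in the registration, so that mapping is left out here:

#include <algorithm>
#include <cmath>

// Element-wise reference of the documented Celu formula:
// max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
float CeluRef(float x, float alpha) {
  return std::max(0.0f, x) +
         std::min(0.0f, alpha * (std::exp(x / alpha) - 1.0f));
}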
 /**
 *@brief Computes gradients for the exponential linear (Elu) operation.
 *
@@ -640,6 +698,352 @@ REG_OP(Mish)
     .OUTPUT(y, TensorType({ DT_FLOAT,DT_FLOAT16 }))
     .OP_END_FACTORY_REG(Mish)
 
+/**
+ * @brief: pytorch mish_grad operator.
+ * @par Inputs:
+ * Three inputs, including:
+ * @li grad: A Tensor. Shape, datatype and format are the same as "x".
+ * @li x: A Tensor. Must be one of the following types: float16, float32.
+ * @li tanhx: An optional Tensor. Shape, datatype and format are the same as "x".
+ * @par Outputs:
+ * One output, including:
+ * @li x_grad: A Tensor. Shape, datatype and format are the same as "x".
+ */
+
+REG_OP(MishGrad)
+    .INPUT(grad, TensorType({ DT_FLOAT,DT_FLOAT16 }))
+    .INPUT(x, TensorType({ DT_FLOAT,DT_FLOAT16 }))
+    .OPTIONAL_INPUT(tanhx, TensorType({ DT_FLOAT,DT_FLOAT16 }))
+    .OUTPUT(x_grad, TensorType({ DT_FLOAT,DT_FLOAT16 }))
+    .OP_END_FACTORY_REG(MishGrad)
+
+/**
+ * @brief pytorch hardtanh_backward operator.
+ *
+ * @par Inputs:
+ * 2 inputs, including:
+ * @li result, output tensor of the forward hardtanh, used to locate the
+ * linear region, datatype: float16/float32, format:ND/5HD.
+ * @li grad, gradient tensor propagated from the upstream layer,
+ * datatype:float16/float32, format:ND/5HD. \n

+ * @par Attributes:
+ * 2 attributes, including:
+ * @li min_val, minimum value of the linear region range, datatype:float.
+ * @li max_val, maximum value of the linear region range, datatype:float. \n

+ * @par Outputs:
+ * 1 output, including:
+ * @li y, hardtanh_backward output tensor, datatype and format are the same as
+ * input result. \n

+ * @attention Constraints:
+ * This operator only supports dataType: float16/float32, format: ND/5HD. \n

+ * @par Third-party framework compatibility
+ * Compatible with the Pytorch operator HardtanhGrad.
+ */
+REG_OP(HardtanhGrad)
+    .INPUT(result, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "First operand." */
+    .INPUT(grad, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Second operand." */
+    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Result, has same element type as two inputs" */
+    .ATTR(min_val, Float, -1.0)
+    .ATTR(max_val, Float, 1.0)
+    .OP_END_FACTORY_REG(HardtanhGrad)
+
+/**
+* @brief Calculates the softplus activation function with attributes of beta and threshold. \n

+* @par Inputs:
+* One input, including:
+* @li x: A mutable Tensor. Must be one of the following types:
+* float16, float32. \n

+* @par Attributes:
+* @li beta: An optional float. Defaults to "1.0" \n

+* @li threshold: An optional float. Defaults to "20.0" \n

+* @par Outputs:
+* @li y: A mutable Tensor. Has the same type as "x" \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Softplus.
+*/
+REG_OP(SoftplusV2)
+    .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .ATTR(beta, Float, 1.0)
+    .ATTR(threshold, Float, 20.0)
+    .OP_END_FACTORY_REG(SoftplusV2)
+
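Since the comment above only names beta and threshold, here is an element-wise sketch assuming the PyTorch Softplus definition it cites; the exact device behavior near the threshold is an assumption:

#include <cmath>

// Sketch of SoftplusV2, assuming the PyTorch Softplus definition:
// y = (1/beta) * log(1 + exp(beta * x)), falling back to the identity
// when beta * x exceeds threshold for numerical stability.
float SoftplusV2Ref(float x, float beta = 1.0f, float threshold = 20.0f) {
  if (beta * x > threshold) {
    return x;
  }
  return std::log1p(std::exp(beta * x)) / beta;
}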
+/**
+* @brief Calculates the reversed outputs of the function "softplus_v2". \n

+* @par Inputs:
+* Two inputs, including:
+* @li input_gradients: A mutable Tensor. Must be one of the following types:
+* float16, float32.
+* @li input_features: A mutable Tensor of the same type as "input_gradients" \n

+* @par Attributes:
+* @li beta: An optional float. Defaults to "1.0" \n

+* @li threshold: An optional float. Defaults to "20.0" \n

+* @par Outputs:
+* @li output_backprops: A mutable Tensor. Has the same type as "input_gradients" \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator SoftplusGrad.
+*/
+REG_OP(SoftplusV2Grad)
+    .INPUT(input_gradients, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .INPUT(input_features, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .OUTPUT(output_backprops, TensorType({ DT_FLOAT, DT_FLOAT16 }))
+    .ATTR(beta, Float, 1.0)
+    .ATTR(threshold, Float, 20.0)
+    .OP_END_FACTORY_REG(SoftplusV2Grad)
+
+/**
+ * @brief ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor)
+ * where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise.
+ *
+ * @par Inputs:
+ * one input, including:
+ * @li x: A Tensor. Must be one of the following types: float32, float16
+ *
+ * @par Attributes:
+ * @li alpha: An optional float. Defaults to 1.0.
+ *
+ * @par Outputs:
+ * one output, including:
+ * @li y: A Tensor of the same type as x
+ *
+ */
+REG_OP(ThresholdedRelu)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(alpha, Float, 1.0)
+    .OP_END_FACTORY_REG(ThresholdedRelu)
+
+/**
+* @brief Calculate the hard shrinkage function. \n

+* @par Inputs:
+* One input, including:
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32. \n

+* @par Attributes:
+* @li lambd: An optional float. Defaults to 0.5. \n

+* @par Outputs:
+* y: A Tensor with the same dtype and shape as "input_x". \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Hardshrink. \n
+*/
+REG_OP(HardShrink)
+    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(lambd, Float, 0.5)
+    .OP_END_FACTORY_REG(HardShrink)
+
+/**
+*@brief Calculate the hard shrink grad function. \n
+*
+* Computes the gradient for the HardShrink: if x > lambda or x < -lambda, x, otherwise 0
+*
+*@par Inputs:
+*Two inputs, including:
+* @li gradients: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li features: A tensor. Must be one of the following types:
+* float16, float32. \n
+*
+*@par Outputs:
+*backprops: A Tensor with the same type and shape as "features". \n
+*
+*@par Attributes:
+*@li lambd: An optional float. Defaults to 0.5. \n
+*
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator Hardshrink_backward. \n
+*/
+REG_OP(HardShrinkGrad)
+    .INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(lambd, Float, 0.5)
+    .OP_END_FACTORY_REG(HardShrinkGrad)
+
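The forward rule for HardShrink follows directly from the gradient description quoted above. A one-line element-wise reference, for illustration only:

// Reference of HardShrink, matching the rule quoted in the
// HardShrinkGrad comment above: pass x through when |x| > lambd,
// otherwise output 0.
float HardShrinkRef(float x, float lambd = 0.5f) {
  return (x > lambd || x < -lambd) ? x : 0.0f;
}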
+/**
+* @brief Calculate the hard sigmoid function. \n

+* @par Inputs:
+* One input, including:
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32, int32. \n

+* @par Attributes:
+* @li alpha: An optional float. Defaults to 0.16666666. \n
+* @li beta: An optional float. Defaults to 0.5. \n

+* @par Outputs:
+* y: A Tensor with the same dtype and shape as "input_x". \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Hardsigmoid. \n
+*/
+REG_OP(HardSigmoid)
+    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
+    .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(alpha, Float, 0.16666666)
+    .ATTR(beta, Float, 0.5)
+    .OP_END_FACTORY_REG(HardSigmoid)
+
+/**
+* @brief Calculate the soft shrinkage function. \n

+* @par Inputs:
+* One input, including:
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32. \n

+* @par Attributes:
+* @li lambd: An optional float. Defaults to 0.5. \n

+* @par Outputs:
+* y: A Tensor with the same dtype and shape as "input_x". \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator Softshrink. \n
+*/
+REG_OP(SoftShrink)
+    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(lambd, Float, 0.5)
+    .OP_END_FACTORY_REG(SoftShrink)
+
+/**
+* @brief Calculate the reversed outputs of the function "soft_shrink". \n

+* @par Inputs:
+* Two inputs, including:
+* @li input_grad: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li input_x: A tensor of the same dtype as "input_grad". \n

+* @par Attributes:
+* @li lambd: An optional float. Defaults to 0.5. \n

+* @par Outputs:
+* y: A Tensor of the same dtype and shape as "input_grad". \n

+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator SoftShrinkGrad. \n
+*/
+REG_OP(SoftShrinkGrad)
+    .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(lambd, Float, 0.5)
+    .OP_END_FACTORY_REG(SoftShrinkGrad)
+
+/**
+*@brief Calculate the gradient of log sigmoid. \n

+*@par Inputs:
+*Two inputs, including:
+* @li grads: A tensor, gradient of previous layer. Must be one of the following types:
+* float16, float32. \n
+* @li features: A tensor, input of log sigmoid. Must be one of the following types:
+* float16, float32. \n

+*@par Outputs:
+*One output, including:
+* @li backprops: A tensor with the same type and shape as "grads". \n

+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator LogSigmoidBackward. \n
+*/
+REG_OP(LogSigmoidGrad)
+    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(LogSigmoidGrad)
+
+/**
+*@brief Calculate -ln(1+e^(-x)). \n

+*@par Inputs:
+*One input, including:
+* @li x: A tensor. Must be one of the following types:
+* float16, float32. \n

+*@par Outputs:
+*One output, including:
+* @li y: A tensor with the same type and shape as "x". \n

+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator LogSigmoid. \n
+*/
+REG_OP(LogSigmoid)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) /* "input:x" */
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) /* "output:y" */
+    .OP_END_FACTORY_REG(LogSigmoid)
+
+/**
+*@brief Calculate the backward outputs of the function "hard_sigmoid" \n

+*@par Inputs:
+*Two inputs, including:
+* @li grads: A tensor. Must be one of the following types:
+* float16, float32. \n
+* @li input_x: A tensor. Must be one of the following types:
+* float16, float32. \n

+*@par Outputs:
+*One output, including:
+* @li y: A tensor with the same type and shape as "input_x". \n

+* @par Attributes:
+* @li alpha: An optional float. Defaults to 0.16666666. \n
+* @li beta: An optional float. Defaults to 0.5. \n

+*@par Third-party framework compatibility
+*Compatible with the backward of the Pytorch operator Hardsigmoid. \n
+*/
+REG_OP(HardSigmoidGrad)
+    .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(alpha, Float, 0.16666666)
+    .ATTR(beta, Float, 0.5)
+    .OP_END_FACTORY_REG(HardSigmoidGrad)
+
+/**
+* @brief Calculate the shrink function. \n

+* @par Inputs:
+* One input, including:
+* @li input_x: A tensor.
Must be one of the following types: +* float16, float32. \n + +* @par Attributes: +* @li lambd: An optional float. Defaults to 0.5. \n +* @li bias: An optional float. Defaults to 0.0. \n + +* @par Outputs: +* y: A Tensor with the same dtype and shape of input_x's. \n + +* @par Third-party framework compatibility +* Compatible with the ONNX operator Shrink. \n +*/ +REG_OP(Shrink) + .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(lambd, Float, 0.5) + .ATTR(bias, Float, 0.0) + .OP_END_FACTORY_REG(Shrink) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h index 8d7ef9f9..f36d2935 100644 --- a/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h +++ b/third_party/fwkacllib/inc/ops/npu_loss_scale_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/outfeed_ops.h b/third_party/fwkacllib/inc/ops/outfeed_ops.h index e0b783bc..53b9d701 100644 --- a/third_party/fwkacllib/inc/ops/outfeed_ops.h +++ b/third_party/fwkacllib/inc/ops/outfeed_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/pad_ops.h b/third_party/fwkacllib/inc/ops/pad_ops.h index f746b3b3..6854c866 100644 --- a/third_party/fwkacllib/inc/ops/pad_ops.h +++ b/third_party/fwkacllib/inc/ops/pad_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -101,7 +101,7 @@ REG_OP(FillD) */ REG_OP(BroadcastTo) .INPUT(x, TensorType::BasicType()) - .INPUT(shape, TensorType({DT_INT32})) + .INPUT(shape, TensorType({DT_INT32,DT_INT64})) .OUTPUT(y, TensorType::BasicType()) .OP_END_FACTORY_REG(BroadcastTo) @@ -161,7 +161,7 @@ REG_OP(Pad) *@brief Pads a tensor . \n *@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n +*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n *@par Attributes: *paddings: An optional "vector>". Defaults to "{}". @@ -180,8 +180,8 @@ REG_OP(Pad) * Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead. */ REG_OP(PadD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) .REQUIRED_ATTR(paddings, ListListInt) .OP_END_FACTORY_REG(PadD) @@ -213,7 +213,7 @@ REG_OP(PadV2) *@brief Pads a tensor . \n *@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n +*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n *constant_values: A Tensor. Must have the same type as input. *@par Attributes: @@ -227,10 +227,7 @@ REG_OP(PadV2) *y: A Tensor of the same type as "x" . 
\n *@par Third-party framework compatibility: -* Compatible with TensorFlow operator Pad. -* -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead. +* Compatible with TensorFlow operator PadV2. */ REG_OP(PadV2D) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) @@ -272,42 +269,42 @@ REG_OP(PadV3) .ATTR(paddings_contiguous, Bool, true) .OP_END_FACTORY_REG(PadV3) -/** -*@brief Pads a tensor. - -*@par Inputs: -*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32. - -*@par Attributes: -* @li paddings: An required "vector>". -* For each dimension D of input, paddings[D, 0] indicates how many -* values to add before the contents of tensor in that dimension, -* and paddings[D, 1] indicates how many values to add after the -* contents of tensor in that dimension. -* @li constant_values: An optional int value for pad. -* @li mode: An optional string, Defaults to "constant", indicates paddings mode, -* support "constant", "reflect", "edge" -* @li paddings_contiguous: An optional bool value, Defaults to true. -* If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...] -* If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...] - -*@par Outputs: -*y: A Tensor of the same type as "x". - -*@par Third-party framework compatibility: -* Compatible with ONNX operator Pad. - -* @par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead. -*/ -REG_OP(PadV3D) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) - .REQUIRED_ATTR(paddings, ListListInt) - .ATTR(constant_values, Int, 0) - .ATTR(mode, String, "constant") - .ATTR(paddings_contiguous, Bool, true) - .OP_END_FACTORY_REG(PadV3D) + /** + *@brief Pads a tensor. + + *@par Inputs: + *x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32. + + *@par Attributes: + * @li paddings: An required "vector>". + * For each dimension D of input, paddings[D, 0] indicates how many + * values to add before the contents of tensor in that dimension, + * and paddings[D, 1] indicates how many values to add after the + * contents of tensor in that dimension. + * @li constant_values: An optional int value for pad. + * @li mode: An optional string, Defaults to "constant", indicates paddings mode, + * support "constant", "reflect", "edge" + * @li paddings_contiguous: An optional bool value, Defaults to true. + * If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...] + * If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...] + + *@par Outputs: + *y: A Tensor of the same type as "x". + + *@par Third-party framework compatibility: + * Compatible with ONNX operator Pad. + + * @par Restrictions: + * Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead. + */ + REG_OP(PadV3D) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8})) + .REQUIRED_ATTR(paddings, ListListInt) + .ATTR(constant_values, Int, 0) + .ATTR(mode, String, "constant") + .ATTR(paddings_contiguous, Bool, true) + .OP_END_FACTORY_REG(PadV3D) /** *@brief Create a diagonal tensor @@ -403,5 +400,76 @@ REG_OP(EmbeddingRankId) .ATTR(mode, String, "mod") .OP_END_FACTORY_REG(EmbeddingRankId) +/** +*@brief EmbeddingLocalIndex, Sort statistics index according to rank_id \n + +*@par Inputs: +* @li addr_table: A 2D tensor which last dimension must be 3. 
+* @li index: A tensor with data type int32, int64, uint32, uint64.
+
+*@par Attributes:
+* @li row_memory: The size of Embedding vector in a row, the default is 320.
+* @li mode: String type, currently there are two options: 'mod' and 'order'
+
+*@par Outputs:
+* @li local_idx:Index on each server.
+* @li nums:The number of local_idx found on each server.
+* @li recover_idx:The sorted local_idx element is at the position corresponding
+* to the original input index.
+
+*@par Third-party framework compatibility
+* It is a custom operator.
+*/
+REG_OP(EmbeddingLocalIndex)
+    .INPUT(addr_table, TensorType({DT_UINT64}))
+    .INPUT(index, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
+    .OUTPUT(local_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
+    .OUTPUT(nums, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
+    .OUTPUT(recover_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
+    .ATTR(row_memory, Int, 320)
+    .ATTR(mode, String, "mod")
+    .OP_END_FACTORY_REG(EmbeddingLocalIndex)
+
+/**
+* @brief Fill a tensor of the specified shape with the given value.

+* @par Inputs:
+* One input, including:
+* @li dims: A Tensor, specifying the shape of the output tensor.

+* @par Attributes:
+* @li value: An optional float value. Defaults to 0.0.

+* @par Outputs:
+* @li y: A Tensor. Has the shape specified by input "dims", filled with the value specified by attr "value".

+* @par Third-party framework compatibility
+* Compatible with the ONNX operator ConstantOfShape.
+*/
+REG_OP(FillV2)
+    .INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
+    .ATTR(value, Float, 0)
+    .OP_END_FACTORY_REG(FillV2)
+
+/**
+* @brief Fill a tensor of the specified shape with the given value.

+* @par Attributes:
+* @li value: An optional float value. Defaults to 0.0.

+* @li dims: A required ListInt, specifying the shape of the output tensor.

+* @par Outputs:
+* @li y: A Tensor. Has the shape specified by attr "dims", filled with the value specified by attr "value".

+* @par Third-party framework compatibility
+* Compatible with the ONNX operator ConstantOfShape.
+*/
+REG_OP(FillV2D)
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
+    .ATTR(value, Float, 0)
+    .REQUIRED_ATTR(dims, ListInt)
+    .OP_END_FACTORY_REG(FillV2D)
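For illustration, a host-side sketch of the FillV2 semantics documented above, using a flat std::vector in place of a real tensor type (the helper name is hypothetical):

#include <cstdint>
#include <vector>

// Build a tensor with the shape given by "dims" and fill it with "value".
std::vector<float> FillV2Ref(const std::vector<int64_t> &dims, float value) {
  int64_t count = 1;
  for (int64_t d : dims) {
    count *= d;  // total element count is the product of the dims
  }
  return std::vector<float>(static_cast<size_t>(count), value);
}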
 }  // namespace ge
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/parsing_ops.h b/third_party/fwkacllib/inc/ops/parsing_ops.h
index 5c7adfd8..b625180a 100644
--- a/third_party/fwkacllib/inc/ops/parsing_ops.h
+++ b/third_party/fwkacllib/inc/ops/parsing_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -51,6 +51,246 @@ REG_OP(StringToNumber)
     .ATTR(out_type, Type, DT_FLOAT)
     .OP_END_FACTORY_REG(StringToNumber)
 
+/**
+*@brief Parse a single Example proto into typed tensors.
+*@par Inputs:
+*serialized: A Tensor of type string.
+*dense_defaults: A dynamic input Tensor of type string, float or int64. \n
+
+*@par Attributes:
+*num_sparse: An int, the number of sparse_indices, sparse_values and sparse_shapes outputs.
+*sparse_keys: ListString.
+*sparse_types: types of sparse_values.
+*dense_keys: ListString.
+*Tdense: types of dense_defaults and dense_values.
+*dense_shapes: shapes of dense_values. \n
+
+*@par Outputs:
+*sparse_indices: A Tensor of type int64.
+*sparse_values: Has the same type as sparse_types.
+*sparse_shapes: A Tensor of type int64.
+*dense_values: Has the same type as dense_defaults.
+
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(ParseSingleExample)
+    .INPUT(serialized, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(dense_defaults, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_indices, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_values, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_shapes, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(dense_values, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
+    .ATTR(num_sparse, Int, 0)
+    .ATTR(sparse_keys, ListString, {})
+    .ATTR(dense_keys, ListString, {})
+    .ATTR(sparse_types, ListType, {})
+    .ATTR(Tdense, ListType, {})
+    .ATTR(dense_shapes, ListListInt, {})
+    .OP_END_FACTORY_REG(ParseSingleExample)
+
+/**
+*@brief Decodes raw data into a tensor . \n
+*@par Inputs:
+*bytes: A Tensor of type string.
+
+*@par Attributes:
+*little_endian: An optional bool. Defaults to true.
+*out_type: The output type. Defaults to DT_FLOAT.
+
+*@par Outputs:
+*output: A Tensor of type out_type.
+*/
+REG_OP(DecodeRaw)
+    .INPUT(bytes, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_BOOL,DT_FLOAT16,DT_DOUBLE,DT_FLOAT,
+        DT_INT64,DT_INT32,DT_INT8,DT_UINT8,DT_INT16,
+        DT_UINT16,DT_COMPLEX64,DT_COMPLEX128}))
+    .ATTR(out_type, Type, DT_FLOAT)
+    .ATTR(little_endian, Bool, true)
+    .OP_END_FACTORY_REG(DecodeRaw)
+
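To make the little_endian attribute concrete, here is a sketch of the byte reinterpretation for a float out_type. The 4-byte swap and the little-endian host assumption are illustrative simplifications, not the op's documented implementation:

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

// Reinterpret raw bytes as floats, swapping byte order when the requested
// endianness differs from the (assumed little-endian) host.
std::vector<float> DecodeRawRef(const std::vector<uint8_t> &bytes,
                                bool little_endian) {
  std::vector<float> out(bytes.size() / sizeof(float));
  for (size_t i = 0; i < out.size(); ++i) {
    uint8_t buf[sizeof(float)];
    std::memcpy(buf, bytes.data() + i * sizeof(float), sizeof(float));
    if (!little_endian) {
      std::swap(buf[0], buf[3]);
      std::swap(buf[1], buf[2]);
    }
    std::memcpy(&out[i], buf, sizeof(float));
  }
  return out;
}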
+/**
+*@brief Convert a serialized tensorflow.TensorProto proto to a Tensor. \n

+*@par Inputs:
+*serialized: A Tensor of string type. Scalar string containing serialized
+*TensorProto prototype. \n

+*@par Attributes:
+*out_type: The type of the serialized tensor. The provided type must match the
+*type of the serialized tensor and no implicit conversion will take place. \n

+*@par Outputs:
+*output: A Tensor of type out_type. \n

+*@attention Constraints:
+*The implementation for ParseTensor on Ascend uses AICPU,
+*with poor performance. \n

+*@par Third-party framework compatibility
+*@li compatible with tensorflow ParseTensor operator.
+*/
+REG_OP(ParseTensor)
+    .INPUT(serialized, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
+        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
+        DT_UINT64, DT_BOOL, DT_DOUBLE, DT_STRING,
+        DT_COMPLEX64, DT_COMPLEX128}))
+    .ATTR(out_type, Type, DT_FLOAT)
+    .OP_END_FACTORY_REG(ParseTensor)
+
+/**
+*@brief Convert CSV records to tensors. Each column maps to one tensor . \n

+*@par Inputs:
+*Inputs include:
+*records: Each string is a record/row in the csv and all records should have the
+*same format. \n
+*record_defaults: One tensor per column of the input record, with either a
+*scalar default value for that column or an empty vector if the column is
+*required. \n

+*@par Attributes:
+*OUT_TYPE: The numeric type to interpret each string in string_tensor as . \n
+*field_delim: char delimiter to separate fields in a record. \n
+*use_quote_delim: If false, treats double quotation marks as regular characters
+*inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). \n
+*na_value: Additional string to recognize as NA/NaN. \n

+*@par Outputs:
+*output: Tensors. Each tensor has the same type as the corresponding column of "record_defaults" . \n

+*@attention Constraints:
+*The implementation for DecodeCSV on Ascend uses AICPU, with poor
+*performance. \n

+*@par Third-party framework compatibility
+*@li compatible with tensorflow DecodeCSV operator.
+*/
+REG_OP(DecodeCSV)
+    .INPUT(records, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(record_defaults, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32,
+        DT_INT64, DT_STRING}))
+    .DYNAMIC_OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32,
+        DT_INT64, DT_STRING}))
+    .ATTR(OUT_TYPE, ListType, {})
+    .ATTR(field_delim, String, ",")
+    .ATTR(use_quote_delim, Bool, true)
+    .ATTR(na_value, String, ",")
+    .ATTR(select_cols, ListInt, {})
+    .OP_END_FACTORY_REG(DecodeCSV)
+
+/**
+*@brief Parse Example protos into typed tensors.
+*@par Inputs:
+*serialized: A Tensor of type string. \n
+*name: A Tensor of type string. \n
+*sparse_keys: Dynamic input tensor of string. \n
+*dense_keys: Dynamic input tensor of string. \n
+*dense_defaults: Dynamic input tensor of type string, float or int64. \n

+*@par Attributes:
+*Nsparse: Number of sparse_keys, sparse_indices and sparse_shapes \n
+*Ndense: Number of dense_keys \n
+*sparse_types: types of sparse_values \n
+*Tdense: Type of dense_defaults and dense_values \n
+*dense_shapes: shapes of dense_values \n

+*@par Outputs:
+*sparse_indices: A Tensor of type int64. \n
+*sparse_values: Has the same type as sparse_types. \n
+*sparse_shapes: A Tensor of type int64 \n
+*dense_values: Has the same type as dense_defaults. \n
+*@par Third-party framework compatibility \n
+*@li compatible with tensorflow ParseExample operator. \n
+*/
+REG_OP(ParseExample)
+    .INPUT(serialized, TensorType({DT_STRING}))
+    .INPUT(name, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(sparse_keys, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(dense_keys, TensorType({DT_STRING}))
+    .DYNAMIC_INPUT(dense_defaults, TensorType({DT_FLOAT, DT_INT64, DT_STRING}))
+    .DYNAMIC_OUTPUT(sparse_indices, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING}))
+    .DYNAMIC_OUTPUT(sparse_shapes, TensorType({DT_INT64}))
+    .DYNAMIC_OUTPUT(dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING}))
+    .ATTR(Nsparse, Int, 0)
+    .ATTR(Ndense, Int, 0)
+    .ATTR(sparse_types, ListType, {})
+    .ATTR(Tdense, ListType, {})
+    .ATTR(dense_shapes, ListListInt, {})
+    .OP_END_FACTORY_REG(ParseExample)
+
+/**
+*@brief Transforms a scalar brain.SequenceExample proto (as strings) into typed
+*tensors.
+*@par Inputs:
+*serialized: A Tensor of type string. \n
+*feature_list_dense_missing_assumed_empty: A Tensor of type string. \n
+*context_sparse_keys: Dynamic input tensor of string. \n
+*context_dense_keys: Dynamic input tensor of string. \n
+*feature_list_sparse_keys: Dynamic input tensor of string. \n
+*feature_list_dense_keys: Dynamic input tensor of string. \n
+*context_dense_defaults: Dynamic input tensor of string, float, int64. \n
+*debug_name: A Tensor of type string.
\n + +*@par Attributes: +*Ncontext_sparse: Number of context_sparse_keys, context_sparse_indices and context_sparse_shapes \n +*Ncontext_dense: Number of context_dense_keys \n +*Nfeature_list_sparse: Number of feature_list_sparse_keys \n +*Nfeature_list_dense: Number of feature_list_dense_keys \n +*context_sparse_types: Types of context_sparse_values \n +*Tcontext_dense: Number of dense_keys \n +*feature_list_dense_types: Types of feature_list_dense_values \n +*context_dense_shapes: Shape of context_dense \n +*feature_list_sparse_types: Type of feature_list_sparse_values \n +*feature_list_dense_shapes: Shape of feature_list_dense \n + +*@par Outputs: +*context_sparse_indices: Dynamic output tensor of type int64. \n +*context_sparse_values: Dynamic output tensor of type string, float, int64. \n +*context_sparse_shapes: Dynamic output tensor of type int64 \n +*context_dense_values: Dynamic output tensor of type string, float, int64. \n +*feature_list_sparse_indices: Dynamic output tensor of type int64. \n +*feature_list_sparse_values: Dynamic output tensor of type string, float, int64. \n +*feature_list_sparse_shapes: Dynamic output tensor of type int64 \n +*feature_list_dense_values: Dynamic output tensor of type string, float, int64. \n +*@par Third-party framework compatibility \n +*@li compatible with tensorflow StringToNumber operator. \n +*/ +REG_OP(ParseSingleSequenceExample) + .INPUT(serialized, TensorType({DT_STRING})) + .INPUT(feature_list_dense_missing_assumed_empty, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_sparse_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_dense_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(feature_list_sparse_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(feature_list_dense_keys, TensorType({DT_STRING})) + .DYNAMIC_INPUT(context_dense_defaults, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .INPUT(debug_name, TensorType({DT_STRING})) + .DYNAMIC_OUTPUT(context_sparse_indices, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(context_sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(context_sparse_shapes, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(context_dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(feature_list_sparse_indices, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(feature_list_sparse_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .DYNAMIC_OUTPUT(feature_list_sparse_shapes, TensorType({DT_INT64})) + .DYNAMIC_OUTPUT(feature_list_dense_values, TensorType({DT_FLOAT, DT_INT64, DT_STRING})) + .ATTR(Ncontext_sparse, Int, 0) + .ATTR(Ncontext_dense, Int, 0) + .ATTR(Nfeature_list_sparse, Int, 0) + .ATTR(Nfeature_list_dense, Int, 0) + .ATTR(context_sparse_types, ListType, {}) + .ATTR(Tcontext_dense, ListType, {}) + .ATTR(feature_list_dense_types, ListType, {}) + .ATTR(context_dense_shapes, ListListInt, {}) + .ATTR(feature_list_sparse_types, ListType, {}) + .ATTR(feature_list_dense_shapes, ListListInt, {}) + .OP_END_FACTORY_REG(ParseSingleSequenceExample) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_PARSING_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/quantize_ops.h b/third_party/fwkacllib/inc/ops/quantize_ops.h index b53cfeb6..69d5e67e 100644 --- a/third_party/fwkacllib/inc/ops/quantize_ops.h +++ b/third_party/fwkacllib/inc/ops/quantize_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in 
compliance with the License. @@ -60,6 +60,26 @@ REG_OP(Dequantize) .ATTR(mode, String, "MIN_COMBINED") .OP_END_FACTORY_REG(Dequantize) +/** +*@brief Quantizes the input . \n +*@par Inputs: +*x: shape and dtype of input_x. \n +*scales: shape and dtype of input_scales. \n +*zero_points: shape and dtype of input_zero_points \n +*@par Attributes: +*@li axis: the processed dim. \n +*@par Outputs: +*y: shape and dtype of output_y, should be same shape as input, dtype is same as the quantified type . \n +*/ +REG_OP(Quantize) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(scales, TensorType({DT_FLOAT})) + .INPUT(zero_points, TensorType({DT_INT8,DT_UINT8,DT_INT32})) + .OUTPUT(y, TensorType({DT_INT8,DT_UINT8,DT_INT32})) + .REQUIRED_ATTR(dtype, String) + .ATTR(axis, Int, 1) + .OP_END_FACTORY_REG(Quantize) + /** *@brief Quantizes the input . \n @@ -194,7 +214,7 @@ REG_OP(AscendRequant) *@brief Requantizes the input of int16 . \n *@par Inputs: -*@li x: An NC1HWC0 tensor of type int16, specifying the input. +*@li x0: An NC1HWC0 tensor of type int16, specifying the input. *@li req_scale: An NC1HWC0 tensor of type uint64, specifying the scaling ratio. *@li x1: An NC1HWC0 tensor of type int16 . \n @@ -203,22 +223,21 @@ REG_OP(AscendRequant) *@li relu_flag: A optional bool, specifying whether to perform ReLU, either "True" or "False". Defaults to "False" . \n *@par Outputs: -*@li y: The dequantized output tensor of type int8 and with format NC1HWC0. +*@li y0: The dequantized output tensor of type int8 and with format NC1HWC0. *@li y1: The dequantized output tensor of type int16 and with format NC1HWC0 . \n *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. */ REG_OP(AscendRequantS16) - .INPUT(x, TensorType({DT_INT16})) + .INPUT(x0, TensorType({DT_INT16})) .INPUT(req_scale, TensorType({DT_UINT64})) .OPTIONAL_INPUT(x1, TensorType({DT_INT16})) - .OUTPUT(y, TensorType({DT_INT8})) + .OUTPUT(y0, TensorType({DT_INT8})) .OUTPUT(y1, TensorType({DT_INT16})) .ATTR(dual_output, Bool, false) .ATTR(relu_flag, Bool, false) .OP_END_FACTORY_REG(AscendRequantS16) - } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_QUANTIZE_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/ragged_array_ops.h b/third_party/fwkacllib/inc/ops/ragged_array_ops.h index 9b31aa8e..20484623 100644 --- a/third_party/fwkacllib/inc/ops/ragged_array_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_array_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h index 13488a25..020e3da4 100644 --- a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/third_party/fwkacllib/inc/ops/ragged_math_ops.h b/third_party/fwkacllib/inc/ops/ragged_math_ops.h
index 8af4f867..258b0ca1 100644
--- a/third_party/fwkacllib/inc/ops/ragged_math_ops.h
+++ b/third_party/fwkacllib/inc/ops/ragged_math_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h
index b46da435..b65a68f1 100644
--- a/third_party/fwkacllib/inc/ops/random_ops.h
+++ b/third_party/fwkacllib/inc/ops/random_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -356,6 +356,39 @@ REG_OP(DropOutGenMask)
     .ATTR(seed2, Int, 0)
     .OP_END_FACTORY_REG(DropOutGenMask)
 
+
+/**
+*@brief Generate random uint8 mask for dropout v3 . \n
+
+*@par Inputs:
+include:
+*@li shape:The shape of the output tensor.
+*@li prob:0-D. Prob of 1 . \n
+
+*@par Attributes:
+*@li seed:If either seed or seed2 is set to be non-zero, the random number
+*generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
+*@li seed2:A second seed to avoid seed collision . \n
+
+*@par Outputs:
+*y:Output (1-D) random number using uint8 data format . \n
+
+*@attention Constraints:
+*The output length is aligned to 16
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+
+*@see DropOutGenMaskV3()
+*/
+REG_OP(DropOutGenMaskV3)
+    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
+    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
+    .OUTPUT(y, TensorType({ DT_UINT8 }))
+    .ATTR(seed, Int, 0)
+    .ATTR(seed2, Int, 0)
+    .OP_END_FACTORY_REG(DropOutGenMaskV3)
+
 /**
 *@brief Generates values in an interval . \n
@@ -495,6 +528,62 @@ REG_OP(ShuffleChannel)
 	DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
     .ATTR(group, Int, 1)
     .OP_END_FACTORY_REG(ShuffleChannel)
+
+/**
+ * @brief Generate a tensor of samples from a multinomial
+ * distribution according to the probabilities of each of
+ * the possible outcomes.
+ *
+ * @par Inputs:
+ * one input, including:
+ * @li x:Input tensor with shape [batch_size, class_size],
+ * where class_size is the number of all possible outcomes.
+ * Each value along the axis zero represents the unnormalized
+ * log-probability of each corresponding outcome in a batch.
+ *
+ * @par Outputs:
+ * one output, including:
+ * @li y:Output tensor with shape [batch_size, sample_size],
+ * where sample_size is the number of times to sample.
+ * Each value along the axis zero represents the outcome of
+ * the corresponding sample in a batch.
+ *
+ * @par Restrictions:
+ * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+ */
+REG_OP(MultinomialFuss)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64}))
+    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
+    .ATTR(dtype, Int, 6)
+    .ATTR(sample_size, Int, 1)
+    .ATTR(seed, Float, 0)
+    .OP_END_FACTORY_REG(MultinomialFuss)
+
+/**
+* @brief During training, randomly zeroes some of the elements of the input tensor
+* with probability "p".
+*
+* @par Inputs:
+* @li x: A ND Tensor. Must be one of the following data types: Float, Float16
+* @li seed: A ND Tensor.
Must be one of the following data types: Float +* +* @par Attributes: +* @li p: probability of an element to be zeroed +* +* @par Outputs: +* @li y: A tensor with the same shape and type as "x". +* @li mask: A tensor with the same shape and type as "x". +* @li new_seed: A tensor with the same shape and type as "seed". +*/ + +REG_OP(DropoutV2) + .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT })) + .INPUT(seed, TensorType({ DT_FLOAT })) + .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) + .OUTPUT(mask, TensorType({ DT_FLOAT })) + .OUTPUT(seed, TensorType({ DT_FLOAT })) + .REQUIRED_ATTR(p, Float) + .OP_END_FACTORY_REG(DropoutV2) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h index 6f44093e..97c7b8e1 100644 --- a/third_party/fwkacllib/inc/ops/reduce_ops.h +++ b/third_party/fwkacllib/inc/ops/reduce_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ namespace ge { *@attention Constraints: * This operator is a BatchNorm fusion operator for updating the moving * averages for training. -* This operator is used in conjunction with BNTrainingUpdate. +* This operator is used in conjunction with BNTrainingReduce. */ REG_OP(BNTrainingReduce) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -45,6 +45,27 @@ REG_OP(BNTrainingReduce) .OUTPUT(square_sum, TensorType({DT_FLOAT})) .OP_END_FACTORY_REG(BNTrainingReduce) +/** +*@brief Performs reduced batch normalization . \n + +*@par Inputs: +*x: A 6D Tensor of type float16 or float32, with format NDC1HWC0 . \n + +*@par Outputs: +*@li sum: A 3D Tensor of type float32 for SUM reduced "x". +*@li square_sum: A 3D Tensor of type float32 for SUMSQ reduced "x" . \n + +*@attention Constraints: +* This operator is a BatchNorm fusion operator for updating the moving +* averages for training. +* This operator is used in conjunction with BN3DTrainingReduce. +*/ +REG_OP(BN3DTrainingReduce) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(sum, TensorType({DT_FLOAT})) + .OUTPUT(square_sum, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingReduce) + /** *@brief Performs the backpropagation of BatchNorm . \n @@ -88,6 +109,49 @@ REG_OP(BNTrainingReduceGrad) .ATTR(epsilon, Float, 0.0001) .OP_END_FACTORY_REG(BNTrainingReduceGrad) +/** +*@brief Performs the backpropagation of BatchNorm . \n + +*@par Inputs: +* Seven inputs, including: +*@li grads: A 6D Tensor of type float16 or float32, with format NDC1HWC0, for +* the gradient. +*@li x: A 6D Tensor of type float16 or float32, with format NDC1HWC0. +*@li diff_scale: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". +*@li diff_offset: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x". +*@li scale: A 6D Tensor of type float32, with format NDC1HWC0. +*@li batch_mean: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". +*@li batch_variance: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x" . \n + +*@par Attributes: +*epsilon: An optional float32. Defaults to "0.0001". A small float number +* added to the variance of "x" . \n + +*@par Outputs: +*y: A Tensor of type float16 or float32, with format NDC1HWC0, for the offset +* of "x" . 
\n + +*@attention Constraints: +* The preceding layer of this operator must be BN3DTrainingReduceGrad . \n + +*@see BN3DTrainingReduceGrad +*/ +REG_OP(BN3DTrainingReduceGrad) + .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(diff_scale, TensorType({DT_FLOAT})) + .INPUT(diff_offset, TensorType({DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(batch_mean, TensorType({DT_FLOAT})) + .INPUT(batch_variance, TensorType({DT_FLOAT})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .OP_END_FACTORY_REG(BN3DTrainingReduceGrad) + /** *@brief Performs reduced batch normalization . \n @@ -120,7 +184,7 @@ REG_OP(BNTrainingReduceGrad) *@attention Constraints: *@li This operator is a BatchNorm fusion operator for updating the moving averages for training. -*This operator is used in conjunction with BNTrainingReduce. +*This operator is used in conjunction with BNTrainingUpdate. *@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square * root instruction. */ @@ -141,6 +205,59 @@ REG_OP(BNTrainingUpdate) .OUTPUT(batch_variance, TensorType({DT_FLOAT})) .OP_END_FACTORY_REG(BNTrainingUpdate) +/** +*@brief Performs reduced batch normalization . \n + +*@par Inputs: +* Seven inputs, including: (NDC1HWC0 supported) +*@li x: A 6D Tensor of type float16 or float32. +*@li sum: A 6D Tensor of type float32 for the output of operator +* BN3DTrainingUpdate. +*@li square_sum: A 6D Tensor of type float32 for the output of operator +* BN3DTrainingUpdate. +*@li scale: A 6D Tensor of type float32, for the scaling factor. +*@li offset: A 6D Tensor of type float32, for the scaling offset. +*@li mean: A 6D Tensor of type float32, for the updated mean. +*@li variance: A 6D Tensor of type float32, for the updated variance . \n + +*@par Attributes: +*@li epsilon: A required float32, specifying the small value added to variance +* to avoid dividing by zero. +*@li factor: A required float32, specifying the weight for updating the mean +* and variance . \n + +*@par Outputs: +* Five outputs, including: (NDC1HWC0 supported) +*@li y: A 6D Tensor of type float16 or float32, for normalized "x". +*@li mean: A 6D Tensor of type float32, for the updated mean. +*@li variance: A 6D Tensor of type float32, for the updated variance. +*@li batch_mean: A 6D Tensor of type float32, for the mean of "x". +*@li batch_variance: A 6D Tensor of type float32, for the variance of "x" . \n + +*@attention Constraints: +*@li This operator is a BatchNorm fusion operator for updating the moving +averages for training. +*This operator is used in conjunction with BN3DTrainingUpdate. +*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square +* root instruction. +*/ +REG_OP(BN3DTrainingUpdate) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(sum, TensorType({DT_FLOAT})) + .INPUT(square_sum, TensorType({DT_FLOAT})) + .INPUT(scale, TensorType({DT_FLOAT})) + .INPUT(offset, TensorType({DT_FLOAT})) + .INPUT(mean, TensorType({DT_FLOAT})) + .INPUT(variance, TensorType({DT_FLOAT})) + .REQUIRED_ATTR(factor, Float) + .REQUIRED_ATTR(epsilon, Float) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT})) + .OUTPUT(mean, TensorType({DT_FLOAT})) + .OUTPUT(variance, TensorType({DT_FLOAT})) + .OUTPUT(batch_mean, TensorType({DT_FLOAT})) + .OUTPUT(batch_variance, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingUpdate) + /** *@brief Performs batch normalization for inference . 
\n @@ -284,6 +401,40 @@ REG_OP(BNTrainingUpdateGrad) .OUTPUT(diff_offset, TensorType({DT_FLOAT})) .OP_END_FACTORY_REG(BNTrainingUpdateGrad) +/** +*@brief Performs the backpropagation of BatchNorm . \n + +*@par Inputs: +* Four inputs, including: +*@li grads: A 6D Tensor of type float16 or float32, with format NDC1HWC0, +* for the gradient. +*@li x: A 6D Tensor of type float16 or float32, with format NDC1HWC0. +*@li batch_mean: A 6D Tensor of type float32, with format NDC1HWC0, +* for the mean of "x". +*@li batch_variance: A 6D Tensor of type float32, with format NDC1HWC0, +* for the variance of "x" . \n + +*@par Attributes: +*epsilon: An optional float32. Defaults to "0.0001". A small float number +* added to the variance of "x" . \n + +*@par Outputs: +*@li diff_scale: A Tensor of type float32, with format NDC1HWC0, +* for the offset of "scale". +*@li diff_offset: A Tensor of type float32, with format NDC1HWC0, +* for the offset of "offset" . \n + +*/ +REG_OP(BN3DTrainingUpdateGrad) + .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(batch_mean, TensorType({DT_FLOAT})) + .INPUT(batch_variance, TensorType({DT_FLOAT})) + .ATTR(epsilon, Float, 0.0001) + .OUTPUT(diff_scale, TensorType({DT_FLOAT})) + .OUTPUT(diff_offset, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(BN3DTrainingUpdateGrad) + /** *@brief Performs the backpropagation of BatchNorm for inference . \n @@ -635,8 +786,8 @@ REG_OP(ReduceMin) * Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMin instead. */ REG_OP(ReduceMinD) - .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) - .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32})) .REQUIRED_ATTR(axes, ListInt) .ATTR(keep_dims, Bool, false) .OP_END_FACTORY_REG(ReduceMinD) @@ -747,14 +898,14 @@ REG_OP(Reduction) *@brief Computes the euclidean norm of elements across dimensions of a tensor . \n *@par Inputs: -*@li input_tensor: A Tensor. Must be one of the following types: float16, float32, int32. +*@li x: A Tensor. Must be one of the following types: float16, float32, int32. *@li axes: A Tensor of type int8 or int32. Specifies the dimensions to reduce. Defaults to "None" . \n *@par Attributes: *keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False" . \n *@par Outputs: -*output_tensor: A Tensor. Must be one of the following types: float16, float32, int32 . \n +*y: A Tensor. Must be one of the following types: float16, float32, int32 . \n *@attention Constraints: * If "axes = None", all dimensions will be reduced. "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n @@ -821,7 +972,7 @@ Defaults to "0.00001" . \n *batch_ variance: A Tensor of type float32 for the result variance . \n *@attention Constraints: -*For Ascend 310, the result accuracy fails to reach 1 due to the square root instruction. +*For Ascend 310, the result accuracy fails to reach 0.001 due to the square root instruction. */ REG_OP(INInferV2) .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) @@ -839,7 +990,7 @@ REG_OP(INInferV2) *@brief Performs reduced instance normalization . \n *@par Inputs: -*x: A Tensor of type float16 or float32, with format NC1HWC0 . \n +*x: A Tensor of type float16 or float32. \n *@par Outputs: *@li sum: A Tensor of type float32 for SUM reduced "x". 
@@ -862,19 +1013,19 @@ REG_OP(INTrainingReduceV2)
 
 *@par Inputs:
 * Seven inputs, including: (NC1HWC0supported)
 *@li x: A Tensor of type float16 or float32.
-*@li sum: A T [N, C1, 1, 1, C0] ensor of type float32 for the output of operator INTrainingReduceV2.
-*@li square_sum: A [N, C1, 1, 1, C0] Tensor of type float32 for the output of operator INTrainingReduceV2.
-*@li gamma: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling gamma.
-*@li beta: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling beta.
-*@li mean: A [N, C1, 1, 1, C0] Tensor of type float32, for the updated mean.
-*@li variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the updated variance . \n
+*@li sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
+*@li square_sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
+*@li gamma: A Tensor of type float32, for the scaling gamma.
+*@li beta: A Tensor of type float32, for the scaling beta.
+*@li mean: A Tensor of type float32, for the updated mean.
+*@li variance: A Tensor of type float32, for the updated variance . \n
 
 *@par Attributes:
 *@li momentum: A required float32, specifying the momentum to update mean and var.
 *@li epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero . \n
 
 *@par Outputs:
-* Three outputs, including: (NC1HWC0 supported)
+* Three outputs
 *@li y: A Tensor of type float16 or float32, for normalized "x".
 *@li batch_mean: A Tensor of type float32, for the updated mean.
 *@li batch_variance: A Tensor of type float32, for the updated variance . \n
@@ -882,7 +1033,7 @@ REG_OP(INTrainingReduceV2)
 *@attention Constraints:
 *@li This operator is a InstanceNorm fusion operator for updating the moving averages for training.
 * This operator is used in conjunction with INTrainingReduceV2.
-*@li For Ascend 310, the result accuracy fails to reach 1 due to the square root instruction.
+*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction.
 */
 REG_OP(INTrainingUpdateV2)
     .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
@@ -965,7 +1116,7 @@ for the updated variance.
 *@attention Constraints:
 *@li This operator is a InstanceNorm fusion operator for updating the moving averages for training.
 * This operator is used in conjunction with GNTrainingUpdate.
-*@li For Ascend 310, the result accuracy fails to reach 1 due to the square root instruction.
+*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction.
 */
 REG_OP(GNTrainingUpdate)
     .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
@@ -982,6 +1133,98 @@ REG_OP(GNTrainingUpdate)
     .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
     .OP_END_FACTORY_REG(GNTrainingUpdate)
 
+/**
+*@brief Joins a string Tensor across the given dimensions. \n
+
+*@par Inputs:
+include:
+*@li input: A Tensor of type string. The text to be joined.
+*@li reduction_indices: A Tensor of type int. The dimensions to reduce over.
+
+*@par Attributes:
+*@li keep_dims: An optional bool. Defaults to true. If true, retain reduced dimensions with length 1.
+*@li separator: An optional string. Defaults to "".
+
+*@par Outputs:
+*@li output: A Tensor of type string.
+*/
+REG_OP(ReduceJoin)
+    .INPUT(input, TensorType({DT_STRING}))
+    .INPUT(reduction_indices, TensorType({DT_INT32}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(keep_dims, Bool, true)
+    .ATTR(separator, String, "")
+    .OP_END_FACTORY_REG(ReduceJoin)
+
+/**
+* @brief Calculates the standard deviation and average value of Tensors.
+
+* @par Inputs:
+* @li x: A Tensor. Must be one of the following types:
+* float16, float32. \n
+
+* @par Attributes:
+* Three Attributes, including:
+* @li dim: An optional list of int. Defaults to "None". \n
+
+* @li unbiased: An optional bool. Defaults to "True".
+* If "True", use Bessel correction.
+* If "False", do not use Bessel correction. \n
+
+* @li keepdim: An optional bool. Defaults to "False".
+* If "True", keep the original tensor dimension.
+* If "False", do not keep the original tensor dimension. \n
+
+* @par Outputs:
+* Two Outputs, including:
+* @li y1: A Tensor. The standard deviation. Has the same type as "x".
+* @li y2: A Tensor. The mean. Has the same type as "x". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator ReduceStd.
+*/
+REG_OP(ReduceStd)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y1, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y2, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(dim, ListInt, {})
+    .ATTR(unbiased, Bool, true)
+    .ATTR(keepdim, Bool, false)
+    .OP_END_FACTORY_REG(ReduceStd)
+
+/**
+* @brief Calculates the standard deviation of Tensors.
+
+* @par Inputs:
+* Two inputs, including:
+* @li x: A Tensor. Must be one of the following types: float16, float32. \n
+* @li mean: A Tensor. The mean of "x". Must be one of the following types: float16, float32. \n
+
+* @par Attributes:
+* Three Attributes, including:
+* @li dim: An optional list of int. Defaults to "None". \n
+* @li unbiased: An optional bool. Defaults to "True".
+* If "True", use Bessel correction.
+* If "False", do not use Bessel correction. \n
+* @li keepdim: An optional bool. Defaults to "False".
+* If "True", keep the original tensor dimension.
+* If "False", do not keep the original tensor dimension. \n
+
+* @par Outputs:
+* @li y: A Tensor. The standard deviation of "x". Has the same type as "x".
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator ReduceStdWithMean.
+*/
+REG_OP(ReduceStdWithMean)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .ATTR(dim, ListInt, {})
+    .ATTR(unbiased, Bool, true)
+    .ATTR(keepdim, Bool, false)
+    .OP_END_FACTORY_REG(ReduceStdWithMean)
 } //namespace ge
 #endif // OPS_BUILT_IN_OP_PROTO_INC_REDUCE_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/resource_variable_ops.h b/third_party/fwkacllib/inc/ops/resource_variable_ops.h
index 1b60d42a..74ac83f8 100644
--- a/third_party/fwkacllib/inc/ops/resource_variable_ops.h
+++ b/third_party/fwkacllib/inc/ops/resource_variable_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/rnn.h b/third_party/fwkacllib/inc/ops/rnn.h
index 84723872..80546860 100644
--- a/third_party/fwkacllib/inc/ops/rnn.h
+++ b/third_party/fwkacllib/inc/ops/rnn.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@ namespace ge {
 *@li c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li w:A 4D Tensor. Must be one of the following types: float16. The format must be FRACTAL_Z.
 *@li b:A 1D Tensor. Must be one of the following types: float16. The format must be ND . \n
+*@li mask:A 1D Tensor. Must be one of the following types: uint8.
 *@par Attributes:
 *@li keep_prob:An integer identifying the keep prob in the op. Default to 1.
@@ -42,7 +43,6 @@ namespace ge {
 *@par Outputs:
 *seven outputs:
-*@li mask:A 1D Tensor. Must be one of the following types: uint8.
 *@li ct:A 4D Tensor. Must be one of the following types: float16, float32.
 *@li ht:A 4D Tensor. Must be one of the following types: float16.
 *@li it:A 4D Tensor. Must be one of the following types: float16, float32.
@@ -187,16 +187,16 @@ REG_OP(DynamicRNNGrad)
 *@brief: DynamicRNN calculation.
 *@par Inputs:
 *ten inputs:
-*@li x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
-*@li w:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
-*@li b:A 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
-*@li seq_length:A 1D Tensor. Must be one of the following types: int32. The format must be ND.
-*@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
-*@li init_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
-*@li wci:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
-*@li wcf:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
-*@li wco:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
-*@li mask:A 1D Tensor. Must be one of the following types: uint8. The format must be ND . \n
+*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li seq_length:An optional Tensor. Only supports float16 in FRACTAL_NZ and int32 in ND.
+*@li init_h:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li init_c:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wci:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wcf:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wco:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li mask:An optional 1D Tensor. Must be one of the following types: uint8. The format must be ND . \n
 *@par Attributes:
 *@li cell_type:An string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported.
@@ -209,6 +209,7 @@ REG_OP(DynamicRNNGrad)
 *@li time_major:An bool identifying the time major in the op. Default to true.
 *@li activation:An string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported.
 *@li forget_bias:An float identifying the forget bias in the op. Default to 0.
+*@li gate_order:A string identifying the gate order in the op. Supports "ijfo" and "ifjo". Default to "ijfo".
 *@li is_training:An bool identifying is training in the op. Default to true . \n
 *@par Outputs:
@@ -221,12 +222,14 @@ REG_OP(DynamicRNNGrad)
 *@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@par Third-party framework compatibility:
+* Compatible with the TF operator LSTM.
 */
 REG_OP(DynamicRNN)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
-    .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32, DT_FLOAT16}))
     .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT}))
     .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT}))
     .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -251,9 +254,237 @@ REG_OP(DynamicRNN)
     .ATTR(time_major, Bool, true)
     .ATTR(activation, String, "tanh")
     .ATTR(forget_bias, Float, 0.0)
+    .ATTR(gate_order, String, "ijfo")
     .ATTR(is_training, Bool, true)
     .OP_END_FACTORY_REG(DynamicRNN)
+/**
+*@brief: DynamicRNNV2 calculation.
+*@par Inputs:
+*eleven inputs:
+*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li weight_input:A required 4D Tensor. Must be one of the following types: float16, float32.
+*The format must be FRACTAL_Z.
+*@li weight_hidden:A required 4D Tensor. Must be one of the following types: float16, float32.
+*The format must be FRACTAL_Z.
+*@li b:An optional 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li seq_length:An optional 1D Tensor. Must be one of the following types: int32. The format must be ND.
+*@li init_h:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li init_c:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wci:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wcf:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wco:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li mask:An optional 1D Tensor. Must be one of the following types: uint8. The format must be ND . \n
+
+*@par Attributes:
+*@li cell_type:A string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported.
+*@li direction:A string identifying the direction in the op. Default to "UNIDIRECTIONAL".
+*Only UNIDIRECTIONAL is currently supported.
+*@li cell_depth:An integer identifying the cell depth in the op. Default to 1.
+*@li use_peephole:A bool identifying if use peephole in the op. Default to false.
+*@li keep_prob:A float identifying the keep prob in the op. Default to 1.
+*@li cell_clip:A float identifying the cell clip in the op. Default to -1.
+*@li num_proj:An integer identifying the num projection in the op. Default to 0.
+*@li time_major:A bool identifying the time major in the op. Default to true.
+*@li activation:A string identifying the type of activation function in the op. Default to "tanh".
+*Only tanh is currently supported.
+*@li recurrent_activation:A string identifying the type of activation function in the op. Default to "sigmoid".
+*Supports "sigmoid" and "hard_sigmoid". In general, set "hard_sigmoid" for TF Keras LSTM.
+*@li forget_bias:A float identifying the forget bias in the op. Default to 0.
+*@li gate_order:A string identifying the gate order in the op. Supports "ijfo" and "ifco". Default to "ijfo".
+*Set "ijfo" for the TF operator LSTM and "ifco" for TF Keras LSTM.
+*@li stateful: A bool identifying whether the op is stateful. Default to false. Only false is currently supported.
+*@li merge_mode: A string identifying the merge mode in the op. Default to "concat".
+*Only "concat" is currently supported.
+*@li is_training:A bool identifying whether the op is in training mode. Default to true . \n
+
+*@par Outputs:
+*eight outputs:
+*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*Return the last output_h.
+*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*Return the last output_c.
+*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@par Third-party framework compatibility:
+* Compatible with the TF operator LSTM or TF Keras operator LSTM.
+*/
+
+REG_OP(DynamicRNNV2)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(weight_input, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(mask, TensorType({DT_UINT8}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(i, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(j, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(f, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(o, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(tanhc, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(cell_type, String, "LSTM")
+    .ATTR(direction, String, "UNIDIRECTIONAL")
+    .ATTR(cell_depth, Int, 1)
+    .ATTR(use_peephole, Bool, false)
+    .ATTR(keep_prob, Float, 1.0)
+    .ATTR(cell_clip, Float, -1.0)
+    .ATTR(num_proj, Int, 0)
+    .ATTR(time_major, Bool, true)
+    .ATTR(activation, String, "tanh")
+    .ATTR(recurrent_activation, String, "sigmoid")
+    .ATTR(forget_bias, Float, 0.0)
+    .ATTR(gate_order, String, "ijfo")
+    .ATTR(stateful, Bool, false)
+    .ATTR(merge_mode, String, "concat")
+    .ATTR(is_training, Bool, true)
+    .OP_END_FACTORY_REG(DynamicRNNV2)
+
+/**
+*@brief: DynamicRNNV3 calculation.
+*@par Inputs:
+*twelve inputs:
+*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li seq_length:An optional 1D Tensor. Must be one of the following types: int32. The format must be ND.
+*@li init_h:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li init_c:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wci:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wcf:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wco:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li mask:An optional 1D Tensor. Must be one of the following types: uint8. The format must be ND.
+*@li real_mask:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li project:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ . \n
+
+*@par Attributes:
+*@li cell_type:A string identifying the cell type in the op. Default to "LSTM". Only LSTM is currently supported.
+*@li direction:A string identifying the direction in the op. Default to "UNIDIRECTIONAL". Only UNIDIRECTIONAL is currently supported.
+*@li cell_depth:An integer identifying the cell depth in the op. Default to 1.
+*@li use_peephole:A bool identifying if use peephole in the op. Default to false.
+*@li keep_prob:A float identifying the keep prob in the op. Default to 1.
+*@li cell_clip:A float identifying the cell clip in the op. Default to -1.
+*@li num_proj:An integer identifying the num projection in the op. Default to 0.
+*@li time_major:A bool identifying the time major in the op. Default to true.
+*@li activation:A string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported.
+*@li forget_bias:A float identifying the forget bias in the op. Default to 0.
+*@li is_training:A bool identifying whether the op is in training mode. Default to true . \n
+
+*@par Outputs:
+*eight outputs:
+*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@par Third-party framework compatibility:
+* Compatible with the TF operator LSTM.
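+*@par Example:
+* A minimal IR-construction sketch (illustrative only, not part of the op
+* definition; it assumes the ge::op::DynamicRNNV3 wrapper class and the
+* set_input_*/set_attr_* accessors that REG_OP generates below):
+*@code
+*  auto x = ge::op::Data("x");  // [num_step, batch, input_size], FRACTAL_NZ
+*  auto w = ge::op::Data("w");  // float16/float32, FRACTAL_NZ
+*  auto b = ge::op::Data("b");  // float16/float32, ND
+*  ge::op::DynamicRNNV3 rnn("dynamic_rnn_v3");
+*  rnn.set_input_x(x)
+*     .set_input_w(w)
+*     .set_input_b(b)
+*     .set_attr_activation("tanh")  // only tanh is currently supported
+*     .set_attr_time_major(true);
+*@endcode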
+*/
+REG_OP(DynamicRNNV3)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(mask, TensorType({DT_UINT8}))
+    .OPTIONAL_INPUT(real_mask, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(project, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(i, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(j, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(f, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(o, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(tanhc, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(cell_type, String, "LSTM")
+    .ATTR(direction, String, "UNIDIRECTIONAL")
+    .ATTR(cell_depth, Int, 1)
+    .ATTR(use_peephole, Bool, false)
+    .ATTR(keep_prob, Float, 1.0)
+    .ATTR(cell_clip, Float, -1.0)
+    .ATTR(num_proj, Int, 0)
+    .ATTR(time_major, Bool, true)
+    .ATTR(activation, String, "tanh")
+    .ATTR(forget_bias, Float, 0.0)
+    .ATTR(is_training, Bool, true)
+    .OP_END_FACTORY_REG(DynamicRNNV3)
+
+/**
+*@brief: DynamicLSTMV2 calculation.
+*@par Inputs:
+*eleven inputs:
+*@li x:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li w:A required 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li b:A required 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li cont:A required 2D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li w_xc_x_static:An optional 2D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li h0:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li c0:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li wci:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wcf:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li wco:An optional 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li mask:An optional 1D Tensor. Must be one of the following types: uint8. The format must be ND .
+
+*@par Attributes:
+*@li num_output:An integer identifying the num_output in the op. Default to 0.
+*@li expose_hidden:A bool identifying the expose_hidden in the op. Default to false.
+*@li need_output_last:A bool identifying the need_output_last in the op. Default to false.
+*@li forget_bias:A float identifying the forget bias in the op. Default to 0.
+
+*@par Outputs:
+*five outputs:
+*@li y:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li last_output_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li last_output_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@par Third-party framework compatibility:
+* Compatible with the Caffe operator LSTM.
+*@par Restrictions:
+* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(DynamicLSTMV2)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(cont, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(w_xc_x_static, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(h0, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(c0, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wci, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wcf, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(wco, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(mask, TensorType({DT_UINT8}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(last_output_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(last_output_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(num_output, Int, 0)
+    .ATTR(expose_hidden, Bool, false)
+    .ATTR(need_output_last, Bool, false)
+    .ATTR(forget_bias, Float, 0.0)
+    .OP_END_FACTORY_REG(DynamicLSTMV2)
+
 /**
 *@brief: LSTMInputGrad calculation.
 *@par Inputs:
@@ -297,6 +528,60 @@ REG_OP(LSTMInputGrad)
     .OP_END_FACTORY_REG(LSTMInputGrad)
+
+
+/**
+*@brief: Dynamic LSTM Cell grad calculation. Calculate the gradient of gates and cell state.
+*@par Inputs:
+*twelve inputs:
+*@li init_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li dh:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li dc:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li i:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li j:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li f:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li o:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li tanhct:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li mask:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li t_state:A Tensor of type int32, for the current t state . \n
+
+*@par Attributes:
+*@li forget_bias:A float identifying the forget bias in the op. Default to 1.
+*@li activation:A string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported . \n
+*@li direction:A string that marks the calculation sequence of the operator. Default to "Forward".
+*@li gate_order:A string marking the order of the four gates. Default to "ijfo".
+
+*@par Outputs:
+*two outputs:
+*@li dgate:A 4D Tensor. Must be one of the following types: float16.
+*@li dct_1:A 4D Tensor. Must be one of the following types: float16, float32.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(DynamicLSTMGradCell)
+    .INPUT(init_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(dc, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(i, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(j, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(f, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(o, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(tanhct, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(mask, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(t_state, TensorType({DT_INT32}))
+    .OUTPUT(dgate, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(dct_1, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(forget_bias, Float, 1.0)
+    .ATTR(activation, String, "")
+    .ATTR(direction, String, "Forward")
+    .ATTR(gate_order, String, "ijfo")
+    .OP_END_FACTORY_REG(DynamicLSTMGradCell)
+
+
 /**
 *@brief: Basic LSTM Cell backward calculation.Calculate the gradient of input and hidden state.
 *@par Inputs:
@@ -475,9 +760,9 @@ REG_OP(BasicRNNCell)
     .OP_END_FACTORY_REG(BasicRNNCell)
 /**
-*@brief: DynamicGRU calculation.
+*@brief DynamicGRU calculation.
 *@par Inputs:
-*seven inputs: \n
+*seven inputs:
 *@li x:Must be one of the following types: float16. The format must be FRACTAL_NZ.
 *@li w:Must be one of the following types: float16. The format must be FRACTAL_Z.
 *@li b:Must be one of the following types: float16, float32. The format must be ND.
@@ -497,7 +782,7 @@ REG_OP(BasicRNNCell)
 *@li is_training:An bool identifying is training in the op. Default to true.
 *@par Outputs:
-*five outputs: \n
+*five outputs:
 *@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li r:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
@@ -531,9 +816,9 @@ REG_OP(DynamicGRU)
     .OP_END_FACTORY_REG(DynamicGRU)
 /**
-*@brief: DynamicGRUV2 calculation.
+*@brief DynamicGRUV2 calculation.
 *@par Inputs:
-*seven inputs: \n
+*seven inputs:
 *@li x:Must be one of the following types: float16. The format must be FRACTAL_NZ.
 *@li weight_input:Must be one of the following types: float16. The format must be FRACTAL_Z.
 *@li weight_hidden:Must be one of the following types: float16. The format must be FRACTAL_Z.
@@ -555,16 +840,13 @@ REG_OP(DynamicGRU)
 *@li is_training:An bool identifying is training in the op. Default to true.
 *@par Outputs:
-*six outputs: \n
+*six outputs:
 *@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li update:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li reset:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li hidden_new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
-
-*@par Restrictions:
-*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
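+*@par Example:
+* A minimal IR-construction sketch (illustrative only, not part of the op
+* definition; it assumes the ge::op::DynamicGRUV2 wrapper class and the
+* set_input_*/set_attr_* accessors that REG_OP generates below):
+*@code
+*  auto x  = ge::op::Data("x");              // float16, FRACTAL_NZ
+*  auto wi = ge::op::Data("weight_input");   // float16, FRACTAL_Z
+*  auto wh = ge::op::Data("weight_hidden");  // float16, FRACTAL_Z
+*  ge::op::DynamicGRUV2 gru("dynamic_gru_v2");
+*  gru.set_input_x(x)
+*     .set_input_weight_input(wi)
+*     .set_input_weight_hidden(wh)
+*     .set_attr_gate_order("zrh");  // "rzh" is the other option
+*@endcode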
 */
 REG_OP(DynamicGRUV2)
     .INPUT(x, TensorType({DT_FLOAT16}))
@@ -592,6 +874,68 @@ REG_OP(DynamicGRUV2)
     .ATTR(is_training, Bool, true)
     .OP_END_FACTORY_REG(DynamicGRUV2)
+
+
+/**
+*@brief DynamicGRUV2Hidden calculation.
+*@par Inputs:
+*five inputs:
+*@li x_weight_input:Must be one of the following types: float32. The format must be FRACTAL_NZ.
+*@li weight_hidden:Must be one of the following types: float16. The format must be FRACTAL_Z.
+*@li bias_hidden:Must be one of the following types: float16, float32. The format must be ND.
+*@li seq_length:Must be one of the following types: int32. The format must be ND.
+*@li init_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+
+*@par Attributes:
+*@li direction:A string identifying the direction in the op. Default to "UNIDIRECTIONAL".
+Only UNIDIRECTIONAL is currently supported.
+*@li cell_depth:An integer identifying the cell depth in the op. Default to 1.
+*@li keep_prob:A float identifying the keep prob in the op. Default to 1.
+*@li cell_clip:A float identifying the cell clip in the op. Default to -1.
+*@li num_proj:An integer identifying the num projection in the op. Default to 0.
+*@li time_major:A bool identifying the time major in the op. Default to true.
+*@li activation:A string identifying the type of activation function in the op. Default to "tanh".
+Only tanh is currently supported.
+*@li gate_order:A string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option.
+*@li reset_after:A bool identifying whether to apply the reset gate after matrix multiplication. Default to true.
+*@li is_training:A bool identifying whether the op is in training mode. Default to true.
+
+*@par Outputs:
+*six outputs:
+*@li y:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li output_h:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li update:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li reset:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li hidden_new:Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(DynamicGRUV2Hidden)
+    .INPUT(x_weight_input, TensorType({DT_FLOAT32}))
+    .INPUT(weight_hidden, TensorType({DT_FLOAT16}))
+    .OPTIONAL_INPUT(bias_hidden, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(seq_length, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(output_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(update, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(reset, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(new, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(hidden_new, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(direction, String, "UNIDIRECTIONAL")
+    .ATTR(cell_depth, Int, 1)
+    .ATTR(keep_prob, Float, 1.0)
+    .ATTR(cell_clip, Float, -1.0)
+    .ATTR(num_proj, Int, 0)
+    .ATTR(time_major, Bool, true)
+    .ATTR(activation, String, "tanh")
+    .ATTR(gate_order, String, "zrh")
+    .ATTR(reset_after, Bool, true)
+    .ATTR(is_training, Bool, true)
+    .OP_END_FACTORY_REG(DynamicGRUV2Hidden)
+
+
 /**
 *@brief: DynamicGRUV2Grad calculation.
 *@par Inputs:
@@ -618,7 +962,6 @@ REG_OP(DynamicGRUV2)
 *@li cell_clip:An float identifying the cell clip in the op. Default to -1.
 *@li num_proj:An integer identifying the num projection in the op. Default to 0.
 *@li time_major:An bool identifying the time major in the op. Default to true.
-*@li bias_type:An string identifying the type of bias_type function in the op. Default to "double_bias".
 *@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option.
 *@li reset_after:An bool identifying whether to apply reset gate after matrix multiplication. Default to true.
@@ -630,6 +973,9 @@ REG_OP(DynamicGRUV2)
 *@li db_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li dx:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
 REG_OP(DynamicGRUV2Grad)
     .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -658,7 +1004,6 @@ REG_OP(DynamicGRUV2Grad)
     .ATTR(cell_clip, Float, -1.0)
     .ATTR(num_proj, Int, 0)
     .ATTR(time_major, Bool, true)
-    .ATTR(bias_type, String, "double_bias")
     .ATTR(gate_order, String, "zrh")
     .ATTR(reset_after, Bool, true)
     .OP_END_FACTORY_REG(DynamicGRUV2Grad)
@@ -667,7 +1012,7 @@ REG_OP(DynamicGRUV2Grad)
 *@brief: GRUV2HiddenGrad calculation.
 *@par Inputs:
 *nine inputs: \n
-*@li weight_hidden:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li dh_pre_t:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li init_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
@@ -678,6 +1023,7 @@ REG_OP(DynamicGRUV2Grad)
 *@li hidden_new:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@par Attributes:
+*@li t_state:An int identifying the current t state. The value must be within [0, 4]. Default to 0.
 *@li gate_order:An string identifying the gate order in weight and bias. Default to "zrh". "rzh" is another option.
 *@par Outputs:
@@ -685,10 +1031,12 @@ REG_OP(DynamicGRUV2Grad)
 *@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li dgate_h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
 *@li dnt_x:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
-REG_OP(GRUV2HiddenGrad)
-    .INPUT(weight_hidden, TensorType({DT_FLOAT16, DT_FLOAT}))
-    .INPUT(init_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+REG_OP(GRUV2HiddenGradCell)
+    .INPUT(dh_pre_t, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(h, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
     .INPUT(dh, TensorType({DT_FLOAT16, DT_FLOAT}))
@@ -699,8 +1047,197 @@
     .OUTPUT(dh_prev, TensorType({DT_FLOAT16, DT_FLOAT}))
     .OUTPUT(dgate_h, TensorType({DT_FLOAT16, DT_FLOAT}))
     .OUTPUT(dnt_x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(t_state, Int, 0)
     .ATTR(gate_order, String, "zrh")
-    .OP_END_FACTORY_REG(GRUV2HiddenGrad)
+    .OP_END_FACTORY_REG(GRUV2HiddenGradCell)
+
+/**
+* @brief Calculates the reversed outputs of the function "embedding". \n
+
+* @par Inputs:
+* Two inputs, including:
+* @li grad: A mutable Tensor of word grad. Must be one of the following types:
+* float32.
+* @li indices: A mutable word index Tensor of the int32 type. \n
+
+* @par Attributes:
+* @li num_weights: An int attr indicating the total number of words in the dict. \n
+
+* @li padding_idx: An int attr indicating which word's gradient is filled with zeros. Defaults to "-1". \n
+
+* @li scale_grad_by_freq: An optional bool. Defaults to "False".
+* If "True", "grad_weight" will be scaled by word frequency.
+* If "False", "grad_weight" will not be scaled by word frequency. \n
+
+* @par Outputs:
+* @li y: A mutable output Tensor of the new word grad. Has the same type as "grad". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator EmbeddingDenseGrad.
+*/
+REG_OP(EmbeddingDenseGrad)
+    .INPUT(grad, TensorType({ DT_FLOAT32 }))  /* "First operand." */
+    .INPUT(indices, TensorType({ DT_INT32 }))  /* "Second operand." */
+    .OUTPUT(y, TensorType({ DT_FLOAT32 }))  /* "Result, has same element type as two inputs" */
+    .REQUIRED_ATTR(num_weights, Int)
+    .ATTR(padding_idx, Int, -1)
+    .ATTR(scale_grad_by_freq, Bool, false)
+    .OP_END_FACTORY_REG(EmbeddingDenseGrad)
+
+/**
+*@brief CommonLSTM calculation.
+*@par Inputs:
+*eight inputs: \n
+*@li x:Each time step is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li w:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li r:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_ZN_LSTM.
+*@li b:An optional input. Each direction is a 1D Tensor. Must be one of the following types: float16, float32. The format must be ND.
+*@li sequence_lens:An optional input. A 1D Tensor. Must be one of the following types: int32. The format must be ND.
+*@li initial_h:An optional input. Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li initial_c:An optional input. Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li p:An optional input. Each direction is a 1D Tensor. Must be one of the following types: float16, float32. The format must be ND .
+
+*@par Attributes:
+*@li activation_alpha:Optional scaling values used by some activation functions. Empty is currently supported.
+*@li activation_beta:Optional scaling values used by some activation functions. Empty is currently supported.
+*@li activations:The list of activation functions. Empty is currently supported.
+*@li clip:A float identifying the cell clip in the op. Default to -1.
+*@li direction:Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward(default), reverse, or bidirectional.
+*@li hidden_size:Number of neurons in the hidden layer. Reserved.
+*@li input_forget:Couple the input and forget gates if 1. Reserved.
+
+*@par Outputs:
+*three outputs: \n
+*@li y:The first dimension is the time step, the second dimension is the direction, and the rest forms a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li y_h:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*@li y_c:Each direction is a 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ.
+*/
+
+REG_OP(CommonLSTM)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(r, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(sequence_lens, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(initial_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(initial_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(p, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y_c, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(activation_alpha, ListFloat, {})
+    .ATTR(activation_beta, ListFloat, {})
+    .ATTR(activations, ListString, {})
+    .ATTR(clip, Float, -1.0)
+    .ATTR(direction, String, "forward")
+    .REQUIRED_ATTR(hidden_size, Int)
+    .ATTR(input_forget, Int, 0)
+    .OP_END_FACTORY_REG(CommonLSTM)
+
+/**
+ * @brief Calculate the mask. According to hidden_size and num_step, convert seq_length to mask.
+ *
+ * @par Inputs:
+ * @li seq_length: A 1D Tensor. Must be one of the following types: int32. Record the current length of each batch. [batch_size].
+ * @li b: A 1D Tensor. Must be one of the following types: fp16/fp32. Record the hidden_size. [4 * hidden_size].
+ * @li x: A 3D Tensor. Must be one of the following types: fp16/fp32. Record the num_step/batch_size/input_size. [num_step, batch_size, input_size].
+ *
+ * @par Outputs:
+ * seq_mask: A 3D Tensor. Must be one of the following types: fp16/fp32, with the shape of [num_step, batch_size, hidden_size]. Has the same type as "b" . \n
+ *
+ * @par Restrictions:
+ * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+ */
+REG_OP(RnnGenMaskV2)
+    .INPUT(seq_length, TensorType({DT_INT32}))
+    .INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(seq_mask, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OP_END_FACTORY_REG(RnnGenMaskV2)
+
+/**
+* @brief Common GRU calculation.
+
+* @par Inputs:
+* Six inputs, including:
+* @li x: The input sequences packed (and potentially padded) into one 3D Tensor(float16). The format must be FRACTAL_NZ.
+* @li w: The weight tensor for the gates is a 3D Tensor(float16). The format must be FRACTAL_Z.
+* @li r: The recurrence weight tensor is a 3D Tensor(float16). The format must be FRACTAL_Z.
+* @li b: The bias tensor for the gates. The format must be ND.
+* @li sequence_lens: Optional tensor specifying lengths of sequences(int32). The format must be ND.
+* @li initial_h: Optional initial value of the hidden(float16,float32). The format must be FRACTAL_NZ.
+
+* @par Attributes:
+* @li activation_alpha: Optional scaling values used by some activation functions. \n
+* @li activation_beta: Optional scaling values used by some activation functions. \n
+* @li activations: A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. \n
+* @li clip: Cell clip threshold. \n
+* @li direction: Specify if the RNN is forward, reverse, or bidirectional. \n
+* @li hidden_size: Number of neurons in the hidden layer. \n
+* @li linear_before_reset: When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate. \n
+
+* @par Outputs:
+* @li y: A Tensor that concats all the intermediate output values of the hidden(float16,float32). The format must be FRACTAL_NZ.
+* @li y_h: The last output value of the hidden(float16,float32). The format must be FRACTAL_NZ.
+*/
+REG_OP(CommonGRU)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(r, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OPTIONAL_INPUT(sequence_lens, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(initial_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(y_h, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(activation_alpha, ListFloat, {})
+    .ATTR(activation_beta, ListFloat, {})
+    .ATTR(activations, ListString, {})
+    .ATTR(clip, Float, -1.0)
+    .ATTR(direction, String, "forward")
+    .REQUIRED_ATTR(hidden_size, Int)
+    .ATTR(linear_before_reset, Int, 0)
+    .OP_END_FACTORY_REG(CommonGRU)
+/**
+* @brief Computes sums, means or maxes of "bags" of embeddings. \n
+
+* @par Inputs:
+* Four inputs, including:
+* @li weight: A mutable Tensor of word embedding weights. Must be one of the following types:
+* float32.
+* @li indices: A mutable word index Tensor of the int32 type. \n
+* @li offsets: A mutable word index Tensor of the int32 type. \n
+* @li per_sample_weights: A mutable Tensor of type float32, or None to indicate all weights should be taken to be 1.
+* If specified, per_sample_weights must have exactly the same shape as input
+* and is treated as having the same offsets, if those are not None.
+* Only supported for mode='sum'. \n
+
+* @par Attributes:
+* @li mode: A string attr taking "sum", "mean" or "max". Specifies the way to reduce the bag. \n
+
+* @li scale_grad_by_freq: An optional bool. Defaults to "False".
+* If "True", "grad_weight" will be scaled by word frequency.
+* If "False", "grad_weight" will not be scaled by word frequency. \n
+* @li sparse: If "True", the gradient w.r.t. the "weight" matrix will be a sparse tensor. \n
+* @li include_last_offset: If "True", "offsets" has one additional element, where the last element
+* is equivalent to the size of indices. This matches the CSR format. \n
+
+* @par Outputs:
+* @li y: A mutable output Tensor of the reduced embeddings. Has the same type as "weight". \n
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator EmbeddingBag.
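+* @par Example:
+* A minimal IR-construction sketch (illustrative only, not part of the op
+* definition; it assumes the ge::op::EmbeddingBag wrapper class and the
+* set_input_*/set_attr_* accessors that REG_OP generates below):
+* @code
+*  auto weight  = ge::op::Data("weight");   // float32 embedding table
+*  auto indices = ge::op::Data("indices");  // int32 word indices
+*  auto offsets = ge::op::Data("offsets");  // int32 bag offsets
+*  ge::op::EmbeddingBag bag("embedding_bag");
+*  bag.set_input_weight(weight)
+*     .set_input_indices(indices)
+*     .set_input_offsets(offsets)
+*     .set_attr_mode("sum");  // "sum", "mean" or "max"
+* @endcode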
+*/
+REG_OP(EmbeddingBag)
+    .INPUT(weight, TensorType({DT_FLOAT32}))
+    .INPUT(indices, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(offsets, TensorType({DT_INT32}))
+    .OPTIONAL_INPUT(per_sample_weights, TensorType({DT_FLOAT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT32}))
+    .ATTR(mode, String, "mean")
+    .ATTR(scale_grad_by_freq, Bool, false)
+    .ATTR(sparse, Bool, false)
+    .ATTR(include_last_offset, Bool, false)
+    .OP_END_FACTORY_REG(EmbeddingBag)
 } // namespace ge
 #endif // OPS_BUILT_IN_OP_PROTO_INC_RNN_H_
diff --git a/third_party/fwkacllib/inc/ops/rpn_ops.h b/third_party/fwkacllib/inc/ops/rpn_ops.h
index b7649a44..089af326 100644
--- a/third_party/fwkacllib/inc/ops/rpn_ops.h
+++ b/third_party/fwkacllib/inc/ops/rpn_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/save_ops.h b/third_party/fwkacllib/inc/ops/save_ops.h
index 0ce473b7..5ce6c2e0 100644
--- a/third_party/fwkacllib/inc/ops/save_ops.h
+++ b/third_party/fwkacllib/inc/ops/save_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/sdca_ops.h b/third_party/fwkacllib/inc/ops/sdca_ops.h
index cbd9839d..34c6a268 100644
--- a/third_party/fwkacllib/inc/ops/sdca_ops.h
+++ b/third_party/fwkacllib/inc/ops/sdca_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/selection_ops.h b/third_party/fwkacllib/inc/ops/selection_ops.h
index 2c99e82e..1c26e033 100644
--- a/third_party/fwkacllib/inc/ops/selection_ops.h
+++ b/third_party/fwkacllib/inc/ops/selection_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -239,6 +239,30 @@ REG_OP(GatherV2D)
     .REQUIRED_ATTR(axis, Int)
     .OP_END_FACTORY_REG(GatherV2D)
+/**
+*@brief Gathers values along an axis specified by "dim" . \n
+
+*@par Inputs:
+*@li x: A Tensor. Must be one of the following types: float16, float32, int32, int64.
+*@li index: A Tensor. Must be one of the following types: int64 . \n
+
+*@par Attributes:
+* dim: An optional int. The axis along which to index . \n
+
+*@par Outputs:
+* y: A Tensor. Has the same type as "x" . \n
+
+*@par Third-party framework compatibility
+*Compatible with the PyTorch operator Gather.
+*/
+
+REG_OP(GatherElements)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
+    .INPUT(index, TensorType({DT_INT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
+    .ATTR(dim, Int, 0)
+    .OP_END_FACTORY_REG(GatherElements)
+
 /**
 *@brief Extracts a strided slice of a tensor. Roughly speaking, this op
     extracts a slice of size (end-begin)/stride from the given input tensor.
@@ -275,8 +299,6 @@ REG_OP(GatherV2D)
 *@par Outputs:
 *y: A Tensor. Has the same type as "x" . \n
-*@attention Constraints:
-
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSlice.
 */
@@ -327,8 +349,6 @@ REG_OP(StridedSlice)
 *@par Outputs:
 *y: A Tensor. Has the same type as "x" . \n
-*@attention Constraints:
-
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSlice.
@@ -385,8 +405,6 @@ REG_OP(StridedSliceD)
 *@par Outputs:
 *output: A Tensor. Has the same type as "dy" . \n
-*@attention Constraints:
-
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSliceGradD.
@@ -444,8 +462,6 @@ REG_OP(StridedSliceGradD)
 *@par Outputs:
 *output: A Tensor has the same type as "dy" . \n
-*@attention Constraints:
-
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator StridedSliceGrad.
 */
@@ -486,6 +502,38 @@ REG_OP(UnsortedSegmentSum)
     .OUTPUT(y, TensorType::NumberType())
     .OP_END_FACTORY_REG(UnsortedSegmentSum)
+/**
+*@brief Creates a one-dimensional tensor of size steps whose values are evenly spaced from start to
+* end, inclusive, on a logarithmic scale with base base. \n
+
+*@par Inputs:
+*One input, including:
+* @li assist: A tensor. Must be one of the following types:
+* float16, float32. \n
+
+* @par Attributes:
+* @li start: A required float. Used to select the start. \n
+* @li end: A required float. Used to select the end. \n
+* @li steps: An optional int. Defaults to 100. \n
+* @li base: An optional float. Defaults to 10.0. \n
+* @li dtype: An optional int. Defaults to 1. \n
+
+*@par Outputs:
+*y: A Tensor with the same type and shape as "assist". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator logspace. \n
+*/
+REG_OP(LogSpaceD)
+    .INPUT(assist, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .REQUIRED_ATTR(start, Float)
+    .REQUIRED_ATTR(end, Float)
+    .ATTR(steps, Int, 100)
+    .ATTR(base, Float, 10.0)
+    .ATTR(dtype, Int, 1)
+    .OP_END_FACTORY_REG(LogSpaceD)
+
 /**
 *@brief Computes the sum along segments of a tensor . \n
@@ -796,6 +844,34 @@ REG_OP(SliceD)
     .REQUIRED_ATTR(size, ListInt)
     .OP_END_FACTORY_REG(SliceD)
+/**
+*@brief Extracts a slice from a tensor.
+* This operation extracts a slice of size "size" from a tensor "x"
+* starting at the location specified by "offsets" . \n
+
+*@par Inputs:
+*Two inputs, including:
+*@li x: A Tensor. Must be one of the following types:
+* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
+* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32 . \n
+*@li offsets: A Tensor of type int32 or int64. The starting location for the slice.
+
+*@par Attributes:
+*@li size: A required list of int. The size of the slice . \n
+
+*@par Outputs:
+*y: A Tensor. Has the same type as "x". The slice extracted from the tensor.
+*@par Restrictions:
+*Warning: THIS FUNCTION IS DEPRECATED. Please use Slice instead.
+*/
+REG_OP(SliceDV2)
+    .INPUT(x, TensorType::BasicType())
+    .INPUT(offsets, TensorType::IndexNumberType())
+    .OUTPUT(y, TensorType::BasicType())
+    .REQUIRED_ATTR(size, ListInt)
+    .OP_END_FACTORY_REG(SliceDV2)
+
 /**
 * @brief Finds values and indices of the "k" largest elements for the last
 * dimension . \n
@@ -829,8 +905,8 @@
 * @li sorted = true
 * @li It's unstable sorted indices on the platform of Ascend310
-* @par Third-party framework compatibility
-* @li Compatible with the TensorFlow operator TopK.
+* @par Restrictions:
+* Warning: THIS FUNCTION IS DEPRECATED. Please use TopKV2 instead.
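+* @par Example:
+* A migration sketch to the recommended TopKV2 (illustrative only; it assumes
+* the ge::op::TopKV2 wrapper class and the set_input_*/set_attr_* accessors
+* that REG_OP generates further below, with "k" passed as a tensor input
+* rather than as an attribute):
+* @code
+*  auto x = ge::op::Data("x");
+*  auto k = ge::op::Data("k");  // 0D tensor of type int32
+*  ge::op::TopKV2 topk("top_k_v2");
+*  topk.set_input_x(x)
+*      .set_input_k(k)
+*      .set_attr_sorted(true);
+* @endcode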
 */
 REG_OP(TopKD)
     .INPUT(x, TensorType::RealNumberType())
@@ -855,6 +931,44 @@ REG_OP(TopKD)
 * Number of top elements to look for along the last dimension (along each row
 * for matrices) . \n
+* @par Attributes:
+* @li sorted: An optional bool. Defaults to true.
+* If true, the resulting "k" elements will be sorted by the values in descending
+* order.
+* @li dim: An optional int. Defaults to -1. Reserved for future use.
+* @li largest: An optional bool. Defaults to true. Reserved for future use. \n
+
+* @par Outputs:
+* @li values: A Tensor, specifying the sorted data. Has the same type as
+* "x".
+* @li indices: A Tensor of type int32, specifying the indices of sorted data . \n
+
+* @see TopK()
+* @par Third-party framework compatibility
+* @li Compatible with the TensorFlow operator TopKV2.
+*/
+REG_OP(TopKV2)
+    .INPUT(x, TensorType::RealNumberType())
+    .INPUT(k, TensorType({DT_INT32}))
+    .OUTPUT(values, TensorType::RealNumberType())
+    .OUTPUT(indices, TensorType({DT_INT32}))
+    .ATTR(sorted, Bool, true)
+    .ATTR(dim, Int, -1)
+    .ATTR(largest, Bool, true)
+    .OP_END_FACTORY_REG(TopKV2)
+
+/**
+* @brief Finds values and indices of the "k" largest elements for the last
+* dimension . \n
+
+* @par Inputs:
+* Two inputs, including:
+* @li x: A 1D or higher tensor of type BasicType, with the last dimension
+* at least "k".
+* @li k: A 0D Tensor of type int32.
+* Number of top elements to look for along the last dimension (along each row
+* for matrices) . \n
+
 * @par Attributes:
 * @li sorted: An optional bool. Defaults to true.
 * If true, the resulting "k" elements will be sorted by the values in descending
@@ -876,15 +990,17 @@ REG_OP(TopK)
     .OUTPUT(values, TensorType::RealNumberType())
     .OUTPUT(indices, TensorType({DT_INT32}))
     .ATTR(sorted, Bool, true)
+    .ATTR(largest, Bool, true)
+    .ATTR(dim, Int, -1)
     .OP_END_FACTORY_REG(TopK)
 /**
 *@brief Creates a new tensor by applying sparse "updates" to individual values or slices within a tensor (initially zero for numeric, empty for string) of the given "shape" according to "indices" . \n
 *@par Inputs:
 *Inputs including:
-* @li indices: A required index tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8.
-* @li shape: A required list of int32, specifying the output shape.
+* @li indices: A required index tensor. Must be one of the following types: int32 or int64.
+* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8...
+* @li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y:A output Tensor with same datatype as "updates" . \n
@@ -895,7 +1011,7 @@ REG_OP(TopK)
 * Compatible with the TensorFlow operator ScatterNd.
 */
 REG_OP(ScatterNd)
-    .INPUT(indices, TensorType::BasicType())
+    .INPUT(indices, TensorType::IndexNumberType())
     .INPUT(x, TensorType::BasicType())
     .INPUT(shape, TensorType::IndexNumberType())
     .OUTPUT(y, TensorType::BasicType())
@@ -908,11 +1024,11 @@ REG_OP(ScatterNd)
 *@par Inputs:
 *Inputs including:
 * @li indices: A required index tensor. Must be one of the following types:
-  * float, float16, int32, int16. format:ND.
+  * int32 or int64. format:ND.
 * @li x: A required slice tensor. Must be one of the following types:
-  * float, float16, int32, int16. format:ND.
+  * float16, float, int32, int8, uint8. format:ND.
 *@par Attributes:
-* @li shape: A required list of int32, specifying the output shape.
+*@li shape: A required list of int32 or int64, specifying the output shape.
 *@par Outputs:
 *y: A Tensor. Has the same type as "x". format:ND . \n
@@ -927,8 +1043,8 @@ REG_OP(ScatterNd)
 */
 REG_OP(ScatterNdD)
     .INPUT(indices, TensorType::IndexNumberType())
-    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
-    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
     .REQUIRED_ATTR(shape, ListInt)
     .OP_END_FACTORY_REG(ScatterNdD)
@@ -1752,6 +1868,33 @@ REG_OP(Crop)
     .REQUIRED_ATTR(offsets, ListInt)
     .OP_END_FACTORY_REG(Crop)
+/**
+*@brief Returns a namedtuple (values, indices) where "values" is the cumulative
+* minimum of elements of the input in the dimension "dim",
+* and "indices" is the index location of each minimum value found in the dimension "dim". \n
+
+*@par Inputs:
+*One input, including:
+* @li x: A tensor. Must be one of the following types:
+* float16, float32, int32, uint32, int8, uint8. \n
+
+*@par Attributes:
+* @li axis: A required int. The axis along which to compute the cumulative minimum. \n
+
+*@par Outputs:
+* y: A Tensor with the same type and shape as "x". \n
+* indices: A Tensor of type int32 with the same shape as "x". \n
+
+*@par Third-party framework compatibility
+*Compatible with the Pytorch operator Cummin. \n
+*/
+REG_OP(Cummin)
+    .INPUT(x, TensorType::BasicType())
+    .OUTPUT(y, TensorType::BasicType())
+    .OUTPUT(indices, TensorType::BasicType())
+    .REQUIRED_ATTR(axis, Int)
+    .OP_END_FACTORY_REG(Cummin)
+
 /**
 *@brief Extends the input with copies of data along a specified dimension. For example:
 *(1) If x = [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]], with shape (2, 3, 2);
@@ -1921,6 +2064,249 @@ REG_OP(CumulativeLogsumexpD)
     .ATTR(exclusive, Bool, false)
     .ATTR(reverse, Bool, false)
     .OP_END_FACTORY_REG(CumulativeLogsumexpD)
+
+/**
+* @brief Add updates to var according to axis and indices.
+
+* @par Inputs:
+* Three inputs, including:
+* @li var: A Tensor. Must be one of the following types:
+* float16, float32, int16, int32, int8, uint8.
+* @li indices: A Tensor of the indices, type should be int32.
+* @li updates: A Tensor of the same type as "var". \n
+
+* @par Attributes:
+* @li axis: A required int to specify the axis to perform indices add. \n
+
+* @par Outputs:
+* @li var: A Tensor. Same as input "var".
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator index_add_.
+*/
+REG_OP(InplaceIndexAdd)
+    .INPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                            DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .INPUT(indices, TensorType({DT_INT32}))
+    .INPUT(updates, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                                DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .OUTPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
+                             DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+    .REQUIRED_ATTR(axis, Int)
+    .OP_END_FACTORY_REG(InplaceIndexAdd)
+
+/**
+* @brief Replace the value of "x" with "value" according to "mask".
+* @par Inputs:
+* three inputs, including:
+* @li x: A Tensor of dtype float16, float32, int64, int32 or int8.
+* @li mask: A Tensor of dtype bool.
+* @li value: A Tensor of dtype float16, float32, int64, int32 or int8.
+
+* @par Outputs:
+* @li y: A tensor. Must be one of the following dtypes:
+* float16, float32, int64, int32, int8.
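+* @par Example:
+* A minimal IR-construction sketch (illustrative only, not part of the op
+* definition; it assumes the ge::op::MaskedFill wrapper class and the
+* set_input_* accessors that REG_OP generates below):
+* @code
+*  auto x     = ge::op::Data("x");      // e.g. float16/float32
+*  auto mask  = ge::op::Data("mask");   // bool
+*  auto value = ge::op::Data("value");  // fill value, same dtype family as "x"
+*  ge::op::MaskedFill fill("masked_fill");
+*  fill.set_input_x(x)
+*      .set_input_mask(mask)
+*      .set_input_value(value);
+* @endcode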
+*/ +REG_OP(MaskedFill) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64})) + .INPUT(mask, TensorType({DT_BOOL})) + .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64})) + .OP_END_FACTORY_REG(MaskedFill) + +/** +* @brief Choose the value of X with value according to mask. + +* @par Inputs: +* two inputs, including: +* @li x: A Tensor of dtype is float16 or float32. +* @li mask: A Tensor of dtype is bool. \n + +* @par Outputs: +* @li y: A tensor with the same type as x. \n + +* @par Third-party framework compatibility +* Compatible with the Numpy operator select. +* Replaces the pytorch operator masked_select in some scenarios.\n +*/ +REG_OP(MaskedSelectV2) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(mask, TensorType({DT_BOOL})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .OP_END_FACTORY_REG(MaskedSelectV2) + +/** +* @brief Slice a tensor at its last dim, e.x. a[..., begin:end:stride]. \n + +* @par Inputs: +* One inputs, including: +* @li x: A Tensor. Must be one of the following types: float16, float32, int16, int32. + +* @par Attributes: +* @li start: An attribute of type Int, start index of last dim. \n +* @li end: An attribute of type Int, end index of last dim. \n +* @li stride: An attribute of type Int, stride of slice. \n + +* @par Outputs: +* @li y: A Tensor. Has the same type as "x". \n + +* @par Third-party framework compatibility +* No compatibility +*/ +REG_OP(SliceLastDim) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .REQUIRED_ATTR(start, Int) + .REQUIRED_ATTR(end, Int) + .ATTR(stride, Int, 1) + .OP_END_FACTORY_REG(SliceLastDim) + +/** +* @brief Extracts a strided slice of a tensor. Roughly speaking, this op \n +* extracts a slice of size (end-begin)/stride from the given input tensor. \n +* Starting at the location specified by begin the slice continues by \n +* adding stride to the index until all dimensions are not less than end. \n +* +* @par Inputs: +* Four inputs, including: +* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, \n +* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16, \n +* complex128, float16, uint32, uint64, complex64, complex128. \n +* @li begin: A Tensor of type int32 or int64, for the index of the first value to select. +* +* @li end: A Tensor of type int32 or int64, for the index of the last value to select. +* +* @li axes: A Tensor of type int32 or int64, indicate axis to be select. +* +* @li strides: A Tensor of type int32 or int64, for the increment. +* +* @par Attributes: +* @li begin_mask: A Tensor of type int32. \n +* A bitmask where a bit "i" being "1" means to ignore the begin \n +* value and instead use the largest interval possible. +* @li end_mask: A Tensor of type int32. \n +* Analogous to "begin_mask". +* @li ellipsis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" being "1" means the "i"th position \n +* is actually an ellipsis. +* @li new_axis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" being "1" means the "i"th \n +* specification creates a new shape 1 dimension. +* @li shrink_axis_mask: A Tensor of type int32. \n +* A bitmask where bit "i" implies that the "i"th \n +* specification should shrink the dimensionality. 
+* +* @par Outputs: +* y: A Tensor. Has the same type as "x". +* +* @attention Constraints: +* +* @par Third-party framework compatibility +* Compatible with the TensorFlow operator StridedSliceV2. +*/ +REG_OP(StridedSliceV2) + .INPUT(x, TensorType::BasicType()) + .INPUT(begin, TensorType::IndexNumberType()) + .INPUT(end, TensorType::IndexNumberType()) + .OPTIONAL_INPUT(axes, TensorType::IndexNumberType()) + .OPTIONAL_INPUT(strides, TensorType::IndexNumberType()) + .ATTR(begin_mask, Int, 0) + .ATTR(end_mask, Int, 0) + .ATTR(ellipsis_mask, Int, 0) + .ATTR(new_axis_mask, Int, 0) + .ATTR(shrink_axis_mask, Int, 0) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(StridedSliceV2) + +/** +*@brief Fills the elements of the input tensor with value val by selecting the indices in the order given in index. \n + +*@par Inputs: +*Three inputs, including: +* @li x: A tensor. Must be one of the following types: +* float16, float32, int32. \n +*@li assist1: A tensor. Must be one of the following types: +* float16, float32, int32. \n +*@li assist2: A tensor. Must be one of the following types: +* float16, float32, int32. \n + +* @par Attributes: +* @li dim: A required int. Used to select the dimension of this tensor. \n + +*@par Outputs: +*y: A Tensor with the same type and shape of input_x's. \n + +*@par Third-party framework compatibility +*Compatible with the Pytorch operator IndexFill. \n +*/ +REG_OP(IndexFillD) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(assist1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .INPUT(assist2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) + .REQUIRED_ATTR(dim, Int) + .OP_END_FACTORY_REG(IndexFillD) + +/** +* @brief For each row r of this and for each column c, do (*this)(r, c) += src(j, c), \n +* where j ranges from indexes[r].first through indexes[r].second - 1. \n +* In general indexes must be >= 0 and < src.NumRows(); \n +* but to represent an empty range you may use the pair (-1, -1) or any pair of numbers (i, j) such that i >= j. \n + +* @par Inputs: +* Three inputs, including: +* @li x: A Tensor. Must be one of the following types: +* float16, float32. +* @li indices: A Tensor of the indices, type should be int32. +* @li src: A Tensor of the same type as "x". \n + +* @par Outputs: +* @li x: A Tensor. Same as input "x". + +* @par Third-party framework compatibility +* Compatible with the kaldi operator AddRowRanges. +*/ +REG_OP(AddRowRanges) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(src, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(indices, TensorType({DT_INT32})) + .OUTPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .OP_END_FACTORY_REG(AddRowRanges) + +/** +*@brief masked fill tensor along with one axis by range. +* boxes. It is a customized masked fill range operator . \n + +*@par Inputs: +* Four inputs, including: +*@li x: input tensor. A ND Tensor of float32/float16/int32/int8 with shapes +* 1-D (D,), 2-D(N, D), 3-D(N, C, D) +*@li start: masked fill start pos. A 3D Tensor of int32 with +* shape (num, N). "num" indicates the number of loop masked fill, and the value N +* indicates the batch of ND Tensor, if input x shape is 1-D, N = 1. \n +*@li end: masked fill end pos. A 3D Tensor of int32 with +* shape (num, N). "num" indicates the number of loop masked fill, and the value N +* indicates the batch of ND Tensor. \n +*@li value: masked fill value. A 2D Tensor of float32/float16/int32/int8 with +* shape (num,). 
"num" indicates the number of loop masked fill + +*@par Attributes: +*@li axis: axis with masked fill of int32. Defaults to -1. + +*@par Outputs: +*y: A ND Tensor of float32/float16/int32/int8 with shapes 1-D (D,), 2-D(N, D), 3-D(N, C, D) + +* @par Restrictions: +* Warning: input shape's length must not be bigger than 1024 * 1024 * 1024. +*/ +REG_OP(MaskedFillRange) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .INPUT(start, TensorType({DT_INT32})) + .INPUT(end, TensorType({DT_INT32})) + .INPUT(value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) + .REQUIRED_ATTR(axis, Int) + .OP_END_FACTORY_REG(MaskedFillRange) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/set_ops.h b/third_party/fwkacllib/inc/ops/set_ops.h index 1d02fa15..04e04f1b 100644 --- a/third_party/fwkacllib/inc/ops/set_ops.h +++ b/third_party/fwkacllib/inc/ops/set_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/sparse_ops.h b/third_party/fwkacllib/inc/ops/sparse_ops.h index d7512790..a1fc9ee6 100644 --- a/third_party/fwkacllib/inc/ops/sparse_ops.h +++ b/third_party/fwkacllib/inc/ops/sparse_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -383,11 +383,11 @@ REG_OP(SparseFillEmptyRowsGrad) REG_OP(SparseTensorDenseMatMul) .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64})) .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \ - DT_COMPLEXT64, DT_COMPLEX128, DT_FLOAT16})) + DT_COMPLEXT64, DT_COMPLEX128, DT_FLOAT16, DT_INT64})) .INPUT(x1_shape, TensorType({DT_INT64})) - .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \ + .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEXT64, \ DT_COMPLEX128, DT_FLOAT16})) - .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \ + .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEXT64, \ DT_COMPLEX128, DT_FLOAT16})) .ATTR(adjoint_a, Bool, false) .ATTR(adjoint_b, Bool, false) diff --git a/third_party/fwkacllib/inc/ops/spectral_ops.h b/third_party/fwkacllib/inc/ops/spectral_ops.h index 64fa7814..34ccb398 100644 --- a/third_party/fwkacllib/inc/ops/spectral_ops.h +++ b/third_party/fwkacllib/inc/ops/spectral_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,6 +26,24 @@ namespace ge { +/** +*@brief Computes the inverse 1-dimensional discrete Fourier transform over the +inner-most dimension of `x`. \n + +*@par Inputs: +*@li x: A Tensor. Must be the following types: complex64, complex128. \n + +*@par Outputs: +*@li y: A complex tensor of the same rank as `x`. \n + +*@par Third-party framework compatibility +* Compatible with TensorFlow IFFT operator. 
+*/
+REG_OP(IFFT)
+    .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OP_END_FACTORY_REG(IFFT)
+
 /**
 *@brief Real-valued fast Fourier transform . \n

@@ -47,6 +65,84 @@ REG_OP(RFFT)
     .OUTPUT(y, TensorType({DT_COMPLEX64}))
     .OP_END_FACTORY_REG(RFFT)
 
+/**
+*@brief Inverse real-valued fast Fourier transform. \n
+
+*@par Inputs:
+*@li x: A complex64 tensor.
+*@li fft_length: An int32 tensor of shape [1]. The FFT length. \n
+
+*@par Outputs:
+*@li y: A float32 tensor of the same rank as `input`. The inner-most
+  dimension of `input` is replaced with the `fft_length` samples of its inverse
+  1D Fourier transform. \n
+
+*@par Third-party framework compatibility
+* Compatible with TensorFlow IRFFT operator.
+*/
+REG_OP(IRFFT)
+    .INPUT(x, TensorType({DT_COMPLEX64}))
+    .INPUT(fft_length, TensorType({DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT}))
+    .OP_END_FACTORY_REG(IRFFT)
+
+
+/**
+*@brief 2D fast Fourier transform. \n
+
+*@par Inputs:
+*@li x: A complex64 tensor.
+
+*@par Outputs:
+*@li y: A complex64 tensor of the same shape as `input`. The inner-most 2
+  dimensions of `input` are replaced with their 2D Fourier transform. \n
+
+*@par Third-party framework compatibility
+* Compatible with TensorFlow FFT2D operator.
+*/
+REG_OP(FFT2D)
+    .INPUT(x, TensorType({DT_COMPLEX64, DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_COMPLEX64, DT_COMPLEX128}))
+    .OP_END_FACTORY_REG(FFT2D)
+
+/**
+*@brief Calculates the 1-dimensional discrete Fourier transform on the
+innermost dimension of the input. \n
+
+*@par Inputs:
+*@li x: A Tensor. Must be one of the following types: complex64, complex128. \n
+
+*@par Outputs:
+*@li y: A complex tensor with the same shape as the input. The innermost dimension
+of the input is replaced by its 1-dimensional Fourier transform. \n
+
+*@par Third-party framework compatibility
+* Compatible with TensorFlow FFT operator.
+*/
+REG_OP(FFT)
+    .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OP_END_FACTORY_REG(FFT)
+
+/**
+*@brief Calculates the inverse 2-dimensional discrete Fourier transform over the
+inner-most 2 dimensions of the input. \n
+
+*@par Inputs:
+*@li x: A Tensor. Must be one of the following types: complex64, complex128. \n
+
+*@par Outputs:
+*@li y: A complex tensor with the same shape as the input. The inner-most 2 dimensions
+of the input are replaced by their inverse 2-dimensional Fourier transform. \n
+
+*@par Third-party framework compatibility
+* Compatible with TensorFlow IFFT2D operator.
+*/
+REG_OP(IFFT2D)
+    .INPUT(x, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OUTPUT(y, TensorType({DT_COMPLEX64,DT_COMPLEX128}))
+    .OP_END_FACTORY_REG(IFFT2D)
+
 } // namespace ge
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_SPECTRAL_OPS_H_
\ No newline at end of file
diff --git a/third_party/fwkacllib/inc/ops/split_combination_ops.h b/third_party/fwkacllib/inc/ops/split_combination_ops.h
index efe4715d..fe25a46f 100644
--- a/third_party/fwkacllib/inc/ops/split_combination_ops.h
+++ b/third_party/fwkacllib/inc/ops/split_combination_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
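A minimal reference sketch of the transform convention documented for the spectral ops above: the forward DFT is unscaled and the inverse applies a 1/N factor over the transformed dimension. This is a naive O(n^2) illustration of the semantics, not the kernel behind REG_OP(FFT)/REG_OP(IFFT); the function name is hypothetical.

#include <cmath>
#include <complex>
#include <vector>

// Reference DFT/IDFT over one dimension; illustrates what FFT (inverse=false)
// and IFFT (inverse=true) compute on the inner-most dimension of a tensor.
std::vector<std::complex<double>> Dft(const std::vector<std::complex<double>> &x,
                                      bool inverse) {
  const std::size_t n = x.size();
  const double pi = std::acos(-1.0);
  const double sign = inverse ? 1.0 : -1.0;  // e^{+i...} for inverse, e^{-i...} for forward
  std::vector<std::complex<double>> y(n);
  for (std::size_t k = 0; k < n; ++k) {
    std::complex<double> acc(0.0, 0.0);
    for (std::size_t t = 0; t < n; ++t) {
      const double angle = sign * 2.0 * pi * static_cast<double>(k * t) / static_cast<double>(n);
      acc += x[t] * std::complex<double>(std::cos(angle), std::sin(angle));
    }
    // The inverse transform carries the conventional 1/N normalization.
    y[k] = inverse ? acc / static_cast<double>(n) : acc;
  }
  return y;
}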
@@ -62,8 +62,8 @@ REG_OP(Split) *Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64 *@par Attributes: -*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value. -*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*@li split_dim: A required int32. Specifies the dimension along which to split. No default value. +*@li num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y:Dynamic output. A list of output tensors. Has the same type and format as "x" . \n @@ -94,12 +94,12 @@ REG_OP(SplitD) *@par Inputs: * Three inputs, including: *@li x: An ND Tensor. -*Must be one of the following types: -*@li size_splits: A list of int8, int16, int32, or int64. Specifies a list containing the sizes of each output tensor along the split dimension. -*@li split_dim: An int8, int16, int32, or int64. Specifies the dimension along which to split . \n +*Must be one of the types:float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32. +*@li size_splits: Must be one of the types:int32, int64. Specifies a list containing the sizes of each output tensor along the split dimension. +*@li split_dim: Must be the following type:int32. Specifies the dimension along which to split . \n *@par Attributes: -*num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y: Dynamic output.A list of output tensors. Has the same type and format as "x" . \n @@ -129,9 +129,9 @@ REG_OP(SplitV) *Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64 *@par Attributes: -*@li size_splits: A required list of int8, int16, int32, or int64. Specifies a list containing the sizes of each output tensor along the split dimension. -*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value. -*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value . \n +*@li size_splits: A required list of int32. Specifies a list containing the sizes of each output tensor along the split dimension. +*@li split_dim: A required int32. Specifies the dimension along which to split. No default value. +*@li num_split: A required int32. Specifies the number of output tensors. No default value . \n *@par Outputs: *y: Dynamic output.A list of output tensors. Has the same type and format as "x" . \n @@ -317,15 +317,15 @@ REG_OP(Concat) * int64, uint8, uint16, uint32, uint64, float16, float32, bool . It's a dynamic input. \n *@par Attributes: -*@li axis: A optional int, defaultvalue is 0. +*@li axis: A optional int, default value is 0. * Dimension along which to pack. The range is [-(R+1), R+1). *@li N: A required int. Number of tensors . \n *@par Outputs: *y: A Tensor. Has the same type as "x". + *@par Third-party framework compatibility -*Compatible with the TensorFlow operator Pack. -It's a dynamic output. +* Compatible with the TensorFlow operator Pack. 
*/ REG_OP(Pack) .DYNAMIC_INPUT(x, TensorType::BasicType()) diff --git a/third_party/fwkacllib/inc/ops/state_ops.h b/third_party/fwkacllib/inc/ops/state_ops.h index db1f5353..3c8e32b6 100644 --- a/third_party/fwkacllib/inc/ops/state_ops.h +++ b/third_party/fwkacllib/inc/ops/state_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/stateful_random_ops.h b/third_party/fwkacllib/inc/ops/stateful_random_ops.h index 366112d6..c2f65c6a 100644 --- a/third_party/fwkacllib/inc/ops/stateful_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateful_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/stateless_random_ops.h b/third_party/fwkacllib/inc/ops/stateless_random_ops.h index dad3c379..ff9daaa3 100644 --- a/third_party/fwkacllib/inc/ops/stateless_random_ops.h +++ b/third_party/fwkacllib/inc/ops/stateless_random_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/third_party/fwkacllib/inc/ops/string_ops.h b/third_party/fwkacllib/inc/ops/string_ops.h index 4a88bc79..f9cc2549 100644 --- a/third_party/fwkacllib/inc/ops/string_ops.h +++ b/third_party/fwkacllib/inc/ops/string_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,235 @@ #include "graph/operator_reg.h" namespace ge { +/** +*@brief Creates ngrams from ragged string data . \n + +*@par Inputs: +include: +*@li data:1-D.The values tensor of the ragged string tensor to make ngrams out of. +*@li data_splits:The splits tensor of the ragged string tensor to make ngrams out of . \n + +*@par Attributes: +* separator:The string to append between elements of the token. Use "" for no separator. +* ngram_widths:The sizes of the ngrams to create. +* left_pad:The string to use to pad the left side of the ngram sequence. Only used if pad_width != 0. +* right_pad:The string to use to pad the right side of the ngram sequence. Only used if pad_width != 0. +* pad_width:The number of padding elements to add to each side of each sequence. +* preserve_short_sequences: Preserve short sequences. \n + +*@par Outputs: +*@li ngrams:The values tensor of the output ngrams ragged tensor. +*@li ngrams_splits:The splits tensor of the output ngrams ragged tensor. \n + +*@see StringNGrams() + +*@par Third-party framework compatibility +*compatible with StringNGrams op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(StringNGrams) + .INPUT(data, TensorType({DT_STRING})) + .INPUT(data_splits, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(ngrams, TensorType({DT_STRING})) + .OUTPUT(ngrams_splits, TensorType({DT_INT32, DT_INT64})) + .REQUIRED_ATTR(separator, String) + .ATTR(ngram_widths, ListInt, {}) + .REQUIRED_ATTR(left_pad, String) + .REQUIRED_ATTR(right_pad, String) + .REQUIRED_ATTR(pad_width, Int) + .REQUIRED_ATTR(preserve_short_sequences, Bool) + .OP_END_FACTORY_REG(StringNGrams) + +/** +*@brief Decodes each string in `input` into a sequence of Unicode code points . \n + +*@par Inputs: +include: +*@li input:The text to be decoded. Can have any shape. Note that the output is flattened +to a vector of char values. \n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li row_splits:A 1D tensor containing the row splits. +*@li char_values:A 1D tensor containing the decoded codepoints. +*@li char_to_byte_starts:A 1D int32 Tensor containing the byte index in the input string where each +character in `char_values` starts. \n + +*@see UnicodeDecodeWithOffsets() + +*@par Third-party framework compatibility +*compatible with UnicodeDecodeWithOffsets op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeDecodeWithOffsets) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(row_splits, TensorType({DT_INT64})) + .OUTPUT(char_values, TensorType({DT_INT32})) + .OUTPUT(char_to_byte_starts, TensorType({DT_INT64})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .ATTR(Tsplits, Type, DT_INT64) + .OP_END_FACTORY_REG(UnicodeDecodeWithOffsets) + +/** +*@brief Decodes each string in `input` into a sequence of Unicode code points. \n + +*@par Inputs: +include: +*@li input:The text to be decoded. Can have any shape. Note that the output is flattened +to a vector of char values. \n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. 
A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li row_splits:A 1D tensor containing the row splits. +*@li char_values:A 1D tensor containing the decoded codepoints. \n + +*@see UnicodeDecode() + +*@par Third-party framework compatibility +*compatible with UnicodeDecode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeDecode) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(row_splits, TensorType({DT_INT64})) + .OUTPUT(char_values, TensorType({DT_INT32})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .ATTR(Tsplits, Type, DT_INT64) + .OP_END_FACTORY_REG(UnicodeDecode) + +/** +*@brief Transcode the input text from a source encoding to a destination encoding. \n + +*@par Inputs: +include: +*@li input:The text to be processed. Can have any shape. \n + +*@par Attributes: +* input_encoding:Text encoding of the input strings. This is any of the encodings supported +by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +* output_encoding:The unicode encoding to use in the output. Must be one of `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. +Multi-byte encodings will be big-endian. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. +* replace_control_characters:Whether to replace the C0 control characters (00-1F) with the +`replacement_char`. Default is false. \n + +*@par Outputs: +*@li output:A string tensor containing unicode text encoded using `output_encoding`. \n + +*@see UnicodeTranscode() + +*@par Third-party framework compatibility +*compatible with UnicodeTranscode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(UnicodeTranscode) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .REQUIRED_ATTR(input_encoding, String) + .ATTR(output_encoding, String, "UTF-8") + .ATTR(errors, String, "replace") + .ATTR(replacement_char, Int, 65533) + .ATTR(replace_control_characters, Bool, false) + .OP_END_FACTORY_REG(UnicodeTranscode) + +/** +*@brief Encode a tensor of ints into unicode strings. \n + +*@par Inputs: +include: +*@li input_values:A 1D tensor containing the unicode codepoints that should be encoded. +*@li input_splits:A 1D tensor specifying how the unicode codepoints should be split into strings. \n + +*@par Attributes: +* output_encoding:The unicode encoding to use in the output. Must be one of `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. +Multi-byte encodings will be big-endian. +* errors:Error handling policy when there is invalid formatting found in the input. +The value of 'strict' will cause the operation to produce a InvalidArgument +error on any invalid input formatting. A value of 'replace' (the default) will +cause the operation to replace any invalid formatting in the input with the +`replacement_char` codepoint. A value of 'ignore' will cause the operation to +skip any invalid formatting in the input and produce no corresponding output +character. +* replacement_char:The replacement character codepoint to be used in place of any invalid +formatting in the input when `errors='replace'`. Any valid unicode codepoint may +be used. The default value is the default unicode replacement character is +0xFFFD or U+65533. \n + +*@par Outputs: +*@li output:The 1-D Tensor of strings encoded from the provided unicode codepoints. \n + +*@see UnicodeEncode() + +*@par Third-party framework compatibility +*compatible with UnicodeEncode op of tensorflow + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(UnicodeEncode) + .INPUT(input_values, TensorType({DT_INT32})) + .INPUT(input_splits, TensorType({DT_INT32, DT_INT64})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(errors, String, "replace") + .ATTR(output_encoding, String, "UTF-8") + .ATTR(replacement_char, Int, 65533) + .OP_END_FACTORY_REG(UnicodeEncode) /** *@brief Split elements of input based on delimiter into a SparseTensor . \n @@ -61,6 +290,116 @@ REG_OP(StringSplit) .ATTR(skip_empty, Bool, true) .OP_END_FACTORY_REG(StringSplit) +/** +*@brief Replaces the match of pattern in input with rewrite. \n + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. \n + +*@par Attributes: +*@li pattern:A string. The regular expression to match the input. +*@li rewrite:A string. The rewrite to be applied to the matched expression. +*@li replace_global:An optional bool. Defaults to True. If True, the replacement is global, +otherwise the replacement is done only on the first match. + +*@par output: +*@li output::A Tensor of type string. +*/ +REG_OP(StaticRegexReplace) + .INPUT(input, TensorType({DT_STRING})) + .OUTPUT(output, TensorType({DT_STRING})) + .ATTR(pattern, String, "") + .ATTR(rewrite, String, "") + .ATTR(replace_global, Bool, true) + .OP_END_FACTORY_REG(StaticRegexReplace) + +/** +*@brief The input is a string tensor of any shape. The pattern is the +*regular expression to be matched with every element of the input tensor. +*The boolean values (True or False) of the output tensor indicate +*if the input matches the regex pattern provided. + +*@par Inputs: +include: +*@li input:A Tensor of type string. The text to be processed. 
\n
+
+*@par Attributes:
+*@li pattern:A string. The regular expression to match the input.
+
+*@par Outputs:
+*@li output: A bool tensor with the same shape as `input`.
+*/
+REG_OP(StaticRegexFullMatch)
+    .INPUT(input, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_BOOL}))
+    .ATTR(pattern, String, "")
+    .OP_END_FACTORY_REG(StaticRegexFullMatch)
+
+/**
+*@brief Joins the elements of the input string tensor based on the given segment ids. \n
+
+*@par Inputs:
+include:
+*@li input:A Tensor of type string. The text to be processed.
+*@li segment_ids:A Tensor. Must be one of the following types: int32, int64.
+*A tensor whose shape is a prefix of data.shape. Negative segment ids are not supported.
+*@li num_segments:A Tensor. Must be one of the following types: int32, int64. A scalar.
+
+*@par Attributes:
+*@li separator:An optional string. Defaults to "". The separator to use when joining.
+
+*@par Outputs:
+*@li output: A Tensor of type string.
+*/
+REG_OP(UnsortedSegmentJoin)
+    .INPUT(input, TensorType({DT_STRING}))
+    .INPUT(segment_ids, TensorType({DT_INT32,DT_INT64}))
+    .INPUT(num_segments, TensorType({DT_INT32,DT_INT64}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(separator, String, "")
+    .OP_END_FACTORY_REG(UnsortedSegmentJoin)
+
+/**
+*@brief Converts each uppercase character in the input string tensor to its
+*lowercase replacement.
+
+*@par Inputs:
+include:
+*@li input:A Tensor of type string. The text to be processed.
+
+*@par Attributes:
+*@li encoding:An optional string. Defaults to "".
+
+*@par Outputs:
+*@li output: A Tensor of type string.
+*/
+REG_OP(StringLower)
+    .INPUT(input, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(encoding, String, "")
+    .OP_END_FACTORY_REG(StringLower)
+
+/**
+*@brief Converts each lowercase character in the input string tensor to its
+*uppercase replacement.
+
+*@par Inputs:
+include:
+*@li input:A Tensor of type string. The text to be processed.
+
+*@par Attributes:
+*@li encoding:An optional string. Defaults to "".
+
+*@par Outputs:
+*@li output: A Tensor of type string.
+*/
+REG_OP(StringUpper)
+    .INPUT(input, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(encoding, String, "")
+    .OP_END_FACTORY_REG(StringUpper)
+
 /**
 *@brief Split elements of source based on sep into a SparseTensor . \n
 
@@ -488,7 +827,7 @@ include:
 */
 REG_OP(AsString)
     .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT, \
-                          DT_DOUBLE, DT_BOOL}))
+                          DT_DOUBLE, DT_BOOL, DT_COMPLEX64, DT_COMPLEX128}))
     .OUTPUT(y, TensorType({DT_STRING}))
     .ATTR(precision, Int, -1)
     .ATTR(scientific, Bool, false)
@@ -557,6 +896,45 @@ REG_OP(DecodeBase64)
     .INPUT(x, TensorType({DT_STRING}))
     .OUTPUT(y, TensorType({DT_STRING}))
     .OP_END_FACTORY_REG(DecodeBase64)
+
+/**
+*@brief StringNormalization performs string operations for basic cleaning . \n
+
+*@par Inputs:
+*@li input: A [C] or [1, C] UTF-8 string tensor . \n
+
+*@par Outputs:
+*@li output: A UTF-8 string tensor after cleaning . \n
+
+*@par Attributes:
+*@li stopwords : list of strings (default is empty).
+*List of stop words. If not set, no word is removed from the input string
+tensor.
+
+*@li is_case_sensitive : bool (default is false).
+*Boolean. Whether the identification of stop words in the input string tensor is
+case-sensitive. Default is false.
+
+*@li case_change_action : string (default is "NONE").
+*A string enum that causes the output to be lowercased/uppercased/unchanged. Valid
+values are "LOWER", "UPPER", "NONE". Default is "NONE".
+
+*@li local : string (default is "en_US").
+*Environment dependent string that denotes the locale according to which output
+strings need to be upper/lowercased. Default is en_US or a platform-specific equivalent
+as decided by the implementation . \n
+
+*@attention Constraints:
+*@li input can be either a 1-D or 2-D tensor; the shape of a 2-D tensor must be [1, C].
+*/
+REG_OP(StringNormalizer)
+    .INPUT(input, TensorType({DT_STRING}))
+    .OUTPUT(output, TensorType({DT_STRING}))
+    .ATTR(stopwords, ListString, {})
+    .ATTR(is_case_sensitive, Bool, false)
+    .ATTR(case_change_action, String, "NONE")
+    .ATTR(local, String, "en_US")
+    .OP_END_FACTORY_REG(StringNormalizer)
 } // namespace ge
 
 #endif  // OPS_BUILT_IN_OP_PROTO_INC_STRING_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/swap_co_ops.h b/third_party/fwkacllib/inc/ops/swap_co_ops.h
index a1bf4f8b..6e8eaac3 100644
--- a/third_party/fwkacllib/inc/ops/swap_co_ops.h
+++ b/third_party/fwkacllib/inc/ops/swap_co_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/target_crop_and_resize.h b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h
index 9c61f2c9..9bef1d7b 100644
--- a/third_party/fwkacllib/inc/ops/target_crop_and_resize.h
+++ b/third_party/fwkacllib/inc/ops/target_crop_and_resize.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/third_party/fwkacllib/inc/ops/transformation_ops.h b/third_party/fwkacllib/inc/ops/transformation_ops.h
index 64e18fc7..4a46e35f 100644
--- a/third_party/fwkacllib/inc/ops/transformation_ops.h
+++ b/third_party/fwkacllib/inc/ops/transformation_ops.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2019 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -130,28 +130,27 @@ REG_OP(Transpose)
     .OP_END_FACTORY_REG(Transpose)
 
 /**
-*@brief Doing format_transfer for various data format only
-support "NHWC/NCHW" to "NC1HWC0" and "NC1HWC0" to "NHWC/NCHW"
-"NCHW" to "FRACTAL_Zn" or "FRACTAL_Zn" to "NCHW".
-"HWCN" to "FRACTAL_Zn" or "FRACTAL_Zn" to "HWCN" . \n
+*@brief Does format transfer for various data formats.
+* In general, the framework inserts it automatically . \n
 
 *@par Inputs:
-*src: A Tensor dtype of all types . \n
+*src: A Tensor. All branches support types: float16, float32, int32, int8, bool.
+* Branches without padding also support types: int16, int64, uint8, uint16, uint32, uint64 . \n
 
 *@par Attributes:
-*@li src_format: A string source data format, can be "NHWC", "NCHW", "FRACTAL_Zn" etc.
-*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Zn" etc.
-*@li group: A required int32, default value is 1. \n
+*@li src_format: A string source data format, can be "NHWC", "NCHW", "FRACTAL_Z" etc.
+*@li dst_format: A string target data format, can be "NC1HWC0", "NCHW", "FRACTAL_Z" etc.
+*@li groups: An optional int32, default value is 1.
\n *@par Outputs: -*dst: A Tensor dtype of all types. +*dst: A Tensor. Has the same type as "src". */ REG_OP(TransData) .INPUT(src, TensorType::BasicType()) .OUTPUT(dst, TensorType::BasicType()) .REQUIRED_ATTR(src_format, String) .REQUIRED_ATTR(dst_format, String) - .ATTR(group, Int, 1) + .ATTR(groups, Int, 1) .OP_END_FACTORY_REG(TransData) /** @@ -174,21 +173,27 @@ REG_OP(Permute) .OP_END_FACTORY_REG(Permute) /** -*@brief Flattens the inputs. Reserves axis 0 and flattens the input tensors -* along axis 1 . \n +*@brief Flattens the inputs tensor into a 2D matrix. If input tensor has shape (d_0, d_1,..., d_n), +* then the output will have shape (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis + 1)...X d_n)\n *@par Inputs: -*One input: -*x: A multi-dimensional Tensor. Must be one of the following types: -* int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32 . \n +* One input: +* x: A multi-dimensional Tensor. Must be one of the following types: +* int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32. *@par Outputs: -*y: A 2D flattened Tensor (Reserves axis 0 and flattens the input tensors -* along axis 1). Must be one of the following data types: int8, uint8, int16, -* uint16, int32, uint32, int64,uint64, float16, float32 . \n +* y: A 2D flattened Tensor with the contents of the input tensor, with input dimensions up to axis flattened +* to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output. +* Must be one of the following data types: int8, uint8, int16, uint16, int32, uint32, int64,uint64, float16, float32 . + +*@par Attributes: +* axis: A optional int32, default value is 1. Indicate up to which input dimensions (exclusive) should be flattened +* to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of +* the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of +* the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). *@par Third-party framework compatibility -* Compatible with TensorFlow operator Flatten. +* Compatible with TensorFlow / ONNX operator Flatten. */ REG_OP(Flatten) .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, @@ -197,6 +202,7 @@ REG_OP(Flatten) .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16})) + .ATTR(axis, Int, 1) .OP_END_FACTORY_REG(Flatten) /** @@ -357,7 +363,7 @@ REG_OP(DepthToSpace) *@brief Permutes data into spatial data blocks and then prunes them . \n *@par Inputs: -*@li x: A 4D Tensor with format NHWC. +*@li x: A 4D Tensor with format. Must set the format, supported format list ["NCHW, NHWC"] *@li crops: A 1D list or tuple of int32 or int64 . \n *Must be one of the following types: float16, float32 @@ -418,12 +424,8 @@ REG_OP(BatchToSpace) * Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead. 
*/ REG_OP(BatchToSpaceD) - .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, - DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64, - DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, - DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64, - DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32})) + .INPUT(x, TensorType::BasicType()) + .OUTPUT(y, TensorType::BasicType()) .REQUIRED_ATTR(block_size, Int) .REQUIRED_ATTR(crops, ListInt) .OP_END_FACTORY_REG(BatchToSpaceD) @@ -434,9 +436,10 @@ REG_OP(BatchToSpaceD) *@par Inputs: * Two inputs, including: -*@li x: An NHWC Tensor. Must be one of the following types: +*@li x: An 4D Tensor. Must be one of the following types: * float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, * int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32. +* Must set the format, supported format list ["NCHW, NHWC"] *@li paddings: A 2D tensor of type int, specifying the input . \n *@par Attributes: @@ -518,7 +521,8 @@ REG_OP(Unpack) * @par Inputs: * x: A 4D Tensor with shape [batch, in_rows, in_cols, depth], Must be one of the * following types:float32, double, int32, uint8, int16, int8, int64, uint16, -* float16, uint32, uint64 +* float16, uint32, uint64. The inputs must have data_format with one of follows: +* NHWC, NCHW. * @par Attributes: * @li ksizes: A required list or tuple. The size of the sliding window for each @@ -533,7 +537,6 @@ REG_OP(Unpack) * This is equivalent to rate in dilated (a.k.a. Atrous) convolutions. * @li padding: A required string. The type of padding algorithm to use, support "SAME" or "VALID". \n -* @li data_format: A required string. The format of input, only supported NHWC. \n * @par Outputs: * y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows * @@ -554,7 +557,6 @@ REG_OP(ExtractImagePatches) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(rates, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(data_format, String, "NHWC") .OP_END_FACTORY_REG(ExtractImagePatches) /** @@ -563,6 +565,7 @@ REG_OP(ExtractImagePatches) * @par Inputs: * x: A 5D Tensor with shape [batch, in_planes, in_rows, in_cols, depth] . \n +* The inputs must have data_format with one of follows: NDHWC, NCDHW. \n * @par Attributes: * @li ksizes: A required list or tuple. The size of the sliding window for each @@ -571,7 +574,6 @@ REG_OP(ExtractImagePatches) * patches are in "x". Must be: [1, stride_planes, stride_rows, stride_cols, 1]. * @li padding: A required string. The type of padding algorithm to use , * support "SAME" or "VALID" . \n -* @li data_format: An optional string. The format of input, only supported NDHWC. \n * @par Outputs: * Output: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols, ksize_planes * @@ -590,7 +592,6 @@ REG_OP(ExtractVolumePatches) .REQUIRED_ATTR(ksizes, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(padding, String) - .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(ExtractVolumePatches) /** @@ -717,6 +718,210 @@ REG_OP(CompressFcOp) .OUTPUT(compress_index, TensorType({DT_INT8})) .REQUIRED_ATTR(compress_parameters, ListInt) .OP_END_FACTORY_REG(CompressFcOp) + +/** +*@brief Performs Col2im for each batch entry. \n + +*@par Inputs: +*@li input_x: The Col Tensor. 5-D, shape: `(n, c1, kernel_h*kernel_w, ho*wo, c0)`. 
+where ho and wo are computed, for each dimension d in {h, w}, as
+od = (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//stride_d + 1 \n
+
+*@par Outputs:
+*@li output_y: The img Tensor. 5-D, shape: `(n, c1, output_h, output_w, c0)`. \n
+
+*@par Attributes:
+*@li kernel_size: ListInt, value: `(kernel_h, kernel_w)`, the shape of the kernel in convolution.
+*@li dilation: ListInt, value: `(dilation_h, dilation_w)`, the dilation in convolution.
+*@li padding: ListInt, value: `(padding_h, padding_w)`, the padding in convolution.
+*@li stride: ListInt, value: `(stride_h, stride_w)`, the stride in convolution. \n
+
+*@par Third-party framework compatibility
+* Compatible with Pytorch col2im/im2col_backward operator.
+*/
+REG_OP(Col2im)
+    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .INPUT(output_size, TensorType({DT_INT32, DT_INT32}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
+    .REQUIRED_ATTR(kernel_size, ListInt)
+    .REQUIRED_ATTR(dilation, ListInt)
+    .REQUIRED_ATTR(padding, ListInt)
+    .REQUIRED_ATTR(stride, ListInt)
+    .OP_END_FACTORY_REG(Col2im)
+
+/**
+* @brief Performs Im2col for each batch entry. \n
+
+* @par Inputs:
+* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth]. Must be one of the
+* following types: float32, int8, float16. The inputs must have data_format with
+* one of the following: NHWC, NCHW.
+
+* @par Attributes:
+* @li ksizes: A required list or tuple. The size of the sliding window for each
+* dimension of images.
+* @li strides: An optional list or tuple. How far the centers of two consecutive
+* patches are in the images. Defaults to "{1}".
+* @li dilations: An optional list or tuple. Defaults to "{1}".
+* This is the input stride, specifying how far two consecutive patch
+* samples are in the input. Equivalent to extracting patches
+* with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *
+* (dilations - 1), followed by subsampling them spatially by a factor of dilations.
+* This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
+* @li padding_mode: An optional string. The type of padding algorithm to use,
+* supporting "SAME", "VALID", "CALCULATED". Among the three modes, only "CALCULATED"
+* means to use the pads below. Defaults to "CALCULATED".
+* @li pads: An optional list or tuple. The pad distance. Defaults to "{0}". \n
+
+* @par Outputs:
+* y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *
+* ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols
+* x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"
+* are the dimensions of the output patches . \n
+
+* @attention Constraints:
+* "ksizes", "strides", "dilations" and "pads" are lists of integers . \n
+
+* @par Third-party framework compatibility
+* Compatible with Pytorch Im2col operator.
+*/
+REG_OP(Im2col)
+    .INPUT(x, TensorType::RealNumberType())
+    .OUTPUT(y, TensorType::RealNumberType())
+    .REQUIRED_ATTR(ksizes, ListInt)
+    .ATTR(strides, ListInt, {1})
+    .ATTR(dilations, ListInt, {1})
+    .ATTR(padding_mode, String, "CALCULATED")
+    .ATTR(pads, ListInt, {0})
+    .OP_END_FACTORY_REG(Im2col)
+
+/**
+*@brief Generates a 2D or 3D flow field (sampling grid), given a batch of affine
+matrices theta. \n
+
+*@par Inputs:
+*Input theta must be float16 or float, output_size must be of int32 type. Inputs
+include:
+*@li theta: input batch of affine matrices with shape (N,2,3) for 2D or (N,3,4)
+for 3D
+*@li output_size: the target output image size. (N×C×H×W for 2D or N×C×D×H×W for
+3D) Example: torch.Size((32, 3, 24, 24)) .
\n + + +*@par Attributes: +*align_corners: if True, consider -1 and 1 to refer to the centers of the corner +pixels rather than the image corners.Refer to grid_sample() for a more complete +description. A grid generated by affine_grid() should be passed to grid_sample() +with the same setting for this option. Default: False \n + +*@par Outputs: +*@li y: A 2-D integer tensor of shape [M] representing the +selected indices from the boxes tensor, where M <= max_output_size. \n + +*@attention Constraints: +*Input theta must be float16 or float, output_size must be int32 type . \n + +*@par Third-party framework compatibility +*Compatible with Pytorch affine_grid operator. +*/ + +REG_OP(AffineGrid) + .INPUT(theta, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(output_size, TensorType({DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(AffineGrid) + +/** +*@brief Make memory of a view be contiguous. \n + +*@par Inputs: +*Four inputs, including: +*@li x: The input tensor. +*@li size: The shape of output tensor. +*@li stride: The stride of output tensor. +*@li storage_offset: The offset in the underlying storage of the output tensor. \n + +*@par Outputs: +*y: A Tensor. Has the same type as "x" . \n + +*@par Third-party framework compatibility +*Compatible with the pytorch operator as_strided. +*/ +REG_OP(AsStrided) + .INPUT(x, TensorType::BasicType()) + .INPUT(size, TensorType::IndexNumberType()) + .INPUT(stride, TensorType::IndexNumberType()) + .INPUT(storage_offset, TensorType::IndexNumberType()) + .OUTPUT(y, TensorType::BasicType()) + .OP_END_FACTORY_REG(AsStrided) + +/** +*@brief This transform extracts n-grams from the input sequence and save them as a +vector. \n + +*@par Inputs: +*@li input: can be either a 1-D or 2-D tensor for n-gram extraction, It is ether string UTF-8 or int32/int64 . \n + +*@par Attributes: +*@li max_gram_length : int (required) +*Maximum n-gram length. If this value is 3, 3-grams will be used to generate the output . +*@li max_skip_count : int (required) +*Maximum number of items (integers/strings) to be skipped when constructing an n-gram from X. +If max_skip_count=1, min_gram_length=2, max_gram_length=3, this operator may generate 2-grams +with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and skip_count=1. +*@li min_gram_length : int (required) +*Minimum n-gram length. If this value is 2 and max_gram_length is 3, output may contain counts of +2-grams and 3-grams. +*@li mode : string (required) +*The weighting criteria. It can be one of "TF" (term frequency), "IDF" (inverse document frequency), +and "TFIDF" (the combination of TF and IDF). +*@li ngram_counts : list of ints (required) +*The starting indexes of 1-grams, 2-grams, and so on in pool. It is useful when determining the boundary +between two consecutive collections of n-grams. For example, if ngram_counts is [0, 17, 36], +the first index (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is essentially identical +to CSR (or CSC) sparse matrix format, and we choose to use this due to its popularity. +*@li ngram_indexes : list of ints (required) +*list of int64s (type: AttributeProto::INTS). This list is parallel to the specified 'pool_*' attribute. The i-th element +in ngram_indexes indicate the coordinate of the i-th n-gram in the output tensor. +*@li pool_int64s : list of ints +*List of int64 n-grams learned from the training set. Either this or pool_strings attributes must be present but not both. 
+It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element +in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector. +*@li pool_strings : list of strings +*List of strings n-grams learned from the training set. Either this or pool_int64s attributes must be present but not both. +It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element +in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector. +*@li weights : list of floats +*list of floats. This attribute stores the weight of each n-gram in pool. The i-th element in weights is the weight of +the i-th n-gram in pool. Its length equals to the size of ngram_indexes. By default, weights is an all-one tensor.This attribute +is used when mode is "IDF" or "TFIDF" to scale the associated word counts. \n + +*@par Outputs: +*@li output: tensor(float) +*For 1-D input, output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor +whose i-th row is the n-gram representation of the i-th input row. More specifically, if input shape is [C], the corresponding +output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor. \n + +*@attention Constraints: +*@li input can be either a 1-D or 2-D tensor, shape is [C] or [N, C]. +*@li max(ngram_indexes) + 1 == len(weights), len(y) == len(weights). +*@li ngram_counts and pool(pool_int64s or pool_strings) must match. +*@li either pool_strings or pool_int64s attributes must be present but not both. +*/ + +REG_OP(TfidVectorizer) + .INPUT(input, TensorType({DT_INT32, DT_INT64, DT_STRING})) + .OUTPUT(output, TensorType({DT_FLOAT})) + .REQUIRED_ATTR(max_gram_length, Int) + .REQUIRED_ATTR(max_skip_count, Int) + .REQUIRED_ATTR(min_gram_length, Int) + .REQUIRED_ATTR(mode, String) + .REQUIRED_ATTR(ngram_counts, ListInt) + .REQUIRED_ATTR(ngram_indexes, ListInt) + .ATTR(pool_int64s, ListInt, {}) + .ATTR(pool_strings, ListString, {}) + .ATTR(weights, ListFloat, {}) + .OP_END_FACTORY_REG(TfidVectorizer) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h index e19cbd7c..8ef69d8b 100644 --- a/third_party/fwkacllib/inc/ops/warp_perspective_ops.h +++ b/third_party/fwkacllib/inc/ops/warp_perspective_ops.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
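The AsStrided registration earlier in transformation_ops.h documents a pure gather by strides over the underlying storage: y[i0, i1, ...] = storage[storage_offset + sum_k(ik * stride[k])]. A minimal reference sketch of that rule, assuming float storage and in-range indices (the function name is illustrative, not part of the library):

#include <cstdint>
#include <vector>

// Reference loop over the flattened output index; decodes each output
// coordinate and gathers from the strided storage location.
std::vector<float> AsStridedRef(const std::vector<float> &storage,
                                const std::vector<int64_t> &size,
                                const std::vector<int64_t> &stride,
                                int64_t storage_offset) {
  int64_t total = 1;
  for (int64_t s : size) total *= s;
  std::vector<float> y(static_cast<size_t>(total));
  for (int64_t flat = 0; flat < total; ++flat) {
    int64_t rem = flat;
    int64_t src = storage_offset;
    // Decompose the flat output index into per-dimension coordinates,
    // innermost dimension first, accumulating the strided source offset.
    for (int64_t k = static_cast<int64_t>(size.size()) - 1; k >= 0; --k) {
      const int64_t coord = rem % size[k];
      rem /= size[k];
      src += coord * stride[k];
    }
    y[static_cast<size_t>(flat)] = storage[static_cast<size_t>(src)];
  }
  return y;
}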
diff --git a/third_party/fwkacllib/inc/runtime/event.h b/third_party/fwkacllib/inc/runtime/event.h index 57948c47..01f63705 100644 --- a/third_party/fwkacllib/inc/runtime/event.h +++ b/third_party/fwkacllib/inc/runtime/event.h @@ -41,6 +41,11 @@ typedef enum rtEventWaitStatus { #define RT_EVENT_DDSYNC 0x04U #define RT_EVENT_TIME_LINE 0x08U +#define RT_EVENT_DDSYNC_NS 0x01U +#define RT_EVENT_STREAM_MARK 0x02U +#define RT_EVENT_DDSYNC 0x04U +#define RT_EVENT_TIME_LINE 0x08U + /** * @ingroup dvrt_event * @brief create event instance diff --git a/third_party/fwkacllib/inc/runtime/rt.h b/third_party/fwkacllib/inc/runtime/rt.h index aa394eea..10f884f2 100644 --- a/third_party/fwkacllib/inc/runtime/rt.h +++ b/third_party/fwkacllib/inc/runtime/rt.h @@ -27,6 +27,7 @@ #include "mem.h" #include "rt_model.h" #include "stream.h" +#include "rt_stars.h" #include "rt_ffts.h" #endif // __CCE_RUNTIME_RT_H__ diff --git a/third_party/fwkacllib/inc/runtime/rt_stars.h b/third_party/fwkacllib/inc/runtime/rt_stars.h new file mode 100644 index 00000000..188656b1 --- /dev/null +++ b/third_party/fwkacllib/inc/runtime/rt_stars.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. + * Description: + */ + +#ifndef __CCE_RUNTIME_STARS_H +#define __CCE_RUNTIME_STARS_H + +#include "base.h" + +#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE) +extern "C" { +#endif + +/** + * @ingroup rt_stars + * @brief launch stars task. + * used for send star sqe directly. + * @param [in] taskSqe stars task sqe + * @param [in] sqeLen stars task sqe length + * @param [in] stream associated stream + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtStarsTaskLaunch(const void *taskSqe, uint32_t sqeLen, rtStream_t stream); + +/** + * @ingroup rt_stars + * @brief create cdq instance. + * @param [in] batchNum batch number + * @param [in] batchSize batch size + * @param [in] queName cdq name + * @return RT_ERROR_NONE for ok, ACL_ERROR_RT_NO_CDQ_RESOURCE for no cdq resources + */ +RTS_API rtError_t rtCdqCreate(uint32_t batchNum, uint32_t batchSize, const char *queName); + +/** + * @ingroup rt_stars + * @brief destroy cdq instance. + * @param [in] queName cdq name + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtCdqDestroy(const char *queName); + +/** + * @ingroup rt_stars + * @brief get free batch in the queue. + * @param [in] queName cdq name + * @param [in] timeout batch size + * @param [out] batchId batch index + * @return RT_ERROR_NONE for ok, ACL_ERROR_RT_WAIT_TIMEOUT for timeout + */ +RTS_API rtError_t rtCdqAllocBatch(const char *queName, int32_t timeout, uint32_t *batchId); + +/** + * @ingroup rt_stars + * @brief launch a write_cdqm task on the stream. + * When the task is executed, the data information will be inserted into the cdqe index position of the queue. + * @param [in] queName cdq name + * @param [in] cdqeIndex cdqe index + * @param [in] data cdqe infomation + * @param [in] dataSize data size + * @param [in] stream launch task on the stream + * @return RT_ERROR_NONE for ok, others failed + */ +RTS_API rtError_t rtCdqEnQueue(const char *queName, uint32_t cdqeIndex, void *data, uint32_t dataSize, + rtStream_t stream); + +/** + * @ingroup rt_stars + * @brief launch a write_cdqm task on the stream. + * When the task is executed, the data information will be inserted into the cdqe index position of the queue. 
+ * @param [in] queName cdq name
+ * @param [in] cdqeIndex cdqe index
+ * @param [in] prtAddr pointer to the cdqe information
+ * @param [in] stream launch task on the stream
+ * @return RT_ERROR_NONE for ok, others failed
+ */
+RTS_API rtError_t rtCdqEnQueuePtrMode(const char *queName, uint32_t cdqeIndex, const void *prtAddr,
+    rtStream_t stream);
+
+#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+}
+#endif
+#endif  // __CCE_RUNTIME_STARS_H
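Taken together, the cdq entry points in this new header suggest a create / alloc-batch / enqueue / destroy lifecycle. Below is a hedged usage sketch, not taken from any runtime documentation: the queue name and batch geometry are invented, the timeout units are assumed, rtStreamCreate/rtStreamDestroy come from stream.h, and it is only a guess that the batchId returned by rtCdqAllocBatch is what rtCdqEnQueue expects as cdqeIndex.

// Illustrative flow only: error handling is trimmed and synchronization with
// the launched write_cdqm task is elided.
#include <cstdint>
#include "rt.h"  // pulls in rt_stars.h (see the rt.h hunk above) and stream.h

rtError_t CdqDemo(void *payload, uint32_t payloadSize) {
  const char *queName = "demo_cdq";  // hypothetical queue name
  // 8 batches of 64 bytes each; the batchNum/batchSize units are assumptions.
  rtError_t ret = rtCdqCreate(8U, 64U, queName);
  if (ret != RT_ERROR_NONE) {
    return ret;  // e.g. ACL_ERROR_RT_NO_CDQ_RESOURCE
  }

  rtStream_t stream = nullptr;
  (void)rtStreamCreate(&stream, 0);

  uint32_t batchId = 0U;
  ret = rtCdqAllocBatch(queName, 1000, &batchId);  // may return a timeout error
  if (ret == RT_ERROR_NONE) {
    // Launch the write_cdqm task; the data lands in slot batchId on execution.
    ret = rtCdqEnQueue(queName, batchId, payload, payloadSize, stream);
  }

  (void)rtStreamDestroy(stream);
  (void)rtCdqDestroy(queName);
  return ret;
}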
diff --git a/third_party/fwkacllib/inc/tdt/tsd_client.h b/third_party/fwkacllib/inc/tdt/tsd_client.h
index 665c8b82..36fc500e 100644
--- a/third_party/fwkacllib/inc/tdt/tsd_client.h
+++ b/third_party/fwkacllib/inc/tdt/tsd_client.h
@@ -107,88 +107,6 @@ TDT_LIB_EXPORT TDT_StatusT UpdateProfilingMode(const uint32_t logicDeviceId, con
 */
 TDT_LIB_EXPORT TDT_StatusT TsdSetMsprofReporterCallback(MsprofReporterCallback callback);
 
-/**
-* @ingroup CreateCmdParameterObj
-* @brief creat tsdclient func parameter obj.
-*
-* @par Function
-* creat tsdclient func parameter obj.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @retval TDT_OK Success
-* @retval TDT_INTERFACE_NOT_SUPPORT
-*
-* @par Dependency
-* @li libtsdclient.so: Library to which the interface belongs.
-* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined.
-* @li status.h: Header file where 'TDT_StatusT' defined
-*/
-TDT_StatusT CreateCmdParameterObj(tdt::TsdCmdType type, void **cmdParameterObj);
-
-/**
-* @ingroup SetCmdParameterObjAttribute
-* @brief set cmdParameterObj input value.
-*
-* @par Function
-* set cmdParameterObj input value.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @param itemType [IN] type tdt::InputItem, func input type.
-* @param valuePtr [IN] type const void *, input value.
-* @param valueLength [IN] type int, input value length.
-* @retval TDT_OK Success
-* @retval TDT_INTERFACE_NOT_SUPPORT
-*
-* @par Dependency
-* @li libtsdclient.so: Library to which the interface belongs.
-* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined.
-* @li status.h: Header file where 'TDT_StatusT' defined
-*/
-TDT_StatusT SetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, const void *valuePtr, int valueLength);
-
-/**
-* @ingroup GetCmdParameterObjAttribute
-* @brief set cmdParameterObj input value.
-*
-* @par Function
-* set cmdParameterObj input value.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @param itemType [IN] type tdt::InputItem, func input type.
-* @param valuePtr [IN] type const void *, input value.
-* @param valueLength [IN] type int, input value length.
-* @retval TDT_OK Success
-* @retval TDT_INTERFACE_NOT_SUPPORT
-*
-* @par Dependency
-* @li libtsdclient.so: Library to which the interface belongs.
-* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined.
-* @li status.h: Header file where 'TDT_StatusT' defined
-*/
-TDT_StatusT GetCmdParameterObjAttribute(tdt::TsdCmdType type, void *cmdParameterObj, tdt::InputItem itemType, void *valuePtr, int &valueLength);
-
-/**
-* @ingroup TsdClientCmd
-* @brief creat tsdclient func parameter obj.
-*
-* @par Function
-* creat tsdclient func parameter obj.
-*
-* @param type [IN] type tdt::TsdCmdType, tsd func type.
-* @param cmdParameterObj [IN] type void *, func parameter obj.
-* @retval TDT_OK Success
-* @retval TDT_INTERFACE_NOT_SUPPORT
-*
-* @par Dependency
-* @li libtsdclient.so: Library to which the interface belongs.
-* @li data_common.h: Header file where tdt::TsdCmdType and tdt::InputItem defined.
-* @li status.h: Header file where 'TDT_StatusT' defined
-*/
-TDT_StatusT TsdClientCmd(tdt::TsdCmdType cmd, void *cmdParameterObj);
-
 #ifdef __cplusplus
 }
 #endif  // __cplusplus
diff --git a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
index a1c39a51..67adecd9 100644
--- a/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
+++ b/third_party/fwkacllib/inc/toolchain/adx_datadump_server.h
@@ -1,12 +1,18 @@
 /**
-* @file adx_datadump_server.h
-*
-* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-*/
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 #ifndef ADX_DATADUMP_SERVER_H
 #define ADX_DATADUMP_SERVER_H
diff --git a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
index c8715041..07b32149 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
@@ -14,151 +14,99 @@
 * limitations under the License.
 */
 
-#ifndef MSPROF_ENGINE_PROF_ACL_API_H_
-#define MSPROF_ENGINE_PROF_ACL_API_H_
-
-#define MSVP_MAX_DEV_NUM 64
-#define MSVP_PROF_API __attribute__((visibility("default")))
+#ifndef MSPROFILER_API_PROF_ACL_API_H_
+#define MSPROFILER_API_PROF_ACL_API_H_
 
 // DataTypeConfig
-#define PROF_ACL_API 0x0001
-#define PROF_TASK_TIME 0x0002
-#define PROF_AICORE_METRICS 0x0004
-#define PROF_AICPU_TRACE 0x0008
-#define PROF_MODEL_EXECUTE 0x0010
-#define PROF_RUNTIME_API 0x0020
-#define PROF_RUNTIME_TRACE 0x0040
-#define PROF_SCHEDULE_TIMELINE 0x0080
-#define PROF_SCHEDULE_TRACE 0x0100
-#define PROF_AIVECTORCORE_METRICS 0x0200
-#define PROF_SUBTASK_TIME 0x0400
-
-#define PROF_TRAINING_TRACE 0x0800
-#define PROF_HCCL_TRACE 0x1000
-#define PROF_DATA_PROCESS 0x2000
-#define PROF_TASK_TRACE 0x3842
+#define PROF_ACL_API 0x00000001
+#define PROF_TASK_TIME 0x00000002
+#define PROF_AICORE_METRICS 0x00000004
+#define PROF_AICPU_TRACE 0x00000008
+#define PROF_MODEL_EXECUTE 0x00000010
+#define PROF_RUNTIME_API 0x00000020
+#define PROF_RUNTIME_TRACE 0x00000040
+#define PROF_SCHEDULE_TIMELINE 0x00000080
+#define PROF_SCHEDULE_TRACE 0x00000100
+#define PROF_AIVECTORCORE_METRICS 0x00000200
+#define PROF_SUBTASK_TIME 0x00000400
+
+#define PROF_TRAINING_TRACE 0x00000800
+#define PROF_HCCL_TRACE 0x00001000
+
+#define PROF_TASK_TRACE 0x00001852
+
+// system profiling switch
+#define PROF_CPU 0x00010000
+#define PROF_HARDWARE_MEMORY 0x00020000
+#define PROF_IO 0x00040000
+#define PROF_INTER_CONNECTION 0x00080000
+#define PROF_DVPP 0x00100000
+#define PROF_SYS_AICORE_SAMPLE 0x00200000
+#define PROF_AIVECTORCORE_SAMPLE 0x00400000
 
 #define PROF_MODEL_LOAD 0x8000000000000000
 
 // DataTypeConfig MASK
-#define PROF_ACL_API_MASK 0x0001
-#define PROF_TASK_TIME_MASK 0x0002
-#define PROF_AICORE_METRICS_MASK 0x0004
-#define PROF_AICPU_TRACE_MASK 0x0008
-#define PROF_MODEL_EXECUTE_MASK 0x0010
-#define PROF_RUNTIME_API_MASK 0x0020
-#define PROF_RUNTIME_TRACE_MASK 0x0040
-#define PROF_SCHEDULE_TIMELINE_MASK 0x0080
-#define PROF_SCHEDULE_TRACE_MASK 0x0100
-#define PROF_AIVECTORCORE_METRICS_MASK 0x0200
-#define PROF_SUBTASK_TIME_MASK 0x0400
-
-#define PROF_TRAINING_TRACE_MASK 0x0800
-#define PROF_HCCL_TRACE_MASK 0x1000
-#define PROF_DATA_PROCESS_MASK 0x2000
+#define PROF_ACL_API_MASK 0x00000001
+#define PROF_TASK_TIME_MASK 0x00000002
+#define PROF_AICORE_METRICS_MASK 0x00000004
+#define PROF_AICPU_TRACE_MASK 0x00000008
+#define PROF_MODEL_EXECUTE_MASK 0x00000010
+#define PROF_RUNTIME_API_MASK 0x00000020
+#define PROF_RUNTIME_TRACE_MASK 0x00000040
+#define PROF_SCHEDULE_TIMELINE_MASK 0x00000080
+#define PROF_SCHEDULE_TRACE_MASK 0x00000100
+#define PROF_AIVECTORCORE_METRICS_MASK 0x00000200
+#define PROF_SUBTASK_TIME_MASK 0x00000400
+
+#define PROF_TRAINING_TRACE_MASK 0x00000800
+#define PROF_HCCL_TRACE_MASK 0x00001000
+
+// system profiling mask
+#define PROF_CPU_MASK 0x00010000
+#define PROF_HARDWARE_MEMORY_MASK 0x00020000
+#define PROF_IO_MASK 0x00040000
+#define PROF_INTER_CONNECTION_MASK 0x00080000
+#define PROF_DVPP_MASK 0x00100000
+#define PROF_SYS_AICORE_SAMPLE_MASK 0x00200000
+#define PROF_AIVECTORCORE_SAMPLE_MASK 0x00400000
 
 #define PROF_MODEL_LOAD_MASK 0x8000000000000000
 
-#include <cstdint>
-#include <string>
-
-/**
- * @name ProrErrorCode
- * @brief error code enum of prof_acl_apis
- */
-enum ProfErrorCode {
-  PROF_ERROR_NONE = 0,        // ok
-  PROF_ERROR_PARAM_INVALID,   // param invalid, for example nullptr
-  PROF_ERROR_REPEAT_INIT,     // profiling has already been inited
-  PROF_ERROR_CONFIG_INVALID,  // config invalid, for example invalid json string
-  PROF_ERROR_DIR_NO_ACCESS,   // dir is not accessable
-  PROF_ERROR_FAILURE,         // failed to init or start profiling
-  PROF_ERROR_NOT_INITED,      // profiling has not been inited
-  PROF_ERROR_DEVICE_INVALID,  // device id invalid
-  PROF_ERROR_UNSUPPORTED,     // unsupported data type or ai core metrics
-  PROF_ERROR_REPEAT_START,    // profiilng has already been started
-  PROF_ERROR_NOT_STARTED,     // profiling has not been started
-};
-
-/**
- * @brief transfer profiling config in acl.json to sample config
- * @param aclCfg [IN] profiling json string from acl.json as {"switch":"on", "result_path":"/home",...}
- * @param sampleCfg [OUT] json string for GE as {"startCfg":[{"deviceID":"all","jobID":"1234",...}]}
- * @return ProfErrorCode
- */
-MSVP_PROF_API int32_t ProfAclCfgToSampleCfg(const std::string &aclCfg, std::string &sampleCfg);
+#ifndef OS_TYPE
+#define OS_TYPE 0
+#endif // OS_TYPE
 
-/**
- * @name ProfInit
- * @brief init profiling
- * @param profInitCfg [IN] config of init profiling of json format
- * @return ProfErrorCode
- */
-MSVP_PROF_API int32_t ProfInit(const std::string &profInitCfg);
-
-/**
- * @name ProfAicoreMetrics
- * @brief aicore metrics enum
- */
-enum ProfAicoreMetrics {
-  PROF_AICORE_ARITHMATIC_THROUGHPUT = 0,
-  PROF_AICORE_PIPELINE = 1,
-  PROF_AICORE_SYNCHRONIZATION = 2,
-  PROF_AICORE_MEMORY = 3,
-  PROF_AICORE_INTERNAL_MEMORY = 4,
-  PROF_AICORE_STALL = 5,
-  PROF_AICORE_EVENT = 255
-};
+#if (OS_TYPE != LINUX)
+#define MSVP_PROF_API __declspec(dllexport)
+#else
+#define MSVP_PROF_API __attribute__((visibility("default")))
+#endif
 
-/**
- * @name ProfConfig
- * @brief struct of ProfStart
- */
-struct ProfConfig {
-  uint32_t devNums;                      // length of device id list
-  uint32_t devIdList[MSVP_MAX_DEV_NUM];  // physical device id list
-  ProfAicoreMetrics aicoreMetrics;       // aicore metric
-  uint64_t dataTypeConfig;               // data type to start profiling
-};
+#include <cstdint>
+namespace Msprofiler {
+namespace Api {
 /**
- * @name ProfStartProfiling
- * @brief start profiling
- * @param profStartCfg [IN] config to start profiling
- * @return ProfErrorCode
+ * @name ProfGetOpExecutionTime
+ * @brief get op execution time of specific part of data
+ * @param data [IN] data read from pipe
+ * @param len [IN] data length
+ * @param index [IN] index of part(op)
+ * @return op execution time (us)
 */
-MSVP_PROF_API int32_t ProfStartProfiling(const ProfConfig *profStartCfg);
+MSVP_PROF_API uint64_t ProfGetOpExecutionTime(const void *data, uint32_t len, uint32_t index);
+}
+}
 
-/**
- * @name ProfStopConfig
- * @brief struct of ProfStop
- */
-struct ProfStopConfig {
-  uint64_t padding;
-};
+#ifdef __cplusplus
+extern "C" {
+#endif
 
-/**
- * @name ProfStopProfiling
- * @brief stop profiling
- * @param profStopCfg [IN] config to stop profiling
- * @return ProfErrorCode
- */
-MSVP_PROF_API int32_t ProfStopProfiling(const ProfConfig *profStopCfg);
-
-/**
- * @name ProfFinalize
- * @brief finalize profiling task
- * @return ProfErrorCode
- */
-MSVP_PROF_API int32_t ProfFinalize();
+MSVP_PROF_API uint64_t ProfGetOpExecutionTime(const void *data, uint32_t len, uint32_t index);
 
-/**
- * @name ProfGetDataTypeConfig
- * @brief get dataTypeConfig started with of one device
- * @param deviceId [IN] deviceId to get dataTypeConfig
- * @param dataTypeConfig [OUT] result get
- * @return ProfErrorCode
- */
-MSVP_PROF_API int32_t ProfGetDataTypeConfig(uint32_t deviceId, uint64_t &dataTypeConfig);
+#ifdef __cplusplus
+}
+#endif
 
-#endif  // MSPROF_ENGINE_PROF_ACL_API_H_
+#endif  // MSPROFILER_API_PROF_ACL_API_H_
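Two details of the rewritten prof_acl_api.h are worth spelling out. First, the widened literals make it clear that each PROF_* value is one bit of the 64-bit dataTypeConfig, composed with bitwise OR, and the aggregate PROF_TASK_TRACE moves from 0x3842 to 0x00001852: it drops the deleted PROF_DATA_PROCESS bit (0x2000) and gains PROF_MODEL_EXECUTE (0x00000010). Second, because undefined identifiers evaluate to 0 in #if arithmetic, a build that defines neither OS_TYPE nor LINUX takes the visibility("default") branch; a non-Linux build presumably defines OS_TYPE to a non-zero value. A standalone sketch checking both observations (the constants are copied from the hunk above; nothing here is part of the header):

#include <cstdint>
#include <cstdio>

// Bit constants copied from the + lines of the hunk above.
constexpr uint64_t kTaskTime      = 0x00000002;  // PROF_TASK_TIME
constexpr uint64_t kModelExecute  = 0x00000010;  // PROF_MODEL_EXECUTE
constexpr uint64_t kRuntimeTrace  = 0x00000040;  // PROF_RUNTIME_TRACE
constexpr uint64_t kTrainingTrace = 0x00000800;  // PROF_TRAINING_TRACE
constexpr uint64_t kHcclTrace     = 0x00001000;  // PROF_HCCL_TRACE

// The aggregate is exactly the OR of the five task-related bits.
static_assert((kTaskTime | kModelExecute | kRuntimeTrace | kTrainingTrace |
               kHcclTrace) == 0x00001852,
              "PROF_TASK_TRACE aggregates the five task-related bits");

// Same guard as the header: undefined identifiers are 0 in #if arithmetic,
// so with neither OS_TYPE nor LINUX defined the #else branch is selected.
#ifndef OS_TYPE
#define OS_TYPE 0
#endif  // OS_TYPE
#if (OS_TYPE != LINUX)
#define EXPORT_STYLE "__declspec(dllexport)"
#else
#define EXPORT_STYLE "__attribute__((visibility(\"default\")))"
#endif

int main() {
  // A caller composes a dataTypeConfig the same way, bit by bit.
  uint64_t dataTypeConfig = kTaskTime | kHcclTrace;
  std::printf("dataTypeConfig=0x%llx, MSVP_PROF_API -> %s\n",
              static_cast<unsigned long long>(dataTypeConfig), EXPORT_STYLE);
  return 0;
}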
diff --git a/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h b/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h
index 4f013eef..f8cb1b22 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_mgr_core.h
@@ -16,7 +16,16 @@
 #ifndef MSPROF_ENGINE_PROF_MGR_CORE_H_
 #define MSPROF_ENGINE_PROF_MGR_CORE_H_
+#ifndef OS_TYPE
+#define OS_TYPE 0
+#endif // OS_TYPE
+
+#if (OS_TYPE != LINUX)
+#define MSVP_PROF_API __declspec(dllexport)
+#else
 #define MSVP_PROF_API __attribute__((visibility("default")))
+#endif
+
 #include
 #include
diff --git a/third_party/fwkacllib/inc/toolchain/prof_reporter.h b/third_party/fwkacllib/inc/toolchain/prof_reporter.h
index ff91351b..d5ed7569 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_reporter.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_reporter.h
@@ -41,42 +41,44 @@ namespace Engine {
 * the Reporter class .used to send data to profiling
 */
 class MSVP_PROF_API Reporter {
- public:
-  virtual ~Reporter() {}
+public:
+  virtual ~Reporter() {}
 
- public:
-  /**
-   * @ingroup reporter
-   * @name : Report
-   * @brief : API of libmsprof, report data to libmsprof, it's a non-blocking function \n
-       The data will be firstly appended to cache, if the cache is full, data will be ignored
-   * @param data [IN] const ReporterData * the data send to libmsporf
-   * @retval PROFILING_SUCCESS 0 (success)
-   * @retval PROFILING_FAILED -1 (failed)
-   *
-   * @par depend:
-   * @li libmsprof
-   * @li prof_reporter.h
-   * @since c60
-   * @see Flush
-   */
-  virtual int Report(const ReporterData *data) = 0;
+public:
+  /**
+   * @ingroup reporter
+   * @name : Report
+   * @brief : API of libmsprof, report data to libmsprof, it's a non-blocking function \n
+      The data will first be appended to the cache; if the cache is full, the data will be ignored
+   * @param data [IN] const ReporterData * the data sent to libmsprof
+   * @retval PROFILING_SUCCESS 0 (success)
+   * @retval PROFILING_FAILED -1 (failed)
+   *
+   * @par depend:
+   * @li libmsprof
+   * @li prof_reporter.h
+   * @since c60
+   * @see Flush
+   */
+  virtual int Report(const ReporterData *data) = 0;
 
-  /**
-   * @ingroup reporter
-   * @name : Flush
-   * @brief : API of libmsprof, notify libmsprof send data over, it's a blocking function \n
-       The all datas of cache will be write to file or send to host
-   * @retval PROFILING_SUCCESS 0 (success)
-   * @retval PROFILING_FAILED -1 (failed)
-   *
-   * @par depend:
-   * @li libmsprof
-   * @li prof_reporter.h
-   * @since c60
-   * @see ProfMgrStop
-   */
-  virtual int Flush() = 0;
+  /**
+   * @ingroup reporter
+   * @name : Flush
+   * @brief : API of libmsprof, notify libmsprof that data sending is over, it's a blocking function \n
+      All data in the cache will be written to file or sent to the host
+   * @retval PROFILING_SUCCESS 0 (success)
+   * @retval PROFILING_FAILED -1 (failed)
+   *
+   * @par depend:
+   * @li libmsprof
+   * @li prof_reporter.h
+   * @since c60
+   * @see ProfMgrStop
+   */
+  virtual int Flush() = 0;
+
+  virtual uint32_t GetReportDataMaxLen() = 0;
 };
 }  // namespace Engine
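For the re-indented Reporter interface: the new pure virtual GetReportDataMaxLen() lets a caller learn the per-report capacity before invoking the non-blocking Report(). A hedged mock below illustrates the contract; ReporterData is a hypothetical stand-in for the struct declared earlier in prof_reporter.h (not shown in this hunk), and MockReporter mimics rather than derives from the real class so the sketch stays self-contained.

// Hedged sketch; ReporterData below is a hypothetical stand-in, not the real
// libmsprof type, and MockReporter only mimics the Reporter contract.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct ReporterData {  // stand-in for the struct from prof_reporter.h
  const unsigned char *data;
  size_t dataLen;
};

class MockReporter {
 public:
  // Non-blocking in spirit: append to the cache, reject oversized payloads.
  int Report(const ReporterData *d) {
    if (d == nullptr || d->dataLen > GetReportDataMaxLen()) {
      return -1;  // PROFILING_FAILED
    }
    cache_.insert(cache_.end(), d->data, d->data + d->dataLen);
    return 0;  // PROFILING_SUCCESS
  }
  // Blocking in the real API; here we just drain the cache.
  int Flush() {
    std::cout << "flushed " << cache_.size() << " bytes\n";
    cache_.clear();
    return 0;
  }
  // The new hook: callers size their chunks against this before Report().
  uint32_t GetReportDataMaxLen() { return 1024U; }  // invented cap

 private:
  std::vector<unsigned char> cache_;
};

int main() {
  MockReporter r;
  const unsigned char payload[] = "op_time=42us";
  const ReporterData d{payload, sizeof(payload)};
  if (r.Report(&d) == 0) {
    r.Flush();  // prints: flushed 13 bytes
  }
  return 0;
}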
diff --git a/third_party/prebuild/aarch64/libalog.so b/third_party/prebuild/aarch64/libalog.so
index e041ad7e1e1f697da1328537689b7b300ae292dd..65aefa59a84fb99b46d0674ef6870f62e282ec82 100755
GIT binary patch
literal 225280
[base85-encoded binary delta for libalog.so omitted]
z(6@DB3RUTdNL+Z-7Jt0GC;Gyco<K6iahB`BKeD|H6uCbX97y8Rw^UCaKes>VG{ElqAu-JTX1@l>{K-?Hrqtt-~i5j3OXFU2gFmglyWRrQ9$!e_@q4SSDtg+dr1 zEE&>Ly|Tw+iPNSU8N=Uh(-%CqJqeZZvjw zsljdR7P4Z}`vPr{1>^03_XgKWSW|~k%l;RkO)b(JkD2&U&l`hr*{<+)R9|S0N}i)N z0kV*oMPcw+Fp-uRsYo{4h_S#J3NI(*f_&b%=8Yba9i-8Uz!CGb_(V^r+aw6wB7CC> zMV0_*gs+C8VE)f-dmIhY+UX6p;VRU2L?0y6#MZH>PtL`Qp>1#L?0M1LD7Apz(;6!p z05uQH{%}ATlakjDk`{D1*47yeV1@8^nM5VYYGRGT(5nInWI}->)ORp!2n!d`C44c= zOAb52qXdpnO=y+1ClAo%`B+#l_TrH8N*7KtEEn2Z}s zQ8eBg5hd809Cy_tX?7@)4q;8IQYFI6Zd4*U{bW9|?g*{S?YL}dfop1ep{1=8;jTXv zHtClJmo!8C#GDbc2eXw{{E&!F#c)C8aPBn$i0?VdCaL+u5yVD4QL~e?j%=vA6YfS1 zu&M2q=gJ$y@FGw)#dOQ zJGx>*?GiQx?ls*o{ce9)%qY33Fq&c;7R*qT9HVwbx2HHFD419bj8$y^tL8cth1}bA_v$@xs~5m?aDQo2ACw=SrZeh zI0TIKz_*IY*lAwRFh2vm5mSR=)=O3ErX{^lZ}$p6BxZB9x6pAR4i0Ziw0arPsD6BAe8+%5bTWw zA(a@LgOLD?7Wc$R9)UK<2Z;6L@KK;m8Z#TN44yS8tRYOUzU_!y)WLWAHpy4)j0I=U z(LLQqjvx;pO~!LnF8NsOX`G>ZIDd^sg$7X=t6xN0D@@{GH-#wF#&(Lil;p-}3qIKz z1u5Sv4Jd?mOpMV!%Y*%kj0zh=9SxyKQ0{i3ArTyz>R_CoG;5FhD0~#J>T->MdeZI* zh7qJ_)6A@ec3&@HuWLdZM#FPdnm4VEFmdWTVmpyzfeuJ>`cR=ard^Fm zny3iiO(;xoYZ!`l3=5zZ>K49I>{d)wsae`yl6RI<8G&?q8>j1t?iNnb zth-!ju7RUE4lFoZreSI$P|DbE@X_79l0diyDZ>63kYd1U5f3-{V=o}2c@evrMtwH} z0+Zk4I64qsxvXL?X5Ll_m4$=Bh(>x=TMzUP0f|~DE#e~_hjXBc(u=rd8fx1vAw7g@ z>XFedf}appexgKYp&<=|A&l#g=5Aq2@jOoxsc|;aZ98#8&UxXV+CD+S9 z3o!m7f<+vs#|@?lLXH+obs*9c4z-!*Nh2ze528tkESgEEnr`H6(tZw&KonmOMa3dS zm)b{ur>Bj)GJ%( zEQCqto2VNZ))O*q>sEN%Hd@Rulr(@(P|Gxs!f$k~T;N6Si~?PKV#-}Hbs8dZ)P-#d zLJUjaWjeP3Ens=2`Q!1<zF&(;LZPm-JS+ZS=^7;8|-FTLaR)*eWxbCUgax>#PrU zY3;G2-R7;L+z5z;ie#07(tM(5)GJ&9Ivqf6+9bBf*_bdb&piR*Xj4SpARDZqIZCRf zP>%dOt!+{cj#c0tgP6BnV%IEFZj|LCZ{K2GSHL>Gu*r0zFes?pAEri;1fm#HI&9-g z4fGBXRY4emrzCk9(nyF_F}k!t5rZcMm65odEL9w)5NR3Fk(;2c41^)43Wj150t%}X zd$fZQ>7d9CK$o7Yvh!WKec&diM%Yuz=!vvoh@Lty?OyD)OuQA7m$rW~Ye2>38>Wqx zWsgpVDny5@D&`T}QlV2}G5Ye*7NxzbX{gCiKu)Po(nY$DT5pV;wpLcxiPP(w&x9;00?FVc3x#Q6wP#i(GyVSeIik=b?-Xz;p2V~iLSVM})p z4y@=V>meBsqgBRbvRODpL)1XAh|o!uifS-TR%X0u zD`VM>39lQocu}!6j#(T5?Q|^m$qx^OM`fSQUOu3`NKf@ledvmHL@kkn8YQzO@F+o{ z`trc2c5>h*rb02o5_MBWJ(}F~FtKZ8ZdD&;T0CUx_COFB<6w7Nkj79nNh{a^ouLQ? 
zXwK9lc^InmABDvkB`0T)wlP|6w&S)?2zBg;CPO3gvtl-gR-{#%rVcMa#E#|{C(1>% z3SkAmrXIv&QpY0aB;rJ|I+!yL216ICqjEPeKWo~IG$?j)kXw52nDXEVZ(CZzEA0rT zuv5inLM(SX5>?dKU;uUGqXfCEKvv|EnyR5t3f_X<=Fm~?pZtM9h804L1&tF$w}wea zBGYytCOjFM2qIsw&mZrN`okD@L{j?lCpX%ewvfa`=;mNI+IayFaYf$BWL;{MF=EoG zNf#tWQMd$)=Mdh8V`bj2s12#mmqsfoe4JQ(NMC3`3?YR;h|$E(U8B2X7>yCc^D=cs z!GJe-xL5AqWgb%aOJ!3^Qd1Cvyel;V$D!M+A@oKS;WN-V@z|-7^eAk-rX49jxgTt) zX{yID2K6W$PvPi6coQ8r(0PJ^m~iyKSjnMljP3}1VR6#4AtaTNRH~I`EU1uGkReQ^ zKTbp05sX_SC{m+T{bB4#OhXpa^Xf_zYD|ruVVctTi$p7ITy>8hTACQ{>#4-!{U{k5 zxiq=-#5xXO3r;N1Q5wAc?Os0~<6~lwy$Xkf2_O@p6l?Xy5SsF75PcU12?OHMm3&ep z7a6ffu|C6~@t098^(G|JZ$wibQ)lR&0b~GDT{t50A1N;lffctb>L!(Dhs~R1Qi<fDBxZX^3{HAOR$qq?sNuFSCwQ9 zd4xySP}gAW?+Ue{)gJJYJPzYwK2Dx>bs=9U#Swkj;z!0JbQsZnjL+5mnBX4NzXe}) zOmCXcEh>oPnx>CBzo?K~aPhDBH(OPG^`3=-8w(_5E?3A`6&ccbV<*0)xfRlg57(#dLk>7)KV4h7ZuS$S9XL0uChGc?w}^0V@;f@OGr-uO}HN)`MW+%(o# z?=LEN_MeTl)AB3qF-v{*?|>+%+EMg02FB0lQK0sf{?+foE4WAlWcVokE7TWoVXUwI zT@eN8_wbA#Yx`rkX{@jQ{SgJryvEuq`~MZ(UWc!;5B2YqEYJ_dQB{E@Q+ZJY)94Ce+Tskvi%4vR`pxeQ+(<8IrU%FSO0ExaGoW}*>RQu}pTFX{T%=E*GQbKF}OF&cmWo9z` zKEJGQeRqIp>7(FPT$BG*ANB9Ic)9+9y1`KCQK5+st5~4~blt-8e^8?F`n2{(_$v98 sUEt@0^u9|<`FGJR>wgsIxs4wsx582XB&EVg=`|?yLv@w^oU^`e4{1PZY5C;6{~vS-=S)VPTS3Q z=eIwKhubu1U6qlp|Lu-Du4CrD>zI*mT^suwSus;AnMOxSP5CM~Uj^s0uEj>2vaTVV zJs4Ly_i|PJuOm5~b)~d)Bz{$vMZZ%&HW~e|W#{N*%DP(lZbClN^Z)M=$_bOWyg+vG zo@)|XSFVn(+W_1mE6%v#X7$+fImbR*+rRh4nfFdBDO)o4rx&;XlXCVzh6T7r;OvDn zxZk77nc#5YSCGxV!nQZm+3V2gzdahE9!PMHa_o)Rvn-@>Ymf>Kc2tBu8qrJbn%EL_ zeADg0VQPi_+PJC^Tcuk)s4^bh?CiI!R)uYgv@N$g`b6Z7O1(LB%K5Qd)D=oS=e7lp z=ouOl>Pnmzx#52AnvtrH@&N>kIncOqVJc>aQnQrm z^M_F7bPWnR%vfX&o#1$HTZ*;`HEr4CmuGSL3vfHT>ggT%X11#aV@O z15WFB4nHi4!od3Z2IZkqtf8nI#-#EX-*@Uwh=V6>jaDI*RD9&$jeuwjWoOJwXE~*9B zpZNE2zMkM~SC^Aap2D>iXB*=)xSqw?&X~efH=NyZk{^fQ?1__(-ngEN^E{lP%=P8# zK)zDCLE<;z!N5b98_w5oTqAHsGLFLaLY$*;j>Z{{(>gBU-{lNhJqdMnP`82=8}JNY-s zOyl2~xMuV39C6<&Hy2zU^Z9(e3)eX~3vkY5j?x$M?_$2r!?grwDdRG}-p$uVxGuqY z56=5=F2zZQ8`ow0yByc$I9K5O19Oz-e*XOct`#^R#Q6|&EBX3IT>pgg5yl=|AH%r{ z=i|(+#&r$Or*J-lb1hCUPU~2Q-&Hu{51qK#ImP~Ks$;~8e|GeXoXCCZzZp@-(=bqj( zYu2&yznocg(|zXzUGVdW0XaX6h&gv~@n2s#c;=7qlq81tcx2X(K@TQ=9=+!5+48SG zed3P(e|+;mW69@VE_g1v^4tqvfAgOkzx~r)Z@hP5MdV*Dxoz;B&n@lO^!$A#dpR;LaZ(y>#HJ z_pV=DI(b)Y+on5aOir4yKQp0eo9)+=%O5;>_ukS+og?3vb7JM?FHJf#?fFk{zh`Yp z(c~*fUpN1`_is7;M#2xS-Y0LFQk8yhTKlw(?>v6avX^)MG-KzcjWLIwdGy7s_dmEi zXxJZbyR+Ydm6tXTxOK+bCEX`@`<6ry(Gb1o=#QyWF3ufy zaNNdAzDT~h&#GC2vU+~`!1u!+KbZ4-k6L$O!ZX+0K7U5@_8C_VKO0i9ciyro*K9m? zB&g)O*Y3z3_-(KClVUu7n|13CchrTyb@16g9c(RqAoZI3uyM^8ycl(s8_}vH3 z47hUK!4E#&_|Av--Sx%;!^-cB{c_FBpSJw<{mP&2*f)5{*0+y*m9XFT-u`O}kEA>~ z`EOg2f|7fMOg?t#%-mu3KG}L(_qtPqUwZYEtl^)Z<9z(sOBdZf=14_lZ;z|BD5$W! 
zU~Oi2Si|qGduiv^sWW?iaa*6ce>=Z^ZsY_1zB%VUTf?yp6$b|Y`Jr_?Rt_JP6CX1E zo;_{d-%$P1R$STZtBjY<`KxEh%YUkVDZTE&r`A+dFZizT9|g(tGv8kO;){PM3*NAK z;+*>jM5aZqELiz)zxIz>?!6__W&7dClF`!!u1wms>xrjNzn=E_vInvc%_-S-@W|QO z-F__}Kklj3A76F#k3+B9+4jYOK~>$a>Umj~=go&bm%nu5ls_(8GPAn+FW=M*JK6um zzpk(aJ%7>0FE$Nc^w8N$c4q7yR^U12>Dl+XrE}H6IIn?r#_set(Q-{g#HqkGnsZwDY!%v_F^6N!|X% zAI5GCdUx5#J1$xJ@s_WDYF_pHg9RVezJKh#`D2#7ynpA1)$@8RTmIJDG2Yan^IN{@ zJ1T6&D@U#_Uh{or&rerhF+Hn!Lh^0zKXd=PanHWp_{tmJJ4^4KFrmoy_WB$D z^yIgFM>KC=UF$)kXCGg__u@IXo+!;-_t0qX-NSZn={9Hf`E#D#{@U0Db7x-_Qg`)p z&kcHiNc!4w?I%7TK4!*$76RvzcX!QG+jvhBV?1@X7J}#?%D}3=ycT75UUysjPPW|Dp&T%wk2R!WR996sc zx3}$zW{$y~ZoG?0eZNkO0k@skHUDsc^f{qjr++H|pM?2zSLOD>ytgYp2^0LT_`3t> zxdCy#tMmzdyXI>H@LPL#oxZY9*ZhwG^#4l$eYOYS!vn~97wev`>UBDRoR}h?*c8AHe}db0Ww(#9km`zmIDni# z2hjhQ0RB0AP}lNp0qnmafc)vOaMW+CjCp$iJA4>G&c6cq)jtCG|F!`Be?N5T%5L@{ zUHd~+0QnOG=xGb!54Q#I2R9bEUFq{}0Qs#wx=#PE0PV5>`gi5$lLPdZ+XM8|vH~LIh@XAHcNO2N1H{7%1C(nIz~3K$|1^O8F+6r@hbIHn>-GTlxio;BZvy!3bphJ( zIjw&$mDiUv>zL)ft@~*&AyF4B=h%ixs}ftg61o9d$DYtl-DX@k4F`Pb+)4Q1eK(`~5;?2ew*oBVcY|q%)Mvi5-R^1=bF1+Cv z%=OA;`EE7L-A($~-6kR2RUctG zp1>cTGuY}Eg>26QY){LdZF>Cer4srXJ+1U#>NwU*g>buD@~=YsQojrBW>Q9&Z0cfe z-)h_Ke(gS$`(2aOF0B94Y|jI<(8EDH z2z0#2{!nqL;S&t4?qfS2?QQs2<_B@V3;D_rwah=wcCa3Pq%i-JmHr1qtYCgG`@ed` z5Vfr5CoHFe?K6ziFJt>e@PiY}J~wl{7WOv+tahp6c64()7O#C+>IE)0beGA$knNenb}k-g zi193c9FIq}9~;7|?-yLJig1&jo?Fo2Vt=k@f41ti)6(+}lhCsBS=MtiKX7T%Qj|J} z(+RjH!Ld#DcU_Gq|Wmfzz=D3vlvPr+0B{zld_G1Ty*nD51WF^_9jysF|jA7agCIQ>y>-xltVmOp>S z<+}XJ4dZrCXfXn;_DyFytU0%{oVHD=*5VuY_OH@kjQbib;l_#(WF6 zZ^&Vj&gvH#tY_-woy*N-`EHI&FpECu`J(o7zxlwUT(20v`M^Z(ry+?(eu25DMD9m> z-!yX3&GqpS+c|;lVA=Bu_BSub&wDt19M>zR*$}ZTe=*l9)URH@A{psl!TrMW!~0pE z#400a8q4`N%db6Tc&ooxS@z_I?3SF}a6FP9%6?K|vZ*y}hssnVcmTIcH*VjA-6r99 zPVYd)Nlp{n!D@G!pAsL#c3Z>gf8_MVFBrm$enLkP+cS*yOhkSi$T&^{qe#ZUcz)&OK(sM5Nug%==Vp-03?(Z#kne>)D zPjWlPjxl@!r=Q2^^X7Jz|0M2{efF+03Gd+aZ$cpPo4H<=J_oqnV>gpNod(;GxyWY^b!aThqdYt>0e!&JDs-utBSkF9;8xhP0Tl1?rLnJc4lKmm|GefN5a(6Mm@E?X>!S+1F z?GnOq*uipEbNao%7$Sz#mvFn(RvZ2kmVb?vp8e38KVOS{SV zP@WGYu$+b5FOG7*u+oQfddH6@;V4c&l=W%y)5pbro9E}Zl#I$(^+j_t6+Z{B-`?H}`{QKX6Wd4=ui^&7uZ*$oT;re3R?DQwdPU$f}o4boyPrc6m8Jqh%?#LeQdWzzkc^E z%Xjm{>*ZG%Z@tbzS8_P2#R{~yJ2p67L6 zNKaGl2Ig;Ozuj@f5NFJ7mBDr{W;~FPx@!@9NC;Q~BG6`LreyKH22r>m)ew)gCShGoJ+5d0cF0I@ycd(xI><g8p5(Oz3WMQ>-UDY;=>3Ye_cFpX<<2sVISJ>jR`UmnxG#Y z7Z2q6jx`EecD{}MP`^NpbXI$%aJwu#pEconRk1(p*rUa&h0H&|e&z7s_`_t)G2Hv3*kiZUk8RzsLPIW*{rV_5Byyr?TFpx9W91*SD4h zSbp1$<6GqqCjAO?Q5RT#`;{S5n0K-rQvLeHR+is#z$C2T^k-OpUYjAT`0zT^ruJ1I z8Ga4Nu@&t9qi#0!%Hn!`&FS6j50?ICxIf15zFZlXtJ2bP=N1*Fm6oNKl%=Jqw3#Wh z)3UNlvUBrG%d$&SX2%y46=tWTXB1?cdtKj2%UqbAmXlwYUXZ^S32z@8lUz`gdvjqy zQRbWlDy68XVD|j7?1eWMUJpX!8QEpmmXs8g#1~~{tJ1QPYy#@~qTJ*~rN{~yu_S`h z3jsPi#7~-8nv6TyW${IYh1r>9*;xyaW8C=U>@q(-xh%bGerZ1GG{MR=J?j=Is(OFrX6X&6Z%KN=4fE$Fmz7YKvZ4Y7CAB8bMJzb0lOj3k`30o= zIMTgKtzkXz@nt0i)3P)1D?byhl0P?lT7FKBwk_YCS!mtOq_$7X%`PiLGE&uvgs)iPk9(xQUw1qzyE7cS5?oLfvOCQ<8Jm31zQ;tC3kskAVZ#7#==30;tXVq$tpY4(hQqKxzcU9H6HGD#)D5WIFlcA+tTM4Q)+cmYg?N<&32HjMs6r69Tl35Ere&j}S7HKrL|MuFOltetrMd8|$>vvFNiNyFY;s!K zg7mZk>Q1nhv7+U%AWY-=e0YJ+?;x}=6AEM*51g2kJ-28<_U!D^()3(tm7AED32i3p z$|s^o@@e3#f}-5Cd~M^h#Z>j&?6P761N6r$*hS+Cv;56iT9#EbzfAXu>G=o-Md;8u z3lUokXY8sVt`yy{s90BpjZLjiAsL3C*hvMA?<77h2hB@GXJ?rP8$S&?>To?1SxV4R zGZ!P<*ei&?HNB*;3z}kAXIe{{mYp#l{v)Lr>C{a+@smloVSEKn&;_D#ue-&%GodU! z_qtoG6gt!Ng88L+y0OPndIZ0?va*s%*B9l|75&8wZt>{_1sRCNx+YmjNYSc*I(U9! 
zzOU(J$V82GI+)d@gL$X~c!kFB&3H|q$PS{DZDkVD#)q}iKBQ~sL*&j}W6kBJ(HKdw zKR*}lhz_1I12Is|MU9FwbwDuDfWiYcb#g&=cCr2ymzfENRQZKEE@;2=f%L4bw7V%% z_?VKpOu%-ilPQtH26`?+jdo@FN#&S0qKfC2mC`s4n{*_%VpAtl<)&roDv$%~STr4> z%VZ+yrvDh>GfRE^wF~t?2lu2z@zm!zO<`$vi7r$Rg+{vmsbi}V;GCb6kX=xmt%t_U zJbfuNqpDT9&f=T1Ov^^JC|ZP()me~UILEpcs^B6V63OKE-VOU*5`C1arVV znPmmkGzFAL`-E-~i_rl+J|~xqLH0|^E-pZfo|c`HKEI&MxoBE`iOMNL&{ruX`82lY z7v`$Y6;sq_NsZ1mAXep+W@pb)Iq>jOm4mUl6xvhjoLp|K#F8TN3{wl;8fL`t$>ZdR zMakI(InYoiBGzg@_$J>{+Lk%Feu;_Y6qHB%2?>C(xLovqEfLObxm(0w$ zp{UIGXQx`^8%~m<+p$Iu?CRUf|F~Wr7ytzeLswBG*V-3|! zi^x)mXlSdR>Ta4SA#;Jrt?$iDpKIBa>|e3~zHI^oP35L%0YEUOqZE-zFw|h=Ttved zao1DWqXbMPFDlHOQBpL&IHl;uGRzz(&CTdun#0U3H@j>;#wc8h@1`F!i!;rDj1l$5 zyAj>h+|pcfc*Byp3|)|!S9EtF`CJ^exVg;+)%GES#dn=%I?bzLK_f!1i;D-llOb7wA+{M|o4jnUb@ZbY`8mc0No_?gp62_zJi5|N*o zJ#(6_76Kn={yEzk^7H0Sv$(nOFnw{Eo-n1&rKOjN^qnc~iJhtWddcROHH$+aSroN5 z^Dyl>y3c@0&dkfsGQ1vQEurZc;n1DS@(bZZ`DLY)%qQHssoCP9g8a-y0q&)jq|b%R zV5(2WLyP~SD80NYrDdqU5?OjeNK>S2)8T07@J$y3iOffP`9x*vEIz1fAgu%M*8>uU z;?mq)ZX)UuI&MR8{7x>!La#Kp6c_EnrMXF1Oo1t(HI+$Yrqrc*b4dm+Xkz@ErT?W~ zMag_JF@iv%?g{B;bcdD&#prT69aiDw(r8(_Wa=nG`I9p-O-DB<<*VQ*PV2na>rtaX z|4a9$TXS5Mi1zNb3!sN8ANM z-+?u5L?b>W z0h+EXTbcxucO#m&(fKm#hm~H@1l$zTDiUL}s1p zw=||1`c6`I4t4+wGxa>wREqClGN_k~MwWlWqeK{SW)^toBF@oa7qHCfA;PB)`1o`T zp7~{qIugf`nlAfCe+mHjW*UKad>qLtASp}twXy~HJAQsgs89kh`(6D|z1rwTx@`41Z z^qdH{XzEC@5mQt%Thm)XwC9Jwo|j$eW6xm z{#?I`(M_7m1Wce?%Rjf7BAVVgGizpHGB(7l;@DJ}D*02%Gy@RHZY*V;eoOi%jVmth zA}zE2i76$|?5xJ4HI*I&QG80#w0sN?tmln6Iy>bdt0fm>&eKUtT`o<@Zzzf{np<3g z{%B&mRTGUUm9HAVHEO3@9k%w-WaZAy(CJneIXFW4lA$^zAP0KrrKY7F_}_4(pD&|U zpRyu{kL(nGZ`6xI%ao?oP1|)UiKJRLyHHr(_LZXxw=ziupthmOumOtcq7bs9$p_!) z;=WHTyG{VfxZ9PAprwvfBO0|tIha0V6=3c#W*p5VO3LWxSo)cpiD`uoVW%+jl1m6q zq?>bnzi8e8E-j5l-o<##gP+#poish3&rtKD@xPGQHK5g8Xv6J`-lMSzwb^agy^)8E1_(g|>ZTSAn{Kf-P!FJsL?JuRe{g){_ zDE+sxx@L@o4ml>3WP|YH$N&4^|FghhrGN0(juVLQh$9tJC_Tx7*@Al(snUCQ2BP>04FmJs# zVe$3MJN(k`Wj@i5Z)D!(#~)z6*pF{w-tEU9WxmpnZ)M)=$6J3Ctk#bYp%+MS)cf(F z%s2Y+VazxA@e$0o`0+8!EBfFYhh;Yh^PzrxBJ&Y`d@A!XetaJD4nKY&^ND`En|YTX zU&(y2AHRlqw;%6izS581%)Hl+uVud0kKe(3y&u1q`9?p!k@+S+zKQu3KmI85mf)$Hy}7_TwGQSNib@%zOR# zMCNP#_*CZW{dgDijedL{^G$wyG4m~c{6gmOhebR2znl3`KfZ$b2tU4(`4~Tb4f75^ zelzoleta$SEfYJU9n6RN@u|#5 z`0;tn=kdIPUb8GTMe6n~7CgT~t$DZLw`s=w6VYy7jH6*`sgN^D@b8KAF@on$Q1snc z!Q<6C-{BBEe`=-gB?x|~FE##-so)0+eNqJj( zZo%^>G&)y>;HL;Vm4Yu8a@Gi*Kbg?EynM_$0)(yYmvCd2%i50fW8|m_^X9I9fHRK!*?VI{)~@>KMVeC!KVtIU%A$I zU4rLN*)^Xhc)U96JBkJG@v%xR6nu!_-Gaxfw!Wi6@DGXlRtmna;MWKquXg(mui#hu zSo}?E!Q*XR-%%@g>m6pgxkK=`igN1(f45Hx{+_$w@#?kjXcRnNt@j-V1i!(@;(JEH z_Y>tF6+FL!taG&p9-qeej#j}x>SOWaWQRY~TGTp11kayn>RX|LPZ9cr34W<11lI_` zA__8l>+n6g*y`_Z@D*%kii}@QFgtO2K3K?>p8AezT8N$}9Lk2sxVt z&s!2YSFPZO2{}6if31*HFZc^Y`n`hZPe=6KM#2A0q(30|xk7%E;D?L!M+N_^;9CSA zF8Ef#yM%mos>A>3TQBPf5&U-+iEF6fBLp8N_(;J=2)s3jQC0-z<3k#7^I>75slh`W=F&ztLbF^@1O5k+|*^{B)7N zQSjC`FO=N$~v1lP=|`!yI${JrPVljU_gaZ@bqM}S z!6yj*ULhw@@Z$xaD)>`Ej!W=Y2|iEoO@c2L{AYq+DEO-d?-u+ug0B$#Cc#$0+TLiyc@U4QMCU|wa!~e&K`i2O8sNh2d|FDn~CirUwA0hZp1wTsgGXx(a zc>dFk`fjY?+eLbZ;7f&`34(uLq)!xlg5XmHzf8z+3BFRK&lCJ%!50f2kNAAYLcu@d zW0i6XUhb1r2!5tWUn%%>A%BhFs|4>A{4l|97W^zBzgFIFYY@OuS+ zz2F-qZwbNmfZ%Hc-z50ifTf02-rDEK~tPZfNUkna+FoJgN1 z_*B6c3x0;+7Ye>i@NU6BCCaT3e5OcWDfmf(Un6+A|Lhg~Eh7D9!B>cKYXzSy_#J{T z7jo(a|E);BSMb(1^>n*Y@Fzt21A>20@J)hG5&TiXe9vQcy*@3|8Et1 zh~R$_e5l}kuMXgTnBY@I`Ut__Ciqc;cM5yP2>y1FK34EQ3Em<2-w8fJ@OKD4QSf=9 z+*H9oBGS7A|9ipb34Xiaiv@pB@CyZhr{LX!&lmbs2>y>EeWl>zMfx>@|5Bv)3VyBN zHw)e-u1ZHwoS)_@jd7KiQ-2wg^65 zq;C~`hTzrN4*!2q@F9YKSm+-r_|HZ9Fu@-Xe1zaL1wTsge-?a<;IjlDEBHl%cL?4t 
z^h^-^4Y7fui!5b={F00u}EJl_+x_KA^4XCUoZF^!S5CPYl3eSe6HXR2tH5nO@e!2vPZ0cE!6yp-_d@?v!5513F2NTGK2Pu?g`8r+#|nO-;ERPE zx8V6tz396Yf}bbSR|@{mLjD@TKP1w71^=VqHw*q*A*WXG3k1JI@FhY{z2H}g^m_&W zfZ!ViKT+@p1m7y;HwnH>@J9vTBKQ`;&lh~F;44JAil6h+^V~m)^dW+ORq&yLUn}IC zU9zhhUCcA7yIOU2$?nbd_B6TNRV}#R7^Euu-tTM+jT+#Iu!Vmb9%0{ao%-HGxaTzY zG=3O3L*Pe&%LRS{xJuxsfExsU7Pwj9Dq!0{UAC6L2{=mN8epfuF9K%>{4#L4z}tbV z1l|eUAn+T&%>p+7+Xi)%{}ynR!0!M%1^xgyL*RYDny z;AVlHz_uYB22HA@Iw<@>JGNVW9+060V7eZb`ce+pbB@aMn{0)GMAEO0ZhEnJtcBH$>2qk)|Qj{(jQcpPxKz!QP11fBxiAn-N7%>p}tZBZTN zPX~??cowiz;2VH51Wp1j7x)(7DuHhYZV>oR;AVj{fNdi>%Fh9g68J7)r@)2483LC8 zmkYcAxJux~zzqW52iz=hIk0VHNBQ>yM+y88uv6fNfinbt6u4aACxEL2ehRoj;AerG z1+D_NUD#3nCg3Q6Yk-{szX+Tm@XNsE0&fSd5_l(YgTQY9Hw)YVY`dtV{I`Ik1bzqD zDewot83OMEE*JPy;3|PX2W}Af3*csfn}KZ?ca(nwI7;Aeft>>X0GuK4G2n86PXbp7 zd>Xhx;9r581-4y_@<(-)-yJwg;GV!vf%^bw2;2|2T;M^#RRRwIZV-4laI?Tsz_!sH zQ3B5bb_#q0aE8E1 zz~utp0$e5V?Z6EJ-wE6-a0amLl8*9ofTIMy3)m@eA#jGkCBWqZF95C*crkE;!1n<+ z3tSFtyR@VH`+=hbehAnp@Wa3v0zV2|F7Ok;RRTW++#v9?z|8_z0oyL?D1Q@hl)yE> zPJv$p&Jg%z;BtYt16K*W6SzU(H-MW3ZUDB8=_vm#;3$FL0d@-f0dR)E`+&;@{uH=M z;Lm{@1pWfJS>R@1+vOeQ9|4XM_*-D7z&`+I2z(5oa;AVks(@}m* zNBP}>qXg~=>=d{UaE8GBfXf9Q1Y9NX5a0%ZhXXeY90hC}+fn{Sz)=E613LvC1Dql7 zIN)-DCjwUqJO#Kx;A?=J1$F}4#&wiG9XLwhS-?(#Zvf5^I0?91;9G#J1il@(LEt-q zn+47Qwq4OtehzSyz;^*V1ug{65V!=mT;K)3RRS*tZV>oB;AVl#fo1&=EN}y`ZDL3HZvjUM{0^{F;17T^1l|W+F7T(oRRVtw+#v85z|8_T1KTEb zlz#*`O5ksSodW*=oFVWr;BtXa0#^xq8n{8=UxAwiw#`8Ku^r`i2aXcBC$LlCKEN3Q z_X92$co1-vz(ar=1Rf6DEN~RCZE{EX7Xe2J91ZLgcnolcz~g|+1)d08CGZsB27#{u zZWh=HY@5=d{VI78qP;BtW%09OgT7`Q>;`+%DTE(f+<)lvTaz)=D}1ne~U?2fBc;n#=nnD&^tjr)wZ9{bF`ft>}Wf?DJfZC_1kPXRaC2iJIh z#XVe?gO5U4_P*{UlwVtqaw1((o^9>zt6#~CudbUjt$Ir@N87gkj<#1XakTAiZy)?E z^4^T{kJwesSN4I?hwWj}_H*4y&GsQR2kk>^+>o&m`9DWmk{KWBPNFg*n<70f+_fI9TabOOM%~UXc|*6?aq#Cu zC)j@io=-L6U7SYcozdp}*NnDbo=TpXyX~kSBm|* zYO)XN6p8z;AwhNN6IER;ue&5`(b8X zZA*BP@ht~@t5o~efat1lM_W4k*6u!P+wtVM>a$D!ruC!xtM_KKO`x{#p(fbXvV8~b z_8RzY(kIvpUxoH}V}0yF8BO*9HEFokBVAC4JBjdeB(*A4fl{AjYRu>%sxQvHUjq_M_Wfm4A-`)vi0>0MH;$lKj{VE zb42KS1JM2haF6`10Wt{J<0^e-d5_LMLuq;-&vvAx-F`;8e{_*9{v4#M??^}e;X|a8vi49K_XR##2a)EjE@Ul-O(N?%?C}}W zy~eUuplx(}{yd}2fqq}{Yx`h&#sRKH)rWc3;TnnSRHeT2DwP7<7QYSksUwC0g zn;Us_8Sv-x;1>tb2Gq}74Ple5{z0;+&y!q|r_(oudFc5K^J5&x&@Sj1jm8pQ?#R{Bw&k zbRTy0@svWo%;U3X-t;=lJ{0@$K6(oIrlq4D?XP7t_3@D0bhI6{S43WSUp}p4h@-B_ z9uckX2~W1C{lw_6mI9 z=g)$ww~erQnhkM3I9y?WQu9@V?- zw#mDI5f|_}p`)$7yV|_INw|~vCRwLVu z_L60%Q`wFIo~QiEeu~Qu>W;FzD0jqAchV-<%#6DObh&?k44R9ryV%+0=5}(0n6dK2 z@0`B2f(?D`w3W(%jVR_EL7lAnQG2bo>gDRM<&x&AA@}HydEwa-( z=o=BN>UJTmOh@$~f2O{iU{fhggVzr6_EdE=UQ)jGxG&=g**FLGHF2q*v2B<;={o4K zne`wW_~+R(gxkYAQ0JM7^gEIMd$d9O9tHmisy-W!7>sd{Vmr-+T(%(3N4Ou}Tm6LZ zFjCO&KjAx#6di{mlqaaSqi$-5nimwTQqC@kt3Hk~8nS$L48K(UWTkzf8~PZfM?Jlm zmqwxw1oc*R*TT-EYwnQiMrXtSGJDP%-E3ENhkB{Hbl{ENL7qr;zSf!AVfk=VkMp4a zA+B#6%Ni$SQ6H1CNcQpzFdv0Hl1s88>j!zrj%U!`q&w9?>K_CU}szP+2ycvGWs#>Tn0P4_c`iZ-PCwJj>E1Iuv0kf zN;XfY`>4zDg%ooSp_>DTwj^= zxHHI;esxXB6TRI@<)1q0)-PVP!-;k)|5Vjw;<|qEU%TAVhWHYFd<6Mxd^K$2>u=s7Rd*Te=`C{9J<~(= zU3;FI7kRop<=@;-YEB`>i~3f#wx=9NJo%dMH>0n&!G~OE%db(#^6uA-K7z4H`_Twh zcMyI=ZP+|i)kVT@4qrlk)gQJ|nESbtTH$NduP9F1FEzeT=_9?7o)OS%E$aIY%Ic4_ zG*4EDl^bw1?c1qeG}7GR1lpR`NGI$;(Yuz~tMxcG)vS{yLC0N7gE8Oeh7@ns#xW3N5Pvm5P4>zt;xXZLN!`pI?IYsc_zXrs66<71iL zi#+bI!^f(6B#c(~I-W+o>Yf`qbF?}$<1uxn;@u{UyFo{neB6wBMq=(<)m`1RvHz^m zDkNy7I#<2hg#Chp@EKY^JqBCTx(IQ7tipIjbA~?bf5V{D1F$vv3f9eoeB;adt2stD zjnsYqQ>3GOwBB1c6}s~H<>*G^S3JfpXY~pAf#n0%I5onO;~J^g9e;zIA7STTQTO~V z${leHms@|KE_Vl)+l)NaUs{mwcgSbPL#=Nu+O_tueL(af2j_KNtn*fLUK+osEpCP# 
zXdaI7y;iwK>3RMt$U_??)nmOtaw6+TdHRD3S8CotjDJmNZ)AlF0zF0%@XigG@@srj*rI1a1-_H7`^) zwbi$_!u*ZOr8(RxjQyBDPaY53ySuA8m(4NxLfF@h`GHyMo4p9196QN*y*lcixL(eC zg8I{%iuSNl;DdNiIAt5=gioOV)BKUfisP{JM`7P;3`&4I_?Aa{! z^w&GRyBh0#r!000_C+G`pWfSs`BY@evfC+dWZCd!%70sH`(Ug~e+mwBoL%ullIBFaeoWzp*-!ju9Pd!PT0?nCY=zUQnp~&n2xxj9der#yEv>=-<$p=aRK< z=#-BBIwHl27ngeeqSFqH_Ci+&`s16hiP`(?A6OZbSZEi2Edu@Joc@7xT}ykKYMhFn09gxeUck-Ip_Ow)*mAItEoh zr!NtIUPSuidS6nn$4%N~POC1}Y41haPdLs|o!21`>9YmxHXOex=3(CCD{Flc=XYG9 z^Up>8zajrn=-Ixf-$&57RyoFCo@Un5v>w}nwxab|I@(IN)2%k$PUP=c57+jF4I;4D zQI7J+)?{OQA=(LRJkrcI-o*(|K)0J`B6)Im# zAdgv(55l^4aLuKtKaG`GmrbC49fo?-egO6hYB85rgAymD_i@xkI+Ul>7PNAy zZOC^Gi1F#&!L^4^bp6xpwPLR>WiDi%VE-VUNneUh?XXeAnf3`*+GWUZr5g!bK7xD{ z$IoS3Vx9y$4yoBwO!LVw$T?rn#WA1OdxlhRvJcg}0&Ulh{N(4<26~*e?C%<@?f)V2 zy@EN-TIja$RL6drwU73rDL?ujkmbhOUG9tNIF9|MF_6)Od*oMX_)Tf>8}t2!&6r;e zruF&nA;-*9`uqBTUVAoO;o0+hvsd;4@{)`jSO(=MIT!K#{h1S%ei)kusGHKT1|&Tx zPE{kGPIX}3jP|1VJ+-%*N4`K~Qw6xoQ2yFu7&ozgw8kHb?cu8boIiro<0P-4>bt24 z6iX-IK72g-gpM_PbgZHHsq=@LwmqTe8J*Kqp)8V3c0Yo-4b2;tr~CE;b^OqE_m20_ z`rayo%A~Q*8rM|g_?0wn(s*Bu`S(8dpFru|SZAO58F4yB{nQ)jwVneUbxYAVA8Kn) zSw-syjN9(T={pqG3+}~{JE+V?mV0)|Cf~SV#TpvlY3__QQgkcUNGI(!{MV(ihxehG zAGcwxl!N-xv!h)$_0x(Wj>Wst7FJ9(HW&mO42a&7!!gf0LC2yP)LqX>KkS&3M&Q}j zap**QDy>+noL%y{&mOck+i4H>*!y?vaes zK4n2)iu=vR7S!f7mLKeyZ;f}8b=-LgI&a1N?i=V!`F{^xeREgLE1_c}?HzpxS?NRK z>M-}4gZPv3F?4zd96eK$v54x~9&h*Ui?m*@+D@ab&rDTqw<||oY8d9pqt#Zj%OuR9 z+kZW8Ue+4N>81-!ys*kU13#xaID+wv5@YR2`>>kyM;(~w+G^@BpJ=lO*G$8H&U$-D z_1Z8skK%0818aw@MeI=n)()XL;7rVsBKoND%Ldc4)!^u7Q8$X=h+nJM&2Y5Qm`H2g zC5XufAs_3;+T8VnJYQkHh*(jRS$`$QH5wlV)bvMQeD)Z+vY$ z1objqcXJrmMpjDp}^ekV*bdx}68zNk-EN%po%Dp3=`9b);Xn zHIC66hn_o{{O?!f!#)iBX-M>QSE{z*z+~T6pcN+UWB+mw$lE-}v?tUl?C?I9TmSQK%iS>e+|ip#RUOs;%VVbQbzJs_ z!Lvuh?vM0T==b&q45nu`k=|=`zfJ((6SAqC zb8&T}3~FC0BcID4`G^sfB+H5qdmgmn!&EJ2BIL9}Uz#^y-7-0?AZVT!u{^CTXr2T6 z1wl%+(VQnL*fBXWSb2!2SRXaa5lg>!V{J{mm5#=VsXX_iJsi?!Gvv^|%U{vHc=k*& zB3kdmyb#~!P*XppJt2A~BF7BKyeTLlJ$C9S%o`E^sQ=M;o7tp1%}eo266=92kVkPL z67zBg>QC{6(v0|%Z$3wLuhsK8>}~&oG&E;D0(<$!GqV?iaV02vCG1wO)c6Rbp?Qw= z9tid4@rVgne~{kMA5poaa}Vh3m#%&JV2pE})`gX{F5H&c!;_8i{G>g&<`mv>(Q_FZ z*MGF>alH*|gI4H(Ia|$XjAud6fz~{_%8 zJ!6DUEcAIBYc=u}9mgd$q8?cV+2b(rwo{?47qx*0k+!uI|uen&c*t< z56<(d!_UOv9E)=t&MR<^uYMLXupf@{2h{X{Y^;}SFjt;8cbH093+%wYa#f}s334 zwGHKwk6L!=ewx-In9tsB&B4ymbFf6zqZitb_Rh)w=)Eh<5%7$g*3ZiY>f2^_%TIoK9Qqf-=z*c(re@i5uvqoX=NO@)<&k5-8Fw31qw#IX*t>E2wXY6I@ z=-p%UBmmbSjjvvpcGPQTA5pJM@%|L@yazc{FRF`GH`DJzJXA+J>gR$_4?(^5qx?e1 zruM8sIfqa;YE#5?w8Oxf3Aq0r&TOPX9Oz57+Y6iyJPudH=xEH#lj5+)MSXP}>_qz< z6bm9k5wn85Ni_Fc_g7W70{7Q%v3Z(tjl_8u^i;iDC)J@%_nh0YR%t>Xm;qgA?Lp5D zNUnEF(4sF;9_{@tL)wU5%Ci`0Xb+w%Y7g>Fzo9-htzb5(ge%w{`8|!#8cN-L4 z08Bbg$JGP7C|u|G+MpxukGPm+VlD4R`khEeGU!=>wk_Ik9_mPA$&aw(?T|rhr6XeO zIE;PH5f@sznP=jgA?Fq3L7V&9o9b}LN>?AO%X=2-@P6T7MQd8rpXP4+QI6xcy4Efq zY3XX$c^^UECv+KlZ>}SjNA&Om4egw-`cHPHl zKaJiEqyE*^c+dlKF&=1N@Q?G>Sa1W%>uM}GyW|D4UdI@?1@WosyuQ^N2KTGp6yCp@ z)^+iSRqJQqeHiph@5Sz`*InYiy6STG)$6ZtU%g?H`|6EXy06|e)qVAI@$RdGf;ygs zQfx~{JI11pI!5Bzp^nKkPvkYA3-73yvGfz)`rK>p=J77?UftBq-EUodFn(jsbsm0W zuDM~4@^~Lmbybf{$CzNp_%H+GLNLY!6K8f|y;hZpy~s>@Ht0^G_bBO^B|ST$XSDlU zXnpuUSKm04N$a8ijrvmAz5MF?<{tYg>2v7{}0zU2xYGLU#c&a z{g_{UBmbxCYjY-iBZU0^WSl{Z6Pxq^SZY|b4h*uP&vJkU2AZ7(4 zX8l+3%E|GHVii3f{u%RGJo}68g*v-wUPtdcSkJsg+`{}LaNG+1KZ{!jF>ca)hhi7S zEs9^~qux|5m4A?97wNaLi~3u2r#ZV9v3VV0a}{FqCfH!pjjF9XY_J}2dIRG0M#Sk& zh||x_a9?ffhFFbt;!@1}e#ZXmZ0v7S88q+D!ZToczn$j(^gNU1`?PoUCdMiU=9$_q z^d3k!zYlO@M@)Bg?`#{|pQE~*C0qDOU;4fbA+apEx^C(!O2>_HyyYE@^A4^@vOV7%y7jc1T;*pIE*igmm- zm#DdaTtt|rYBsGNuw?D`oB$+vV}hUb`7xaY*PhbX1$T4`R;@h(6mJu8s< 
z(lh)tYn_ZWC_N9MXC?GZWEA%3y^uxEDQuy5H?1GuF@z1);#oZQcT*11o(I;V@4|lM z~$yqq$;!UvqYedA2=jB-Wtu%5%}MxayGuoYhgi z@{)I&(~-yNqOgT4Q&ICX)x zd&hS@WOsTFf!MK+p7-x+x0!b{5Yw&N)3-g8H-q=?M%ZB|?|Jy9D%3;kb(H&P3;OP<@9{eK z1Mr+9X4w#mORBdmS#x;D6j4-jW}Wj|HLX`hZg!luo;hLtpE3>kyQ%V|wP&Wz%c#6| z@m{<~GfmlhtySITr|}aXuf|OafRN&r16d3>7jAH3D3L#i9U<5ttJBRC}B;n zzeA#T-seG2>|J92VgT0p^z39{^j6pfdxLt--h|jjzE1B9QagNR!#e~R2PrPeXIZ2_ zy&G~Gb&uivPWa?s&_3q*o_>B^sbd|!@6k4MnRnAE-!Dij+nL_!9u7Gadt-3#X`h?{ z)>vApWkmGz*zxTSwaK?gN6#*Ukygsu4_TyN8s1A7iT9xBT`1_05`jEsoalJ3hvq}H zCrI_By+E=d-Vv#68-aJ6V82E9eFFOkVfOx>$@qSvKb{*kV-E?>j^-TtZ0(RzeE%1( z?9rKcpM=`td+1{J5NKa{K+OpKeV+N=fOLKxI;(pzuSYuC=Q{Ww^Ioe9&o`TF`dRr{ zH+0>N-`Kkydk|xcg1wq-_=0=pvJb$owOG1O}2~Z-a6?x(jiaMC%DpB zM9+xzejuJVon6uq#~w+Hi+*p49M{&aUJ`l$S7rv9BxgY8I9om8Ri4F#p zC(&LJ&2#AeRr=_6qd-TlBp9J?N-yk3BnjpN!%O%?Uci6q--# z{b}>PAk~weU(jBg%~qb|s=wGn-_eamJxB)a3Au1z;T_C{(2Y2nBM?XF@Y#*0gn6>L zk9S<`DF9Elq`G~C-!#9`@da}mD_;8cJun}peGl~etr4hyGwL6%Y;!i#ya#&`RR3bs zpX#pg?7(|5*Bv^O?`=fg>0VR+wYTdyinyAN_)71f9rVRfGZ!Kqsjdoj{Sh)1>T%qv zC-lAsx)Mio!1efzalb{anW3L~kiBm3%{#b0*w3N*U|#H@vZ>s4s1MeW`{G$=iu1$= zL7#QX%-cy<>-~`jrF~c)y~nZZV!UUM^@SzxZN$xb^l>WdDEbYRXVtq()|tz&O+F9X!T6w28f@^*bHoYYyUlG{pCs7?gVu&uJ)bG^5SDc>m=R zaOpPeUtxcB3;gnxC|esnBZK{u_9N!gGivL-1zOLz!CQUrD}1vw0^b-oUD5bfWQ?Z* zwx+p41kzY}sGii`hcSo#3g3$c)nDq_6VdT3i1IEDkS|`OONR_yj{6c%A=1k-GP_C_ zaj7ROfLx4A^sKKVhLHYvKW`7d&!GCYA!G3ltY;t6QQRfDR$9^{1biv#Y0Yz_Uh$*2UpU5l z8j!AD_`xprgHGx7a|Y^1S9rD~y;r2);^#N?jp!=G12_K8;CYBg6o0&k6OZE^ATPxR z#H%K(eaUC~M>yNwKpeoBSOYoRw%|P^%184v%I88XUj8wpw;t&!zED3Iag{H9qfU=+d#V0QkluPv&x(ii zjN0Yv8@8GT_&LSIy?AazeS+fRN%(vMVx)Z;HwE%@6q%{jCwpCj;@cSGOMD+mKIM z{;GXT#d;ou{irPS4Uw7KW_Fd2-S#2Ppg@4HYM#J!YmNP~Ux$-7kWoCu6(B$M827@_qu zGO0Y;C)_i=qdqh@_W3OIr2b2JNe{VCh`D-AQ;<6;;@i&SImNC2kG=Pgv%0GG|KBqQ z5EWB?H1a{7?J_EBW4nqbZ6 zATuMJ5XiS4NlxRgPanY^rHCt}ACCYB4aD~&b3f*}&S3SaCL7zLXLhdk&pS$|%bAnP zDRjPzykqah#s)K&_MOUDWgC>kfLWjVH}G0JH#%1!Yii083oXxv(MJ8L{Y2nR|FoY) z`!Dld^R8w=k*?qyNq>$2BY2$wQQcKRAHk*J0oTVt;Cvs$Tp!=1jpXOs?7o~n{Q<3&aal+{XT#^Gm{;Xa4q7Xg^GtG(-xh!V zU*ORkCT~Mc41N%Bnxo=*MejSq(dvgaHH*yk{HOeWKWlLSFU0qk@S9)3cZvHK!t;ds z14gjLFuw)oV#jBl!53dWPYgfcbq{<{JN;ISUwMfF_})vy#G5UiPddJT#nCAm2BK5v zn+x#ty)#U3UR-^48a$(OpLKZZ9Uh(C>1DnXTg})=oyJV;=zjQjCN{Q#IeaSXbb#+# zO--AA)ANn2(ZrKg0lTnI`6A-L^)}z8 zI)CyF#8=sK*q0}7)X13eCvrXH4>-eO=IiOmtbw&NL%h{Z9!7UXC?=Dvv14I3?H>KD z(WR>31kTS^R?kqLZzJ{5GZ{;ge#Hyfx#QuH=7F9w?$Gdv=MC?#zX)Bds)k>}`vCZ| z)Dx9qkCtBEa8dP{!MiJ1(`MrD&%S-(jSrh#Vp-_%4FrdrmW*GSyhu2^z}d^0JINh+ z5#NoW4l)Q6lkHs0z>7i1UimnU*Pcx)<80cj=xkcW>)Z!%!>nnWl*4=gp4DHz?xKg_ zO%h!ZzjT&IIlm3iA-*FYa~GHT^L~GlcAJoavtw0WM{77EU6Gk-Y`y6G8E_Bt`Qw;f z$&aZlteKJ@(-Xj!F&_1!y|d%^-WK&S()6LWiSy}$x;o86_3WK@KJI>aWg*)(-#PhzBsP&_ENoyE#*r#|%&yy!p&qe*@d(n>@;m6K&SvW3T z6~3=#XxI(>Gau5rIx6=n_%qnVY`S)a){(J%*LtQiV43%~U$k4i;d=u$`_g&#;&OWa zZ?Wf^)3=t7+yqYM(U{yTzp*p~#GTLLPpgi9KXIn34a;@YRy5M@G<)v~b<6y&db3X< zOEa!N9-S(G@=w4@f9%<@)6Cg1lk>bNQ@en%#>et)Xzx9IbcgsM|FoBJO`*Ncn8M4d z9mtM)p2J-(FZOWk%%1YHwnlSmRPVor{(tWJSD(_Wn(6i&TSan+(JRGQUvhfe2>cU8 z@Vg`U;|%t?Z`XKKGjY zp6BhsxLoBlk7ICHM%(-GnZ%pyt4Fv04jK7;$c5l(AHr|oGO^+Xc|3L}$mOUcbmX}$d<<}lGdE2~SrFM&f?fdi5sq3t-JwkATkHfww7+^2s zGsAh>jnGsN%|Yt;@GFc-@anI)5AV0g2FPF0oR=-gGM|S-t76MOY&YZGmT_@^7Jo%C z|HF(=eJbX^i{CUa?!o^!3mT-~!to`|HE=N3@wM64VSF!}e4icTUVeWE@RCXT9g%;r zx0-t!tX!srimytwl|AdF{W-3E;$kCHwcTdg`uLyrd!0UJoIhavVdOEhmpdI?E7-mui#VQ2~xwtTYjzm3jh7YaD(}7-?LK>KlR((-}<=2 z?tjX!eHpx2YGCEZV)wUcAEN!&=oz+#e(`Um!=tL?3;H-OXwK|=5#kl$LeI`zrfuKoDMpYy#$x?r`Zki#jOoaMArOy1V5Ir@w1b z@}|_?_l{gJspmxQW-)iSY}|uBm^-CZtKY}?Wv{jFr~6J0b&tzgj9)bU9Jz>QeP<5- 
zVK#fU0Ws|d8RvHf1>66TI;2YWzHbSJtel0-lWlw^F{H}deDpmhJAW`Sh`sM}e8a)4 z*`sm?pEGtIUuIP9CH_t`W}PQ*;9iJ|AlQQpHvK2h>2qyg;-2>v+iuAj9*-F4x%9=? z8x4a$eehl}O&7erO>>L6K7!Ard;Sb(Sw6sb-=DV?zMuH_BOCtJ?AP5#&DYl533C=z zx^Q35psl~9W_atA;P8Fa(fyX1;ro^cJ9A%|P0h~9TkreA9O`vWF*V4aA3uC+c09Fm zmDI{nFW5uf+!pHQ?p56!wQ^seR&FUg@ptQXcLrf@N;!2uiLIwFp04TndbdpA>)rO< zr*VH%e1ktnHhguf=CF~4({aVv*P&+>TdEItwFieErjM@c?Vemc=SJ+lN09pjILgT1 zC>HqvXEUS&qmsdJ<;MP*bHwuhbI_qZ_+D}=s$q~1DjS7D9G*%0hdLAHZiNTW;ojhD zf}J<=i~tQC7?-=AzqyN{Y72Vo&i~^FaGn7f^lHxw+ft6NQ%!BcV?2}H430ZA{bBHD z=(nDI^yq%YTe$b(mgD*F1pYgb|Az73Nn7WFqq|eNV^e=pdj;)!%MMNNdKWhLmV5T~ z9iM#>+wuH)tWomK{|OzRrg2kS{~&$3`itC|;L-2<^c%jC>~$Tu^jQJi?fm{B+UmTh zawp2!nYny@UPi>sre4#=`cD`+?ZCfKO``Ce44y9Fd)?eeUw7EN(OAywVne|xTjbk+ zivWgT0h`^6M9sHR2n_1Dh|4v#}7r@gXkgydCmyWvmn8pb1kCP*B* z@CMe`}%ywI}_sxyuQE2i_dn@I%9=INmu9Z|{+#)4A)0 zyCYP)G1Ba3QQTn8Uun(y6@NcSz3emP!HGx9%bvgdwWIhUr>!i{RR3{5cg(2YuUQ%9 zmLNanlW*kvbMQ|3l49NjnNz~^=ufc34I@6!?_Y;*KL+_ndM@8v{19)%k6+NY_nGcoP*vg{^LJqnAbU7(ZqbN`Um9hY0;f<{_mghx8!#rI;(l0xgwp=-7?*k<)P+;{6{}8;^*f6 z1)N#*J$tu1^LBP>ZHhCOTIbaFXnq%uxKn$&XquL%3Hvvy>Sp@X=ZP^sJz!`ngJSo<9=6u+`T4#v1jn)Pl2zhJ~(vY1LRXx z1FPJdbT4JbJf`Z0;LvZ%_lSlQBuC_cyv+Qrdx5=1aCTzDlJGzM6Ygc+J7H5b^|)I1 z6>6_}}Znt-sNqZ2|cF z^SHlAzH7VI{Pv%@x)$oywMI%7gIxZvfxBxf@Gm6$dl+NJ<^AMW#r;Zb$UZ&8&bU1S zdyko(7e5pI#dI)^y%%J-7vvP#(PPsg{rYRs5$9uY(@l4Z_sJFYhdjT~m3`)K?w1&J z`;|97Y7eM-zOjmqtW zpUQ)!$cxRwH;_-SKJTX=Y>+vttz7oie2>dbYa{E2>RqSaQoiBE`trBDedH;3o>pHz z{_VGv-}IvWJK*n|UkrF2*B7lv@$W~d75_(S)3wGwvc;X3V;xd1Jj2}&xAWV9st@j~ z{Qk}tPdxu4`-cA4yu*o6KYg)h=z$k=XMT9!IYIWt%kS9o;=P$q?5mo;@5Nzdsly*R z^LH;c{lk}EeD*-u*D8Wxd*)oS=f#Pq{qDsxhlelDdqwN+aaEN!)!hAI?(iJ?cJ3ca za(Cxz3-3(7wsB9(DM7gMwiEB(c=P3VH{CpkdpxVTQ~jfRs$RJH$$MYB{LahE%Jy`< zwrNkhMgO4?lSlnY%4px`}E9C-p#xA zS#lS*ut##+nqc^r@xjhb#7g(3nX||!;q1kh8yX%;2jzQig^s!)@%)+4Qo;VgLh`6- z{!VNznRPM0 zuW2Kn_nUcR?OEmM;#}2vVMQdcWDt6f7yk8(9T66`sV7f@`gd_XmHC~najZ|;VV$1c}!uOIl$I%)c!H>s<0 zD0df+&Y>qJ?pVd|$~Pprze)KNjp=dxtm)Vnn=iI^RCzvhau>GYL-PDk{P`g5)lR*zXh?j*&k zvb9;pAf9CKyA)IDE+)1AN5=ODXt#VR7+po*}jLdxx zdj);d;_IjSJIZsd$*H49ryk<(GtPV`MR z^Hj0zdWZ9Q;J*faZ)Tj@cZ;{}JkPZYE{xi(bnO&Z`92PuxMtuIdmZn*-2IF0Cr7vmX35o$q@7W$EwP!EoJK zdpkN}b!!Z=ywuc&?&NpbzFu@g>!;#s;!3mEEiW4jqCKuAvT>)~1QTZ|1x51W~-_@ue;e z|2>9J^`_w4X8Pj1!31#rfM=tC(X$VL$IL4u*BRlXuB~cT3>}$^!_hH1e$C&aRqqY5 z^0oEd^~l4{L0jML{TbQ)6myb1wV9K@#Yd}WEgOHRY;6`hYU`*+D^AbVfLn7i>FV>P zlUhIGbwa#j%fS8~x|7t5D5m8+l98MErTAL>n#$k3+|4Rp2c}+*`={A2Q-A6!#qW}F zJ@1rKe5~^%x`Q`EpMGv>{-(8;C!dG;y9wTi9_Sh&9%x*?db4$#^C3Dyx#=-kEU(vo#ATNR{g3^)l(F}diwAK(hgSl+r?ojJq$KN z`5M_X(UU8JOL&@-Gs7!^i;wjlU_S)SFT%Iqz~f)TD|_GUC{rikc=kf4YNa)9?z@`l z=n>r-_s7BEbEhd};cb)r0M$CkCb0hGzVoZd-WXYZ3_j7$-q8iF`1-^+OkN?vg>CD@ zcaTlFA3VhIo9#RqnM*!Kj(%un-!B=1^sLXUoB6d!c3t-QPtYk{ei!3?09esA6&mn) z%vlP_+mH7^%FC)e6wlSZ%<))0Q-=Nn`t|FY`TotK@0;`Ai{G;&U*s}fC0h$X1uW?$?wuPeuenh%P!!{R;l8KmAUhd(*RBf5}An&-ACheEn3trE9O~N}GlA%md~L1G>_NYf zy~CWRn1NlM%KowQt2xsblSTC@J|-)a1#;M?K2;Nle%7WlSFBfUU=KBe+tJIvhZc0v zz59tZP`a2oF1#4Jsn?q(Jyd)kJq(KUaN~c(^>9?~WpHKKTa-Vkci5D1-|2PGe_*`T z%d#XnvTw~hd;mD+ZdE<_C5!2Nf12mgt)@N48IOUKGNvme&`SlUyQY{X0K#A^mapo zd@1#*y|DV^(C~LW7w#?Kp3L9>;$W2D7tey=gs_k28iV%r2lKn&{~7oRJeR)YagG7! 
ziQ!WY=O=(UmA`Mxxa*mQ;n%zpUZO#Xe(LEe$40ZKJ$K@k*e`kD-A?Q(tnyB5xYZ>&uU~VUeg&%-trSL(A zS-1SU;OTjZbq9Yn$_EAP|IGkL`Jl0H@4fxQwfMyQ`hIs`+xf`G>t;QER6Thi%~|>5 z*j!ipVsk{|hvfN3#6!z-@|FI zInk#WoPITygRz*w^Iz*|p5OnrRl0(n+~{!*8drUBFaEdo)}$Zu<#ZlU{i=VRd-@@K zSKsva)jsB+;+O+Q7e*O7Dw`jqF5L7iG|BH{{jI&3HYt35v-iZB!gCk9yX-tAD6=~J=>1XX)YL^Q3LZD&pn)SdnvwazDF>bH-!gdFj|Ym z7te#h%$58ZWna6Z`JyxR8oO%lKh9XnsPo2G&2d)btG6>h$PLY?Wfj?=d_Xotpui=1bvRvAmFc@^d85pSaw-yfjaA7C2fzTs~wX<1{uB zeaLcFT5F2dfSJsV6m!?)rQm73%S&aHn=$&5@9za*uZi=InwcK(DnE0rtZzPb!GjX5L=5_A=Hh&Bq7G!F9U% zsCqfsXQ-Yb*C6_|z9q5K zn#-EkdUl-CA%g=wGIqk)eT(Y?VEuc`j%;|apzcHGQpr)i@Cxgo*0Bt-l?`KVQg3GS zwpr$Q7P;|W1oOTSU*{$NS-ez?EIX%j5wczL;De>I{Z_#;^JX&+;r&BEW==bj6n?12A$ zA-elqVi0#wslJvou)xYT9Y_q$jmAzF+YxU==k~e&eqlc!b)UcZ5BvS{U)fLQ-XAJh zTldh9;;NZJ;{9h1P2E_{clBA$`BU{*rE#zp1liJdGmfXp;29j@sT*`^8+O&;6Rxo( zaQSfu?6vq>K{iw(+fY1C(Q+nt0i_(?{j7b${kUs$3_44;`&J2>ynKt-g_*~oi*cIu zMzs8k!x>*|{5a9?+T)Qy9cLd0J(l|8neu~UJj%&eLN@mPjeH%nZ;$`6{YS{uf6wrp zeUKUW8B=fhW&XL~J`sZxPW2U+N2l$pK(-!l%H;8l4;H-_M|SqVpjA0p+5J&T-T}h1 zyRN9%r1eI*m4Yljg?)&TrGL7`S$TOM5Tl!Ka*F&zLB0VDV9pCAwO{C7)SxKCmZ!N_~*4;YyaVN-K$S<>hQrLB%EDUa#efS^XWs+QN79UCU>r7@j z`)U=b(IL4a^RBMG;r1PQJ3Grmz5nv-iSm&3VCOZ!Yd@!+TH5#Ucb56Es|Nj7{pNAQ zC+*^{)|rdm{E*(SDg0Lfqjx*(m~blf=%?kLhbI|)xo_6+4xsB(J6}T=e$M+ARCoW# zAmx(J$l+goD#<-0-4{@2%y-fLjZMMMMtG*Znk;SWvEj49rM^X%YNgem`r37Uuv2~Q z<@es#j!u=_A1NOmYOH#n@NRc@d>p*xJ(@m;;S(6wi#YHy{`4VAYSskNJhNK=WDXaUGy?ci|`#^7o^T<@gtG2jFvSmicF zdpk195aY*v0oeo1*{5e!h4KkB{;PTRYWdkAYoOU5d`3J)Zhh>>g0Hqz-u^lEnVpV z^1g`l2>21In?W8XkFBvt?^Gwa`*-4_JsvJNCv0?ZeZUPrrX0>4 zP!4r5IVh)A>_ds}VmTz>yXcx7lfzXdZeMy~_d&uC7Tu%Dt={=9Tr zG8N5|{{imKHRGbj_Wv?|!6^sj;RIvHJtgeqK3TPU4mjoee*>=sFCEZxuPf%>SO9DY;Ju%#eqM+N()Z~66mtT5w2%G$?W6G5bT{4E=)LBJwO6io4Bnhh|MJJ{ z;fHM126)p;9@Ebc*^7ujF`Ko~y|+6)KOFs=8Smyw&PdrYzb(u;`gK>V^l?9ZER1v{ zs==}ChP!s$m-rdiPBe%=;%5)P$NTYo4S^N?cY@d3xF_>GHMkR=ChGq_=x}?#3BDT` z-P3Du)}J5VrychlP{5Lwd&94u}9-)7-M^m*vuDuOctCO1(9xi!7c)5d9 zz0|8u+x&geJYo#xIq8q)Q4MyrhjB=U`k=R)aeS6>^s@eTF%Hp>PV}R{EIJ=jNWZ5| z-QUUk#96ztw+x%~Dsw}zVv;eWxGP5c7y=k1knBT+k;;@% zrH$U%doVG!O1|*jZwqj#B$!VLZIfFAX!n79{R z{}F8zQz#~oUHcyVQ#`Kv1bobGWyp)%V^ueCV+C?E=QV}}JM>OyyUynGCc>LJa&(7} zFJw!l7haE~Lp`3J4WYvFDN?lJIbe0%xS zyY4+NYKXxsPFv?In6DRUsuPj^ zN_U_CciD>_(Yot$MY*NOl>P2pS1adduo<(E@3+0}xZ2td`Hsm`x%Y(i=eN9vw`+HK zsN99l25Sx}_n`Sa^P_J$)QrCA9esVLpyR~ZT_drdHRYl1kjO4B4@Fm&m~$#)&vx1z zht7EGy|-_2=Z3QQ(<$-{Rq!SYj&HKAKZ%}yfbYVcVea!jl(oX2#8zx!pN)HmYh~xk z%d+^j<8oRL1RQTN^roy)m9;8pCy1ag78|LK{;aXG!q z_iFf%Vm^CZdUtOoF?JJqlB%U=1^Bhc<*opR99>oB(9x+UqYpPSzM8YR(>^v|js9>K zqU8I1V3enM78*2`dU*64m&+cta?G)V+ZQR>t@C5dqzG_<=qwQ|mY@v;ZH?WyY!%o_1yn>$_gIBGY zhpqIudoKxvLpf{BJD=N;J^0O2vIjOEILJCH|EK-y`Fnf<`ZySi2P8+?Yv$!<-7^`a zW`=KtU-;vPvwuFiqaJwQ|8CB+%;Mg=isW0uH$x-1M#wLatx;Zk)S5 z7yAmA=u)5Rzap6k_k%-Y@cTd-gXpdpMmz$ZWE>AUhK%J|KRkPj@I>^CDnSo%=sD4UgnKhMn6d0X~$%~k7pmAMSgeMx;B_+n?;=NMzG#25B#x=XdxQI&zqNjEsJPS(T zAqE-9|PyUQ=KRmxK!V~fNrV@N6<``&PPZi;b_9ykj|{@1?GNOy&5 zk5M*R`?bpJRbbP!2icB|mQR?(Cm~1p?)q6lsCzoKhWs7>jlOc)rWps04=<)pSLZzx z%XQ$x$ahn`#D2*3Tc5ONiWHZqoz4f;V7qlkPDTBR#?F5bTCW?z`UQU8k@WmCCQ{712#U%pDTY3e@!?$9Ztn?y|h~cPQ@zH5v@n};k%4vJl0Pi>(AWS zJcHJ{KF&T+!@ktWfv6rMOo7QZB;sE6AM%e#NPM|HN*Bb%#3|5{V7&4c@@U-661}Z*CJoY_(tXu%2h&RhOr4x1w5ZlTg7+2 zeY`F8%EYAo@hyW7KY}jH2Xhy*;X!3Y*9oSNWQ;DI+lc!aBNy13voG#5^|~gW-waO4 zNB7+O{G<4HG%@@~WrMQju48{st;>~d-hRN|hhg?w#@o9vq<_jQ%SQ=%=dSM>qj&1y z_qPY@v(E?NY4oGt{9PFp_!dF$gzymK%9`^?qjHDwpUpk6I-@vDaSP0vbJa-Qt1 zW_-T|-t1Y8&)@Zva`!w%=VTK3dp)3GpYGBSPoH7jWq;l^f;*yD{(u^iQIBPL|Iw(G zzb8MFgFy}#+@HMl%mo4DH15S$r4M*jiN1g|rR9$vI+Po>kt)=xC_eiQiZz$%Z{ 
zO#lC5zMfXd*KdO}&esO2RU>0h}xUl$hO-}Yj2l_S}xPxL_cY?GZ+@AWS$Q6NPvhXCdm6dV$lcNS3~{RBZ`oMom!*s2 zZ~Dg8`;kXV`6*(nuA2Pai$7(KWHtbl$Jd^E>ml@DsFAoUS&?#fj(mSDW$mH-F=t;IL|H z(w{N8$lR>E;2Gom6Z4_(_(RiG8$$kc`-YQ(sfu;Sai_gU{LCoyhiIr=Faj`%mUNz0&>e;=l4e%KfPyzji&B7{}i28C3^}7py*B zU~JcP+WykFJ12GcHvV3XTz(2&_I?r7zKkjGboC@9_OSc)klB=O;ut zk526gnRV3CFa{i=Re2k&(<5jT@EOOsl;0$)$NAhreVe%x5tNA{ovNC+Nds;YX^Wf7j{z>B@-4Pm4hp`E}DVb-<`7Ilg z*$Lls_26q{PAV^0j&4-&UvR^+L$^V<&J<`~_Tf)A058}vH#8j58IMMO*ZBjTE9}E3 zJr^Cz;GfiU?t?aKAIDeFoG^JR=B7PwKu#Xr+URUzncLU1KCi94mptV=n{}q|!ca9x z(yi^tE6L}h@LzoPx|1f>F!Ex5M{v{lj~A(Y%KNt-?tu1zoKAYUdw`SgC;6yd{GLgl z-`fYzE{*ggpZ~5@o?ZC*rkQ>}3a5p}JS^!P*edGS-ze11Bv?i&s{vv;miD)zLjWPDg?4Ovi+Wm?qa4WZ^aemIV z?`5CnZvGxpqP-axcX;^spIk+I(RF|^)c@(|RMm4-lGhg>J%3G`-s$1x8VAok%||wT zVQYDnJ@4i9U%r7~M~rX$U0a7Fo!^1aEF1q``kD%l{t^BEmGqxI znSJTLDPbA71%K~PmL2->6Lt-k&Sr>V^t={4%yDy<;Rj{QVl?YrEotS3#VhXRINi~H z0=BjgZb;REN-ysj;cy3M)|>C<*PD$6wH!Z<>hwI1Dm@PDV&HK2ed>#lMUSOb@m|3? z-80emJuQ3@IPv!xWN;7TJ+k5JTg#I69H#a_sQuim7*71%&pHk4KC{Ql8l%03xwO$; z95%kw+#Qu0O}|;zE9xq@%YN;^mb?cZi+>L-d+AsG%NBi$_F2}F6gAYc z;hJBPw|*n0B>owh+u>k>qer)YjlWI&2`s#-QhjDO>)tcYhBZ*T@e)3httEam+J6u} z&ACUQ@jORg;%%0m0y^1?5KnG`M%A4>;Kr0J9McPSe!U~y?QpDdWBQ1rPc?v&on$20 zj^nvqckCHbuM;2np^YO0#wxmfEUq?-Ij<)FtNjuAEZk26EoQyozCC=Jy{Q}CwX;m; z|HdWO9?yIvmN0j2q;5EI=k35Aq)*9BcYi3RKMot|#`;2fy3^t8C1E{%r=TC*H11wy2h zqcKh!ceFt7sN8PuLGk-RM&{7<0DKia;577z7SS{Te7Z-$*Jt#yH)nlU{9fMCSO;&u z%$lt|jZs(fE}#ka%(Bk)$=-|p#mGXRvP0OkZTq=XK)!Jquv_>|dZGDK|E6F`#|fF8 zeH(81+FdW+{N%ud2;8wn$JIAlX#wPIrHiJ`jbL36-mD5Ia_r8DXp}X)u>)A^g1~0FN zmzsA8?#;-EC(OTUY=hBN-uLreV8ko!HEFJ$$UIA$yFJRQn5Qq4V|Qz+ZQF{>J43}+ zp1)qs9;f7E@Ujn+o@Lv7#o<>Q_2Jt{Ue7}RB1fOck*IMxm?;ZiaB@w!{=J@>KCsXK z>~MRWm8IbNE)73kf@bxvSR!ETo;LX`dj2iwn&fm)cHZ~98~k>BGmSrS19)}U!jTQR zt(C+-EBU+;SILgsM`Mbigj^7f54nF9+yH`joz`gg$Y9%LPs*&%+nZ zjg_={nl^T=uxqB5b>}?C&-z3-(X~@7u}`!jha(&AGyeFWSof~c`D?dkHhh6SJl3KK z@UnZCt?%pQyn*cFo!8Aiyjb%b{%an|&wrq&%;b#Cnh1}^;+MP+{*LP$9L`8z5-PSs z*GJ{~$-6zEnYFJ<^Z(bZnZIEylF4(db+?JX* hj>&aFGyD0*7NnS?mdCooY-H{Z zc-+T6d4_dU^HuY>o7hmawwI6H^y-Ac;RUSS8#&7@J<4!aTJyQ#1iim1k*ntn%}x0K z+OwZRKceeABZqO^TLoP`gUxT!9qGykVze~yuG15cf%L=5;0&YBvH|H`j{m)bjXr-H zI3F`9_pR8`+xHFdyn7cqlCZfF#RXaq{=_kBTWNZDt?u&o6^ac1<<)7jFj62_{cJi}jI}*G{Q-EEs9iaT^2EwL+`lZldn+)!BPh2VxLvG6FJq6r-Sl<6 z6_*4f%lU66v0**q^6wOq4C{EuX_k1V?hM|)&8K+h+bItm&#TM+G52G4pxXPcpx4Yx ztuOB>qh@XhXDi6B`}>BJU*AD&p&WZPbf(~&&9QSgzjDFcp)(#Hrtl@a?$j*t@;u4N zbEK1}Y+6?=uO3A8soYiVa_aJJPe#X-r>DM*d*#NMdrS2Ec*$C8vV5rQ$!oua-IShW zpxK{)5DnOk7kb|9X*l2bO)qM`qeEWr#4G8*V9rQ(E_SfJCx*WTF5sERtNd?VUwu1M zljPggpAi0xc1dvhcHeROdfVgHZo;?iyASJqm35}x;4hL7dMEibfPW+Q$IC~u(OM(f zNJc-Uuef}wjeOn|J_v1b`K15ejW=u0!+nw8wcfr_`Fw$O*vlt>k2q%yHP&MJfHwop z#q!zxcZKr#2s8{NpPp+RY;a1LakNTC!h02FXTV-&h7h+&K+QEe&6q7@O-q{_Zz*^@Ll!^+OwR*#c}Tg;HdzI^`}P4w=nI~?T%l)ryG8)ad?D7waAXQ`M$EJ zg^9J6h9+P&KGF99#&fLx{XTdv`v~H*_LeBynP?)y0gFYi@kJj(Zd|Ed}&iIrW_pDt{91 z?>5)pkq!5oxIy>(EJ0TejStK_Xzjf=iJZ<~ziRd{*|S`3&u8d}Htx{Vyrqc=xfA0M>hONloy5meVN4I8JUFoJR|oO`i$dW z25xGys$zbYv1yf)++0qLH}+3-Oo1QTix3~YoviV2eR;Tv!2P>}(>a|U_M`26k*At5 zoeJMo<4&G!TJku~$k#AGq!*kQ*L%{xJdF3O(%(kW{h^e(t0Qw9cTWr>?uJLIlL^St z$!A=}9RWJefZX;bWxI*d0``dh1P?#GI0(6mVBe`c*IA-2_WX`>`E%iUm6&T>We|=J z#;!bx++Kpc4Q~S^pLyRq`l%1z!(G(uw^fpV-_Kc|GPD0d4sgW{hd#yc(yc6Uh1K6t zx&81nNj^Qvr}mAs@AGERNji<4P zvh!aUM_ebKVLs~Yiu5Aw`pYEd%;@7C4WD-3%ZELDS}Id^y5yp|bjB8~oo1g$_uy-v zCskqS&q9lEE^_TfLnZx64j<>4WGG#kE1sfXM{CN6L(8@vsbLQnzawC8b~`?aITOl$ zip~9&=N@4XrIGyeJ(mn#tGmpF;{d#yK8W|RG7ha}_`>E~m|6Rn>sp&zphx?ynPG=6 ztfwY2AeUV>G_`Yr%Vp2!%-Bryi#z9PpW(eWWkZ9VUj*M6$8U{sh|RZKe!u)!REbhtF)&nCr 
zkIV45w=%rU@z=Lk-*Fs0pS3owUt8w)4BGm3@ivmf?t$7d-_LRFwx66jTuk$Uf!ZY} zgqjb&zhXLXW$jWO#Y^znoP*)}QF3hTf$QGjB_BWrk-UcG<)!@*$?Gn)m%Mmin#TH0 z;J2bvY7EZ%>z%hj=UdU(q`;e2I2me$wv>=8o|BXTtx< zQqR#7ox3;_8z-Nt*TfQIs;-RT`w+j6<9jD`*}4jomo|E2=1#ZMkIY#{KYj$P-?u!n z;qzN1d#!)+-?Rp4&5=)_HAr_JR%foAq5WF?gBk4e9qxL7V(IuQD}~CGS=8b8ix{kYuU*n-3sMjW@x& zoFtpU$k6C~_Uy1`rq$^WgG;tWag08n;8XI_-(|GhW1a_xr!aQ;5aL}gJU|!Z$Gi~q zaaTEam%PgF^6LY}b{lewulqe)O85vtRrr4TP6F>?D+}Qwf4sg3&eM^Cjwbre6!j-M zIxjELXKz)wun3ptY0vxu*r?xGMX>atp7-h3!+|9r?Xz{n}f3lamvBI~n+seB8mCID~s+HwU5QP|p6L=-Fa@F~fV# z*F?PE{rQ4%S)Y`97={&%OES<{{1_x(A5WbMf3-eIt{()qkDmnJNL+egREa+N&J7Pj zlkmpj4~#4Un>siAt%J?iVc31(Ky@d~Q*=Kk{`nYMe#SIvo3$r!-gk-J=ayfTAO_ZR zJ(K@&7SHi(x1pn()4j)qx<|MS82$E9Y=>m?0mjB0GIcB?nTNg9Z|eEudaf8+dx2&> zv9Yt}lID?_L#%m;^>#cDIr;^wHa?%6xFH(vHrj17?U3o0qVZb1>hn(e1i#?+9=OrV ze@dv_k$&G4|DApfF78!O{!aHEJcsQvb`AZ*u5Hu3#%E%`c!!&LuVFX7@^|o+D-*#~ z^mSTS5BX(uXV=MtCv{_Y4#F?_$lc@fwyQyT7^A=6Hg-$%N;GERv!U|~p3b+0*Sj&v z*6MdZmfaqv{`_#YgVB3oG=Ji7ogS{2`~MxBKerr%+t%aZ;N$xooX#yD2mNM#k{kaL zb16PQytLMbo|~T9Ty(ydFPi4WNB?g-`m0&j)u6Nr@vSN z7CoQrVDoyO+v@k){F+3-x=k^3C$cd0MqkPAk!o#_{lA2<9%ODbu*ac&vU=_bmhS5e z@eSoCgvv*)yW>sah3L}Lx*)1?{VKnan=<=% z#KlWAcH+cs%%#1*2G+-k`90OKxh=GjuPA?Beumxi(|b0?*K4eH%B(u{nePf_wN2zr(^h9|ACy6jMJ~@@+UXK3&jRr-|zvd%-rBTT#5Uw z9QL|)>R)vDIBW>d*Y;~avZ|AI7ROYx_g<{`yZ!H(Y2nTy+>fD);=g3E6uwA*y#Dnv z2W7`(TVTv zL!2f9pPU}PKL+pnu>AVwR_*7#o;^$810|W$vSpgj-j?;k_hMUCt-bu1EmJ>nntC4e zGM^F7cH{PX7KhvI;n2s69o%!J?3stlUKF11;1r|&5?N=I_wAT{xSCI&GxmN%z#c00 zd8*zcz`niN<4gvlH(jqaiG6eJb?#uFyn#LQd+_bg!Ecmby%8TmJbN2szva3??)|}b zeer&Z?<>PP;P+5%EEwor9JT5@pngg~6YOZRe<9vPj}`pUZTc+}3`VeIFpWRRu);7ojr1aYv9na%r}XLR0_IK7j# z^B}RP=0Q91$iQd8bB73g7-{D6@yJ&`#&YD)3%sX6>*em<-j-bwei0fJ6J_9OJ$kU4 zcK3i!@t|T}>8H18osW3kv}f}_LtDYAJ+u^@$yPh{{~`5n{Fza?pFpeZbFZ}-RVLzKZ*J2z&;XKCq3s1mp7KEMuIu?Ub<1(OKMH^j&~G>-lz3 ze|`2|2H{S-wz8*!mp|ueOMl1Pv`dYDEcmM({5OG*?tyZ?kU0Z#w0x^sPn|#1hb=B+ z4V{4e2BY`0@ulaE>Rqq;^~Z>zenm`{;WJ>Krht(?%nHa^<15MDWP+Z%xAX)vx~Fi@ z2eK=+HxFxFJBGcf`L37g%qYe(eeT#!DWp8>sTkada^(fj`_wy$}kL6EkeH)2=(44p# zT^T5TFlQeds2ovJ0;NZpai1>lR7VUsI( zmZ1)58N86s!#>E8hw#n3FDJj}Nq(cYH0OP%-+)iP)3;gE3u~sv<(2^JeXcAzXZ@#f zCXShd-uL1^X%6gXzNN4!`nyx>2xG2!BAP>%cYTAyyBt4Gc6Q`(a7+gt+f4nCy;sc5 zr5DH(RQ3GO=~y-qe$cfw`i!5};A)47_w%)IoMH05n)FM#&im=(E%YH@>{;^cab2qU zfyZASu5$S09~AjjFC6f65SOGKoX!oNB0kfPkNX4S(+YI$R`gK01D&k!Oa7SyZendXq+&NuT|Fi8C)>5BeLI>^L zjPnK?J?ceod@gnZd0E+@E!b$)TRoT_5|-mDqHnc2FC)MIo$z57_pX+cpVD*b*$ke= za|?poJ}=n$0B3HN?dCUN1T<8PgePhm7vTLpGWY9&u^E{Q!}LBYf5ANI`WCG^e<1zvI-mA9152mi zzw6+i&f|Td@<}%y=9Fp)Bp1o61wL7Q%tiZI#BJuBl=fERw&H%)jt26BCQftpjLP#} zzkiU8)7p-=aoVjXgwJ4aO#T9UqxkHVM?p85qjUH3yNlg6 zD-TjSb{n0W2Aq%G@C%*oQQmQE?yYuxH*xW^=%r#OMp%z zJ%4*r=zMi+-P;T;_V2gH{=UHcy{5U5&ph7cEv-u%TIyPwu4`(E(Y3s(BN_iG>LWH> zi|X9uy7s0dw6AN5BX;!0aqzPlXiH1e@&*|4M%qW?^<#|nAFJGy>PY2^+h^sr2?bkPVq*wfR#}h;U7Y!%4qIGpsjK9*ObnEJlw$_$pW9#*+lO3%| zBZEt?uCBde{uT55n9OJ0>V{QtxBc9t+O@Q{x2thub4PRQ>WhECQxr$X^=-`^P0{38(}vB|ylPvY zWO8>juWEvxV@***ZJx?it&QtiWH#v;&EJ#ZA}f0NvloCWo@mi zO14!4hu^^4-j50H;ny^@vC6pyZso^j)|vGl?X|?w!2&I$SG0CCFI#_g0}D-4o8_$Q zSA6JLq2A3TIJ-aiq@n>k#+a__RvW~^II(@o#CG#({F8L+n)S&w4IKpwo0(S;yF8KS zCYKljO7^{WU2{jAuMLfjtSDw25nY92eq-o79}V9 zmVXdo>c^FH-GqTcBk}@z5EO>KqoIA}zzEdpnu+b#CMUKth0mSXKI!6#?MZ`%=R8=) z|E?_!5&ylh1O|RD@znR%_`3ZSLlpN{;&=Khej3TWJ#UA-WJ>U}U2uXS9E`oAtBfYz(i$fhyo`2S_3I5{Ic{(1eA{pdF`2y(x- zH7##$Cp=0nYi+X{QF@(^6e+^%$Ll>z`QWxA1MA_ezP*8oY^aXt)>LtgTGt#~-7RUZ zKR8D06pgs8$wqLI8TaFMxDngxCWPiWw5F+TMZ=nQ6Azl82E&Zs)6lp+7U$&+uJw~z zSErkboTZ3ezCUlDw1(C(pBr163T#U3xt+FdJX){yn~5It4q7S2Fh1LWv^abQ7aCmH 
z`(KW(cw8%+)}Lo$#biTU8~#?@C5!8$=R+Qz_+}~k7dNo{SZ9zasNu)BA@g#EFVeh* zjt+F!E!;-R>td)7!?EBSo33k4H_az-E^3sYKhi_`am$K)PJE*%5`{3+n~i+qdS9ZQ zUx*4@43x~^`5Vh;)TLWjt!iC8Ic+D98?Wc{(sj$0S?*qO)q(|ieyn0$YE2h%t|-${ zfX}S&1A0`k$>BBM(b14jHxboZ|Hf*vmzGq~#J+xAKIUw4_}ZFQwO)tvdW1&u$+#b3 z>=%NG(c~wrXmfa)S2uSQVL5g~KOX!hGl#sd<441KK5(a&m4EyHzj5Gi9Qc2f1KuDj zl5SpZLpr}miX}zAU#XDq7_kQl!Bq`6kluBnh&Q{zH70{~ZE_KT_Tm)_V{x`}+{Dri z>zA6v-*`6bRyVI+j(H?Ihu2C}n_kJrf&N{sgwT@KIEu-7{>Im<6%B1ojZ@Ogmeu%(DMe=iS z){l*a0yghoyay09E2fvMbh1SOq~noIo;c-`JbX*fx7KwXCZXm)`U#>K&Mbm!TH8&o zI=QCRY_j;?K0F*%-?LYIIu zASHB9&m&8)&Ixuh6Hon37J>TImVrL80(w5>x`sBhPliuBMKJjdGv7pD#q_RdWr8*~ zw-Ge8k{r$xTtsgkqy)jyk9n#GGH1SjlehG2@@y9Kj0f3&8u@l{*1DZI`FWP#c1O<) z)5DkOM_C~CdPK(6IqtsofST?2&b%tn#D--G z>N{2tIQr>!u7mJddP|9GU(gt-ntWBM77;vAc0r@pq13fAez$8^xZheNLkDm8=-^AS zbHHqcYrm|$scEGr^e;kS)nXty8H4sZLpB&39X8J#S!Fk(H;g5khjIS3khytX(>GDc zWd2>__8+fJ*5o}9CICSYk&vS&hS6po*~xUd*@AJ{u)GeD;XY`8K-)#08wEc`Q!L;A ze069d>iAVBAZ-E1jU{OKPiY z=J2`bO17N%WY6geZAfXKijQFa>^ZZ!bI*KU#^+@>;AYq8aWK2~J?3-a?3%e(EuO#V3iB-p_`YCKP4yE0 zgzs0*sjZoFrF$ay#S0Bw&7y@1$)%6d!S&Of}QlGQu%&op^f!SR%&ligV z;k|-C7TV#%r}r!bZSbx+wO44jZNdCm3+K$9UoBkv2mLiyUKIq@i)-dwF?+ThG|y`n zEjGWy4?EsPcA!h<6bvi4vUbtj`3oon=d5%n-{^0&(%Rq^};#2Xv>bn%204i=FBl8nZMY! zTRh*5OU&lq?8WvwGN4ncG;xEl#;4!!id)>N@ z=9Vad;rt;V$5AWC9){gA@}(%I!y>rlZLO5~lmK*a29HuR%Uaj1*7mJ?&Sqk|qb2gO zsK;q4u_?^JJH-5{>+IHLHu_-N+lx=lj;QwVEB02&O1CI7I+cDiZ zmo~4C1>~MC{lpjxw@o^pp6C8Fvef`;_*a=$1i#s zRA}qzDQG?LBr4X27*7Un1fN_xx$7lW$ouII&Y*Sr`UZzDw6mL4o=hS)VnU4Gg5ma4 z$f(gVhvCPeea-7B6(|~hQJr*b!_AMsPOTQNp$hv-Nm#kSh8Ao=(XJNKw<+t|+f4AKFZ}a(p~t6lB9W4eGu((Pwu3kIR3gP7E;Mwu{>cf% z!7rhvJXviwF$ZY2yl`kv2VML4Fp`%hO$oW7pC781Q!_D_%o5V;A0Ha1Ac*Qi@)}E1 zvgtE0_(VuD*0buVoZSYb(S!7tULH=0w|K>> zn%JVBABipT5g7&l%FMTN^ZzCNoWhENHHg40FE|qthw*LvJUPY^VeosnczsbRwQz8@ zzPPcWqd|;c(U>niu#LPK^zwE|R$PedV0)~P&G&2Fh;~Si*TQ)UOfwe02yIYVS$PdP zgt~=`=UrPz&_2IrPTkyEg4(MW)xL{5+UnXRb-?)k+ndNHwKd^ikz-TpJTfjts2x4B zWXjnlm#!~8x(F`a)Jy?`rj}oUXl7;~p9{2Pn-u=B=_Qq5sEy_atC%Tdy1touLsLBI z#$L#OQ}7b&JkK8ow*b0G?qBDMS(8mSkfX$fD;jmwugt`nmWB>|>{W$)DlONj-puB3HImG{89NF}|CzF5~~(S2R&&f)R2|(h&Q>*`#L? 
zR0Oxi79%I$&&FJ&XtCMhV}^b<#r3nSt*OZGp^a0?0pM|bOzj`$B|oVOTN-sLz0Y1~zngAgkB5R_ZR6E7 zwp~|XzpZ`5;uY#Oowi*!M~B&p;xb2XtU-QnFE$Ti6UDPQhETIZWl(v2H5}9#5+ODv z2XguHzdL#BS0ZQ*oJ7hRp|y*hqfh-iz(!1STkHDxfcp3E<3_Isg@ek^Z&z<#w46jV z45(+KM>TG1s0%diMxLC4`C#d`i$K;KMGT?(EmGos=CzQHe2I8$UKWf` z{R?U!`Erm(`|u{L@xmzR)X}H&9&Sq+|I1_bEux`&X-DgbAdN3H5U9i7)}+&-Zyc}m z!9zQ`O84OTD?ih?j~=XGltuK~c?Qp{CzQ9eCCA6!xiA+(xqd~XS;u_0u{BhM!`}S} z+B#~Rmd`ee{$ecyHO{B8);tJWnwL8FVRE~v!zgu*`55-yH+E&d0XE;D25&=qn&pJ5 z@)nEQJbwfJf3$uytZAOo*oIq5o^(yKO>w&M8^6c?mMn042Kc>w9mcRJ;z}f!8kT;O z)@;VFM%|LjIPD;5@b_hn=^EpE)tE)gaxDIQwzc%;2m5@nHI*cvvnW|<=FI6d>4I!ZoY z+|=QQR_hl5+f4Xrbft5(=i6eQXx#i%n~VOh+;chO|dx3 ziSsYmoU^GM<5st@?Ar~Yme%X@8$#N_Q$bms^~;HD<%}8-zfBZ*Bqi|SSdz_ff?vN2p<{5;E#0=ga<$3TndGOBf%lGvW4BFD|cr^#QjC4{Ty z0SBgCf^_XjOY3qV(+vfpX5ZHD;2K_f{C>YZhP_{|Duy?`u8mX)`4pxi%fFtDCO3X?EWj*0(jSVQ6i_i9NrLLEx(O;`WZ!s z$DP+K-EX8hMh@n_3$(700vS(YkrKjzDTWJt+U-TJU#nIYX=T*X+(47R&*Rvqr2EOi z&o{H*9hZe0pO;qwoZG(#Jjxi^N3nc?*Z0`{1CovD$B#bZPyaB=k9#0F*-OW)u0^M) zZ}f*l^7`hti@loK)7zeah>4N8^ZgdL8?eQS*7lC`On>LsNpxPnl+V#`$rTu#SlX8{ zA8WpAJ>ht`nOl;00l(tp7RyJq@^Sguh4nn>nYd1bIr!Lm)t|6ZvtIT?I5al>>yJmO z(T`Nyz1fA~DBm#z?eLkM7P$bNs znjQ5#hi02pmh(!kShvcKE*f9tjX0b8dY)O_RFw41+&GKzzMdaagtwm=Dj|>JF1+d% zdS-U~kzW>Zz|rZ%5cg^RTAZ+)e%*fl$DAqXC#Lr$##EX`27%>M^_n$BsuGiD!x{{_ zjN4z6L&w1XSK&#kXQAN6xAKQUsKtg-9Q+hOl7P}5krFqMEa+a7t# zY^L2n_>804|1@sYzvN`bTp<70Sc?~s67&dq!1KZGUOqoQ19vQc$SacT$J1xpdG+)H zm;2)2iqBUjx&3p6?!9WVK2M<|Og-Wo^!sd`5?f@3NuGd+KN3a!(5eBCKZs;LaF=#% zcH?k&dzs#-ypB0SQJ=8Vy01725&CF>FT~^7Za5Kn(GjZGPmh37@`@Ftr2$oERbL1mx+F>?3&P4aReEor@&W_kz)`#I6pqaa?!4wM%-~dCY+hn zSc>x__3-_ByzQ;3zDahhUxRUrZn!cx$QF!Ca>@HRe-RVyjJA!>Yi+mY3ObDY@98PT z|HfQxgDJ+(Li+l-*xqUs6}m+9n0PaiUmjU0Nyc1s$67~+X`gS~Ur|cK6>EDxd5J$> zU-IZnG%L~v2U|*WO0FBl@clHXKfm9IGFY^}$Sjfs!*uvuzs@5>WutZ1HMPIEA`3__ z!N>m0E)7?LYZ1N@d@c#@`}gxmOmK&A;*0#cQ!$?F+gdq69b*?pzL*0hK%>LC zEe*>fi#V{{^Yq7khW>JtEBHoa8A;DfY)jmEG*(m}jW;%*h4+n5=z;0#H=*tIZn3F& zjP`_)#S$Lll)b!`i3PRle=jrs`^PhQoa6? 
zcI_iM8zfTw*O749>4|rA93KSdQ7nJo|JUBvfW>)TXMO`n8Zk)1mSkgFPR6!uV_U{Z z!ZLQ0m>C8%{7GX8*~FFuBN%?{0Vd2KBS%fyjgpd7X-S!Zk%S0olwCIKZfL8vv{71` zI4NnIl!hN63A-e9T9Ud6O_h{1s%GDF@3}MIH!#+-+djL`v-`~>INyE0bI-l^+;h)8 z_h-=3tIE1H)7|J_Ke?%odQoc=rHIw=mk*gpWlehgJy#+fd)Wx zFe)ZNU7!n~HJ~|Py%F<+=7V;F7J-IAJ)k3?J)jpsqo9+ZHLu@@HQ12;2FeH3CT_&$ zKnuTlBbJ*5dEdGb8wFi{<3{W{XyLbS#OxdJADMu9Ky$u>{GfH9r$CoM=Ru1mk$)rV z_1CBuXwCGE*f?k%=rrguX!a(QH*+Jl9kgclMyv-kf9^(X5p)hTCmZrF--s1~j=crB zpxWP{{h%YDZqnPrjo1m$u^-%sy$x#pJLJ0+@MT|$pu?cu zprfE+&~eZa(Cj?u$L?y#_kECl=fBFy21_{espNf{*Bvu~^n4 zSi6DRK?9%#puM1O&Uo3VK)D1cU8UTF*vQRodc2Xqc}2k0Vb5opd{=moR~Gz>ZnIs!Td zdJ%LPbOF@^9> z-3vMb>H!@EZ2(;Y?Ezf|9R$sOIu;uTEd+g=-h*c6qdrB@D`+mL8?+F#4zvsOBINgPMl|U}h(pc;QXwSY_Yzed%G-EgV*By(wKy%8FAJke7KG0#%YoNImsCNO{ zUx{%AI$8yNfd=-YexTV0FfKrc4`Td2j`z>P-uIxM&tY7Ex1{wgJ2OR^| zo`fGe0y_tFgStU`LH(d}puM2kA40vJLi=lDu`W;#=rHI=9sC5Sb`*MlALH!@CT{sRu;Q&8qE~xt>7)PL^CorBt?H@(^oakTB6QD)E zit<3+{|R!7Q7=#rXyL~p57g5G|KWl@J{gNmgXVk+dMH5}bT8-#s0VZrvK}SHB36DZAWvIs)*bC?g=qTv$And6e{V)Xk1&xA6L5D$KBl=m` zL4_th*^+3MR}X3#UGK5nmGxeYupInc{KFeDxDD|kjGpI!p=-qlu3Ia@PsSf1J|M)g z(S9ytDBZE)Zbx6*{rE8kuvH2w%b5hvN$_S7FFsWE+xY1Qh9N0Fge~LeJg_Yi5`0+} z=rAd-9T@THmq@RIP7H9Z3Es@`_ zl%DydW&i0&m6+8MM@yb$9Q2u*iJi zgAn9GUWh;zDgBwL*}2PC9a5qL><*TwQNJyrF7J#XZcGz(nzTC3eabTQNl~Z19!vPw zEbhoBEd3w144ky2m1e&A)}@)*sc9i zZEf+VO5|xk9{WY;U*_>R&aE9<1{L9j!Ux+ zDD~305WHtwkmoB-9T0kWL^6+ z9$Su$%Erstw8hwa4LsRc^NfMtI9~Q+D3Ip&*{XQtkd4oSKMH;tYxE(y1nf0nE&`nT z@fx;NATMy$#+s$&5*Z+)??IX`!)tdk3k+OzBszB{LGruYaN5>XEV@$P^!|U9Uwark5(zMn9{z~P67+#S@oIpkEPsa4qI9L=|J4?QU47h?}AQV z9k5Yg)4=e5P?EfOzeLhp;q<Gl4XpiS8N=!43Wj!zO79BqbVl;~A9F_^J-y?DC}{3-+&wtXK78hpl7YMr zWnzR~f>&l*eF;O(NiQ7(d7}S9kY9wg^a#pA8;Z_Zht@gC@2@=+*MuXyX8*J^yksoWtd-&P$F2%ju10vz{aT5?&Z#>dWSJ=(lh4Jc%^5>jW?>_5@fDV(t`rNUMtL zf$aS}_zGV$@<|(_E+7RsY^AIT^3l`auYufZJR9{Dw(O%E(x1EeNsJMz74<Hi~(Yv2zp(Lb=1p1xo?^ZS+{hRfwEml7i;?Xzcp z&vNW)34T|K-;Gx*@a(t>u~fGJ%6Vf7`y+UMm%71pMJ+WK*v=!*BJwQBJmYESa)%xi z3V7gPWPAAj(wS-Em)$m=*7rd8{>XOXIXpA%BnIRoIqyJDFZNdcy>bfHqd%}W^Y4{& z0&*7q1LV98IX$!gzWPvqWM!g{F5QS}a@?=bv$Oxc5=^7BE0x`UpHTO#gB9-RzYp>P z_^E@;dC2TqDf5}|{jUD|ilg@o+>2f!$(W$$SU5(l>`5uwfnuHE`^%=wsBio48@M-m z&*3=~hdiQ-9Rv6F-{*+llZ`T`QRV{n$TILupIrPb0-FYQo&cAeUtvOJVt#dJ-7O|u zyrTSDFeiJkw?@zOvEwHfSS2tKL-PEL{N-wXCSP9(o;vVM;~BOse6twOaK6q6!Xq^= zIc*2y&kma?vj=&vBd@A6l|2Y-DG3_|_O=eQgYR`v;*h8S~f|L_YeE zZZbB&4+GmG5cp2e9AH_%D27L0iaAQGP1cr2;R-Xth%D3k)2m_dbIwb%#Z(MAARFea zNJfA9Kw8vtAd@ZBVe{~lh`~dUo&6^CPVvaTcz-$i5Wl!0Yx~y>WJJ@8Pp2J&(93b( ziU1rRH-0W4|KwXYVpd(|5&^NUNTna$YLNL@ulz?xQHYhFV=d?Gy;< zVkJTx(g!4RxOF+C_NY6FvnunFLq-d>ic2zX7)JeEo6sNMgKv=Wr5N|nA%=W$INjBc zWnD%+1zPdS*~)b(R4iWwXBBX0s36)&lmr*iRiLQ~WOxp-p=zL+z+Za=_O~U@AjluX!`qN!v z0K6ofc6{=MwUP0Yt}13{usGmJS>S|LQl%51V3Yvq->7Ym6duVaH)c`;j53Sy`#KF` zoN1`>OUXI>$^fRuc^$BKfYBKUuR~RvioJ|b|9nN0hio~Z9zk7fk0X8UY zT-gv-$S|XlaQX*lrW5K!e@h#vn8AW9l6D{ysP=Gf}^# zq%Wf~lJ?A$le%bDM1#CWkS64!b)CS;+`{P+#fFZ7S1tA`GWCLN6-dc;9=R3!A2?I$ z5v@eKK?A@FaK3a>+SR1hnY{nt#Lh#R+=KXF^kbIxxk}w8MAqGtR=G_P-=zSL6tREK zgXeNMU0W6Xa?UxBMw#9CSwy+6cW%V;vHpeq58qjQ?tJ>t=fz%CWH`P5v+2%(q4emv zbXVVRr-wh6-g4$_`su;+H0PDe6)0Z?xEQR%cS;VKOJ)Km_@%^}%UAZxG}f_P%ERf; zQ%<>M$wp8sUC`Uj;%uS=x zF_6}fNh>tx6qSvqDOz8WJH-RMzXR)58dq;X{shhv?RcgSVGF>ffzf%ge}XtQn~lO|KC8y8;XgD(T;k{mOQVvTw0&@HGlb_4pdoso@|;Z3E}Wwh6W zOakxA4sVKVB(@i@qJxt z8s#DU;Ep8TdGOYO_dJyWn@>BpedsSR)pqW{2<&vduTjb9EJ1Ca}PU)`5odY@1 z+fU!N4*}9;@RCE(@e)J-9mFDYkT_@L&hT9hD#LO5Hb3e#h&s7{hBItgrv=NoZ9{ia z5EjXC_T3r2r15ZAxCCKF;yv4w~sE$tuq7m%O&o<3yj3&5@cGuS^c>o1Ia zR!o%JfMtoCa=p9_SPrn0gc-&}X&8Q%!X#-!PBk~d-tB#9v?UCqrFUZ8-2gc?|4up7 
zme(LBnpE#`V6W*g3~g;1*f_8qRB!&|gT4cN4)`cxj*X+%;&Y#{41HYgah$Y-KW3>> zd(k4;ru_Ch5YOWNK#k0!^9yRv)nVg_S?uX}z<+}Hr45O-7=}^Wuc`qGIV7tKe6L-P zuN9SCXBg39S_L9EF^;7^!xqvgg>!O{L&*}-MXEvo%3es6A;!mR;2i~Th^$lMi&;bH*_sJLs8A#AQ{} zD4XohkNmk7ocZBdH=bO5X>LP*i{8eZ)&;g<g%ci$(#nPwBbR1NDd{Fw;?ufO_O@*pFSz?cXFjSQ$rs`b zK!l~(Fn02Kixx47J?4^r%QLeTGqggIV85jj&oD5VteQbcT^>${{Yn6986EEd!7KMj zhf#-<88JEjT>5!AY{9vouw)vi6X3f5KDIes-{{6^*y5qSAYZZ!{%yF&qx>CVS-6vv zlZ4rUZP8)WCk4Q=fq8hISb2uF57DU$hiFxZWuLvYf3{qm{XkE~09J;s?%8s1W0@xp z-VS8aj8Q3nqvs$6FT_6WD9U~t_m1jPmR*Xn=}b)>fpG3FNORf_3z-$-w_E?5C`HDq zIqyL{fO}12Xs=cx_dmqAgq0N28V}naw6~Ht9xO>DkcI612xNHHudsKF7sEa@9rlt( z5dG4NJcGz%rE>8he@=IxdVysKME)ZA7lC&J?;%JxKZOwqr|qAjJsIifklcsyM0T=- zJk!WiLwVQ-h<%;3a`HB)24X1uo_g~w%>x!Y>UEvUmg7(OGVsH0u`UyfDQp2bbo)Q0 zv>-z)j>#D>Y;LCeC<%Fm_z6MgXeQ3;@r=Dl-C7SLi!vKxcnbeyJWu{|6nVGX;%(OD z6^>ohpTyenGY{Tw@RF|3Ru#uXAZc6j5aTKEkZi?+em3xI2T$)!c--I#zY9+`#QVV$ z1&`|60$|<1MEO#`hB*eCREdzsX?wOmegAA(f4b1&81kLKJ-hReq4Q0`!Jucv!bez@ z!?@QUf7m=6ywAVthIT-=P>CXNQ16c?9vJX#TDwvfA@47Ia-U{}qVubrs&OS7( z5;EQQz;1Eh?>e4QpKi-J90d!R8zNbB0Nl5+dJ3`WtUAf>w$KrAB&**>oX#m%-$ob2 zVXI4wgDB)xZcNxb>F+gQMM>B=umWIqyr+*HKhw-dwuZ4Q`W;ol1R?vKWG#cI8u<+V z?_P}8B>8k7(3OPI-N3>mjP42U&|!9zaRithm>bWUdv*KkX8D4rT=FE_-+AyfL(aR| z=mdED;1O-+{e8q;20Ny^irqKi$;QD&_$Fl)fG7GcJfzh8P}%nFh?|niE&#Tq!|dR51G@&S2+yi)>}|x$wgdKqCwo)sex&kx!LtQC z^h_T+e$I0~OxZNH)SA~IdjdRJ$fvh?y>3a)JK(W{CzXDPCmV-_JKm*^#8UvCd?hE* zkB0G81D?I$$pAln?D+9>z9Kwh-+xtqO!r?akuMbw>E!}=n!%%Ni?9h`N0P94V4kEp zECF*V`K9qXVDe7%Io-XjOUjoEtQwfG!9*X#+iXW2D#7ChPpWZBcmO=qz6*FpUy6Gm zVw(ZiaKxsBoUw(z2EjWAUeRBP`O8q(aqzqi9;IiB7p6HM=|z3)cz6f+BzTJm8$7m$Jxv?xR#ae7{nQ~8&#zH)77VN)|h70k6}&dur)<8_RrL0Ml#$s zr}*vAKjZMJ8QD)j*61g(9+$d~-w6ua-2Ln6s7P)>+Z@q#16C)lcF`rF;xJ%TR<%ncD&XQfA7>(|Chi;9}`Ra^RoVgvL$dBxf z*Q8q>!nncRdY;P->(@9hb_mGV6oGg97Tj~jvsNtqDNXydu0bsK3`EO7yhMhT(=!Mx z@gdthfxNxQt88~KuwGy%fh`e}&i+WhSZJ5eh^4BmEUmfeIL|$Uh#c&ce-il%w(>g2 znTXrT>|7*dKwK_SVLP^n(Qa{V<;}Nfe#m|p@fh;2RzAvmG2R}upUNu&f8}j4dB17( z^5|qqmFLu#myi72$bTOBd+?0*s&yM8_L3EA;wr@O)Rz~KXBm0;{L|1s6grE(9Q90vIWGH8SR?Hk0`lPb#k6=E!1NXY|jQZ)~?Qr_DDl6MfC3r`` zJE-Foz74hz-*y2n`MxgjZo3ort>w5Ewoi6Lmk`S$nZ>a70c$C)E{1J~#68Br^pDKr zB2_Wg#+PQx#my`99M+R5Z!i|q1+00e?Zv6-*-Lbfu`i2m+m(|5T$GegcnI4FevX#@ zwCpS+b_ZR-bI{#M452e?a5E2eUdB&8jO5xj%+F+S*h90>g(zwyU6iW8^{VJ5$C(XP zbjiv!XgizbIK45g`ZArG&R=!UIj*4Q(leswsc%?$rlv1noiC?rRCKRu5WA;B0`6$p z%Eddltt;Z1RpM%vi+K2gr(lz}H%Ul>iE5&F=uTJ#PKvI8TatZqR~%Gz)EezLn{{S` zgDzp-(T{iqM+_XDp3eu6;GkNF21eu?{D zcD$nRUW|dyO?(_DfDc!^(Bt^n@iPXV`J3>}gGamP-Q{R`XxmMAwu8rg6CO8s{5Rq8 zgQxcTFF@nP81@?b;eGt=TKe& z0fupn1-m%Kz?_O#JHR&rz8X1~jN|;E$1?P5^8DZvmhi`MkNBjeKEC%xhX|Cf0r@8G z|6j{zN4`PiTf}$cYVfR8$XH$P2V8*1!L#i_d@o1VY1Q+{20C`Em@XF;gZAO>kj|Vg z#?~|)NG?H+72kh)4d1_WCC8x@SXT6JDvoX#*jO3KcAl}7&CWO}`l79N)~t*_`!`X< zNxPUL46UGxQHUL7>Wr=UY-YvadW4nU%XD`fA~2oJL#AS&n3XPkpUx6bmn-HSnF#Lo zD`ux+RRL}#?uWb7TU|+%-AUK?G3z0aM)*p#cTg0KK=y1V+S`Ml>u9s~2;wgp%gQ|) zioI!PraY46KC{ko+8SN&9LU5qezXM>hv9rNv4=B|8C`$cdS)H%<&=x>4OI8r6zI(4 z6Gx{_ol**sLb&71Ix_d3-G~Qw#N#eq98{L6&V6i0X$MKp0TioUM4rM&u@@}+zBm2c zuA!YG_{vDh}*-nw&HLpGYa2i7{n>&yBwOZ(SFtou%9 zMArmxItsmF`#v*b?OzvNb2y@OpYbu$DMT9I0q(X11~~bN`f+ z#5qVD#8jhvKL&#p-_hjy#$%~)I^BgAqt=GN86*4%+N?%0_G9^iiRAd8cxQ9U5lsE~ zI^<^KyPb8iT}m$G;NnbQhOggp1hYQc6G-e*y8H5q`yYwya7G^*c-VR7!PB|>2JcU!+ir3hH18nGxGEHc zub-s%<%J)O?C5`F;Nj>)r*qFdc=mz8`-Q-G4#Z={eR+DG!TU)?EYEFxQ@nEUrX%zn$H!x_N<6O?$B+$ogXbD}P7;r9FP!@hM@$HN zbo)`G;IG`nz8YDY@$)*c0I*Y1=i@k!N%G6?{*PJUnm;%@LkIS(rg3YZaN2l**D_1h z>`aABSKvIf*haV5WL}h$ji3A{Fh4z!*f%BL>;l#c>|M&I!c&yZ1&eaaGiO}3@);M{ zxx8V>e!VbJ9*v_5z{Y{Gjjh&R2sjuo4|A-ptz2FvTBj?n0dljjpdEh_-(kL)9*-pS 
[Base85-encoded binary payload (apparently a git binary-patch hunk); not human-readable text, no recoverable content.]
zoQMIEyEFOsyv4`E3y#LYI6SrbtJQfGy@i-Juz>#VI4BKEQC<^4~^o_IR;#WQdq zo{2;8EF6hv<5)ZgC*lCRqw@aeVNX0C`{D&S5HG}`coB}oi*YPof)jCw-OKa-mtjx5 z0{h~XI1sPKp?D3B#A|UZUWXHLgxxFh{?}tq+=+eh1{{bt;ZVF8N8&9w7H`FgIL7Yi zy#MXk6Ys#jcqb0TyKpGpjU(}19E+=5hV^4e#`{F}55Ff#z_$ZFV$8jt^ffKPimHx-&{WoAw+=zYgARLIBa42rZk$41- z#UpVd_OSbwy#LYI6SrbtJQfGy@i-Juz>#VI4BKEO6KJR}j_Qcb%FP?z|@k|_w zXW>XZ8^_`~I1vZfy*}@M9`?lZu`gbL1Mxx}iWlKXycoygB{&g>*u5d|e;M|~E3hwK zi39O!9E#WANW2!u;&nI?N7%hF?|(h^#GTj|Z@__g6As0jaU|Y?WARp;h-2(d$ot=p zJ@F3gi+AEcybFio-8d5O#j$uFPQ(dzZ_4}Mk3I20?28ZKKzsy;;-fecAIGuy1Wv^6 zH2R;I_uqg$aU=G{gK!{j!lAesM}6KnWCV`IBXJ`3uzR!Zf8&tR*b}#6Upy8E;_)~X zPr#9Q5{|`_aU%AyJ1OsfD)z+Fu`iy11My58if7?SJR8U2IXDpq*u5q1e;)S4^RX{p zfCKSD9Eum=NW2)w;w3l{huFO}?|&Kg#4E5bUWo(oY8;B!;7Gg{$KrK35l7g4Yu^8Q z?1?+EFW!Iy@g^LKH{(dW1;^s8I1$I#ot*c-9ed&(*cb1_fp`}V#k+AN-iu@LKAeaX z?B165zaM+zgV+}z!h!e*4#h`tBtDK~@d=#txqFDq_kpfs`|lpofIV>|_Qiv6Aa25; zxEV*{5jYl)#EIC$ZYl47H1@=;*cXq*fp|O)#S?HOo`hrZWSoe7?7DgXQ?VzWj(zbA z9EfM)P&^Aq;@LPB&%uc}!0v#&|9RLG&&R%a0S?3qaVTDdBk^J!iyuF%)I~g*b{eRU%UYa;!QXdZ^n^$3y#HGaUzbf z+mQFa9ed&(*cb1_fp`}V#k+AN-iu@LKAeaX?4FhPzaM+zgV+}z!h!e*4#h`tBtDK~ z@d=!WUA_-=2j=}ZU{BnLeeob1h?{UIZpM*#1dhccaU%Axdv@OcXzYnwu`eEr1Mzqq ziYMSmJPF6*$v6@F*lo=FpNc*4bnJ^~;6OYRhvHc{63@o5cn(g)0d~*H`=5tB@qFxy z7vMm=5QpMLI1(?$v3Lni#36Ri&HG=5J@E?ci&x@6yc&n%H8>Kl#j$uDPQ(#*&&&H? zk3DfG_Qe};Al`&S@n#%}x8PX36(`~tyMyxnw_{Jd1N-8gI1ul`p?Eir#Cvfp-iH%$ zg5C4;{`X@~d=UHMLpTs0!J+slj>N}tEIxq~vCH>??%=%t2JDF&u`eEk191}$#mzVp zkHE2bBu>O0c8BEskH((375n0`I1rD=p?Ct0#FKC=o{ST*kKLxc|Ebs$PshG^1`fnC zaVVaJBk^n;i|61(9AI~7-v2!8iRWWqyZ{H{g*X&1!jX6}j>SuGA`Y=TEbo6A_QWf& zFJ6fQ@oF53*WgIJ7RTasI1xwKy&&&@J@&+%*cWfWfp`-R#hY;?-hyNCR-A}q>^A59 zZ^xc^2lmA~aUkA>L-B4LiTC1IybmYh1iKgJ{qM(~_#pPhhj1W1fyuF(!Brm z*b{eRU%UYa;!QXdZ^n^$3y#HGaUzbfJ2LNoJNCpourJ<;1Mw~#ig)8kycfsfeK-*( z*u5<8e?Rub2eB_cgah#r9Ey+PNPHZ};uAO#yL=z$mh=7_uqSTBzIYH0#7#I9H{(b= z0>|Q!I1zi;ZOQu|jXiNI_QhjyARdoH@dO-+C*fE;87E>NyI$V^RP2eTV_!T22jZDH z6wktucs7p3b8sRKusbU6e;)S4^RX{pfCKSD9Eum=NW2)w;w3l{huFP5?|&Kg#4E5b zUWo(oY8;B!;7Gg{$KrK35l7g)BJY1a_Qajo7jM9UcoPoAn{g!Gf@ASkoQPxWj?Vkv zjy>@X?2C8eK)efw;@vnB@5QlrA5O#xcCXC)-;X`|Q!I1zi;y(;g2H1@=;*cXq*fp|O)#S?HO zo`hrZWSoe7?6&6pPsN^iI`+jga3G$EL-8yeiD%SiA%$;t;#naUdR#L-7P0i6`M$JQ*iqAG_o8{-NNZES`fCae&?H^Zw^yPdp#{;srPmFT|mE5st)*aV%bf6LE;$8}j~_VNbjQ`{I>2 z5U<9ecnyxkYjG@IhZAvx-5c}%*JDrIiGA?~9EdmJP`nvO;w?B9Z^el?#_oi?|Lxcl z@4&uzCl18Ba46o5Bk^7wi}&F~oM88+y#M{!6CcFB_z(`nM{p=UiX-uH9E(rjMC|f? 
zpgS?|zX5yVM(m3R;XvGkLvb^X`n;;}{r{>VBWX{h-NWw9w*OUy@Bdc~X{Ftl_OUn+ zkH?{S0*=I!a4epT6S0roNqPTMu_vC6eenz&h-czZJPSwS**F%@!HGD)?k#!$^ROqL zkA3k19Eca9=?FTsg8#O|$m|I4r^UV(k_N*st+<50W?N8+_O7O%sJIKu8* z^ZwUkPuz)p@dg}-H{npc8AswRI2Lcki8#jY0t4#Y=rC_aiK@o^lBPvE4_8w=n6Z!CQOzi~(--xvCh9pA-$0}jNSa46o4Bk>j- zi?`xL9Amd3?|(b?#5=Gr-iZV8E*y$?<4C+0$KriB5hvI^EAM|l_QVIVFFu3=@ev$~ zkK#yt9LM4lI1#&iALtIu`)|OWxDor}K{yaM;ZWR+Bk>3vi$~%_>|yupy#LYI6Srbt zJQfGy@i-Juz>#VI4BKEP{nD;*wd*bQX7tg?fcqR_Tvv4Gyjbrf~oQMPLo|E@K z4}0SI*cUIrfp{Sf#fxwxUW{Y$5}b%b?4FzVzYKfg71$TA#DRD<4#jJ5BwmYS@j9G{ zBkZ1+_rD%{;!f;~H{d|L35Vj%I1+Ecv3M&^#4&aU<^6BRo_GiL#XE5z-i1T)ZXAjC z;#j;7C*lOV=jZ+J$Da5g_Qi*AAU=Xa@lhO!kKprWAR9wh&}8M$@?FTJ#j1c#ba?G9*;xu1RRMc;aEHwCt@GFO?m%Qu_vC6 zeenz&h-czZJPSwS**F%@!HGD)?$EsddDs)r$G&(04#W#_C|-mk@nRf{m*7MkVs}{H z|1#`}S72Yf5(nbdI25nJk$5eR#p`e)j<9<{-v4^+i94|`-hcz~CLD@4<4C*($KtIx z5y#kV&imhvJ@F3gi+AEcybFio-8d5O#j$uFPQ(dzFU*-v_$G^ZpyKCvL>Pcn}W6O*j-cMC2ES`)Lv5(ymdH++fC!UUd@eCY@XW~#i3rFJFI2O;ri8#RS#d-hp zuqU36eenVuh!^5eya-3)#W)r(!HGD;?j?Eu%djV2fqn5x9EexrP`n05;-$0}jNSa46o4Bk>j-i?`xL9AkH6-v4&&iFaUMyb}lFT{sl)#*uh0 zj>Y?MB2KV-S>FGC?1>LzUwjA$;v+Z|AH|XQIF7|9a3Xg3KF}@a{WoAw+=zYgARLIB za42rZk$41-#UpVd_ORQM_dgnY;#Ta7$KpUd9*5!yI1*37v3N30#6EVty#J}#6Hmv! zcm@u{GjS-Mg(LB79E<1RL>ypuRNntQ?1|@NU%UVZ;)OUAFT#;{F^u@5DuzN+`|9b3+JFzd`fCKR+9EvyNNW2Bd;;lFl z$Jia6_rD!`;vLu*@5F(47Y@a{aU|Z0WAQ$mh!gBynfJdRd*XxG7azib_y`WgM{y)R zj$`o&oQPe%4|K=m{WoAw+=zYgARLIBa42rZk$41-#UpVd_ON?Z-v4OqiCeKR9*YC< zcpQo+;7B|P$KuI25&PI}&HJB+=5hV^4e#`{F}55Ff#z_$ZFV$8jt^ffKRI_kr%Xy#EI5i5sym9)tsN6As1A zI1-P*v3Mj-#2$9vlJ`Fvd*W8?i^t+XJRXPQ2{;l@!m)TVPQ*TT$LIY|#h!RN_Qf-B zAfAas@hlvPXX98r2PfhHyVvLa&%>U0KK8{6a3EfYL-8UUi5KHoyaXrW5W6?z{V&6w zcm?*wD{&xRjYIJo9EsQBSiBA=;t0Dp=KZh7p12eH;ten9u_xYveeq5lh`iJh<))P9EgwLP<#|e;^R0L zpTLRO<@-Q)V%~oP_QZ|Y7Z1XLxCw{iW*ntnW&T^xBXBGpi4(Dh-J9$E*Iz#xd*W8? 
zi^t+XJRXPQ2{;l@!m)TVPQ*TTC*}Q5#h!RN_Qf-BAfAas@hlvPXX98r2PfhHySL>1 z&%>U0KK8{6a3EfYL-8UUi5KHoyaXrW5WBbL{V&6wcm?*wD{&xRjYIJo9EsQBSiBA= z;t0EM&HG=EJ#i=Y#T#%S-h@N(W*mvP;8?sBC*l~plk@(!V^6#T`{JEA5bwgFcsGv3 zdvPq@hZAvv-P`j1_hV0d5c}doI1nGfq4+3{#K&)Dt!OHs_^~) zs>1jGtA;e0?;oA|`0LwiHs3#rN0{#)#Usu4k75tIr93|xd*W8?i^t+XJRXPQ2{;l@ z!m)TVPQ*TT-Ms&)*b`63zIX->#4~Xyo`oavY#fW{;6xl?cR=3%JnV_*V_&=g2jYb| z6feS&crlK}OK>6%v3o|||1#`}S72Yf5(nbdI25nJk$5eR#p`e)j<9=X-v4^+i94|` z-hcz~CLD@4<4C*($KtIx5y#kV$ot=pJ@F3gi+AEcybFio-8d5O#j$uFPQ(dz&&vDX zk3I20?28ZKKzsy;;-fecAIGuy1Wv>*-v_z_^ZpyKCvL>Pcn}W6O*j-c<48OL$KsJV z5qsD@JMVuq_Qb8&7mvk(csvfp6L2J+gk$k!oQQqwHs<|L#h!RN_Qf-BAfAas@hlvP zXX98r2PfhHyXWNn&%>U0KK8{6a3EfYL-8UUi5KHoyaXrW5WDB*{V&6wcm?*wD{&xR zjYIJo9EsQBSiBA=;t0Fv<^8Y6p12eH;te`iJh<))P9EgwLP<#|e;^R0LpTLRO<@-Q) zaNd6d_QZ|Y7Z1XLxCw{iW*mt};8;8oCt?q~L-PJdV^7?QeeqZvh{xklJOM}INjMfy z#);U+Zd2a>RP2eTV_!T22jZDH6wktucs7p3b8sRKusbyGe;)S4^RX{pfCKSD9Eum= zNW2)w;w3l{hu9sK_rDB#;uY8zuf&0PH4epVa3o%fWAQqih$HM?koUhHd*V*)i#Omv zya|Wm%{UTo!LfKNPQ)>GoAds+V^6#T`{JEA5bwgFcsGv3dvPq@hZAvv-3#;n_hV0d z5c}doI1nGfq4+3{#K&w(FCK&gaT5;3%{WS(KL6ubJQ62j z54#uD`%j<$u_tcDzIZGS#N%-&o`56qBpiz;<3#LZcSPR*RP2eTV_!T22jZDH6wktu zcs7p3b8sRKuzPXd|2*u8=VM>I00-iQI213!k$5qV#Y=D^4zYVl-v2V}iC17>yb=fE z)i@Nd!I5|^j>YS6B95?oY2N>O?1?+EFW!Iy@g^LKH{(dW1;^s8I1$I#9hvvP9ed&( z*cb1_fp`}V#k+AN-iu@LKAeaX>|U1lzaM+zgV+}z!h!e*4#h`tBtDK~@d=!WUA_-= z%X$9|*b_HmUpxp0;wBu5n{gx_fn)JVoQOT_w&eYf#-6wp`{J=U5Rb>9cmj^ZlW;7a zj1#esT`%u{D)z+Fu`iy11My58if7?SJR8U2IXDpq*d3MkKM#B2`PdgPz=3!n4#kUb zBwmbT@e-VfL+oCj_rDB#;uY8zuf&0PH4epVa3o%fWAQqih$HM?k@vqId*V*)i#Omv zya|Wm%{UTo!LfKNPQ)>GN9X--$DVix_QgAKAl`*T@opT6_u^Q*4=3UTyI1D@@5i3_ zAoj(Fa3DT{L-A1@iI3x0d;%w8m+u4JF?s(D*b_HmUpxp0;wBu5n{gx_fn)JVoQOT_ zUX}Mh8hhea?2E_ZKs+9Y;t4nsPr|WyGET%kc3boQr(#b$9sA-LI1taop?DUK#ItcM zo`Vx{fZeO}{^wy&JRkew1vn5d#G!Z*j>L;`EM9^Wafsb(^8S}$PrL&A;*~fMug0Nx z4UWWXaV%bk6LEyyYxDlsV^7?Peenhyh&SO-yctL0EjSi$#fdn^?%2Hl?bs9Vz`l4V z4#c}~DBg`D@m?H@_u)jGVE4Mb|NYn#AH=@+5Dvsga40^CBk^$@i%;N0?DBn}J1+0P z0ej*`?28BCK-`2waWjs@BXBGpi4(Dh-M8fZkH((375n0`I1rD=p?Ct0#FKC=o{ST* zkKOTk|5LFio{oL-3>=7O;!r#bN8;Hy7SF+nIKb}pdH?gUC!UXe@d6x(7vfO72uI?@ zI2JF#i8#dW4SD~|uqR%Deep^hh*#rKyaq?&wKx{9!-+V;?u~i>>#-;9#J+d~4#b;q zDBg@C@fIA5x8g(`V|PN{|90$&cVJ(<69?j5I27;3k$5kT#rtp~POy7Z-v55=i4S65 zdRW%B6j&c(4Cm~-+(=FBlg9Ea3F5Np|}}GeeN!N|KF|u{-5Xl z?jatpcW<`+ck93ZueZ0-?o0bv9Eiu`P&@%gsgv`cej=og`}jKq->LN8S3RS>;^hCI z)K%J?!_#^zj`NN76DKb;|9{rz|Erq%VW+3mzLcLW{JhO^UaUFJ-MG8uchh>O@cJ^_ zIo<199OnluuigHIme;0yhW~wG8?QIx@Q+RF4fx%T!RB9oXlwodSNr92oZc_cXInDm z=1R%Au|B>pPV23bsgH?$xbEP|NA{dN`AFHR&TLCcH@|#NrT6uR;`;v=_r8-SFKnS+ z&)q+*_cF(MiLu^YqE^1dX)^uUzG~+?y|1zzUh|Qjv%WIIq22sWUw7Beuk?QUS0^9g z|Dt@DL)LCzv-{s9yZEo^}YV#eEVkGjzb@pIrV-@=6CvJUd}OX zC5QTDm8VWV{Kmf0_BXy>`_kjkaiQ}RV|l}IH0!0~!pc+TSZ<@8|BLlu*3aQn9aFQd zRsYSlwiLGY6Z3kGgWcZXLE)~G_l^6(>km2UntFHXiI)cq?5wYeznR9qS9&{c8pzM; zu8tR+cE@p6Io;>A|KvN)7mI75e1>!P6z0EnW_9K}l9P|vHR24&>jQp6|7WmRBO8Q%Wu0dD871`OzYhHWPUN}bR1TGP)5 zSvx%ghIamsLnj|u#WDNFWtHCAGfw+Ue>(X{2S00%Iqm7|hjsqyrIU}Cw*80s`B*0p}`}LM= zzpiJ$-obvEv8{SFJw|5it`p8Q$DoH}^0x23aV}~y7yF;j=Yo0do1@w(#$07C#xmwt z%<-kqzs~wR<-Bx_Xx~r&>D&zK9B@|ueQ>gUaM_39W<4`*v(4aWdaPZiy83zMa1DKw z&PeAfdd6u#z+7kZvg=Bj$Jf}NnZM>^X+GWmp`BZ@xf@LVeENCWjGfngn0Yp9BVZf0 zb{4NO*QvT0%K&U%^XI(A{H$NIS@+RRcl5Tf_NO?d9k+2^tZ;rFdx7n|d&(29KU8@l zJ8wRdUL&8bpF12|-UqyP=e7SDXx2XGO8pwCYrTu(YhTw*pT%>CTyPcm->z&HFuD_1j>kn({=U%{bcFM|1tY(X2PlKc~{%-Z7xWd}Qa_P~J~oSs@V{4A{1m1%ZGbiga_P!$LzH@f}bH93V`)^od z4P?@FHnj7Z#{TOp`LA?4tLUHcH=Sw5&)8bn7p~nmn|iEE2lm-g8G#cTCG zT1{V8_Yp@5#pWRmL>4Z%qGQAJfTa2K?mphe}Vpe!J8D^1Iz9 
zUcK1qtufynqnzG|G4&hcUu9$DxbPmxv2teiUALJ-a|~;oqtEr^*ARzmh^>GA*QdVj zUCP||UC+yfIc>WnzwVvae=eu7mWR^k+!AwW#?<4qcZ{m^UUh_HceUA%wDwVsUG0Nr zp9ZwoZZEZefHh#pU1i*_b9~I&dxZ0*zV>+C0c_XaJIvanb_;8-eyyXAiMkGpW3FD> z&i?fNX27t1YjM<|{^P&RtVOo_d#-WpN7Hp?KN#Pc%#%~*8h1PUa69)D=J)$b?HBWY zXzWz*G}@+BdTW2i&p)g5R?Pd<_37`IlCRyw{yxSv^x;zb=QtjAf1RP+gHgMN>vru< zUVmq$x0mB%+k0fb?vr^e56S28(o@W1d9ZW$S2>QSoyY3PzIhyVe*fd}9Ap0o>$QWi z)L6^!GS~cD7#qJwTo3x@?wL~iYTh60Se-MSyI0bunUgVg?p`q0pnUH3{*FAgAHIC@ zkyokLn7i-ud;OYDf7|)}v}1NIYk%P^rzn&K#{37?hgnN!vK~Ib z^MScn+`(Gw;Qqe$UiRy~>=*Bc+vGlCr<{A{+At-%W-KRjs(ZcWH(OIZEq!b1;UWFU z|2vLt2mPCCz{i+_+WR>lGIyALyDyy&r{_$r8RmS?-nUpEOIb@D?5o-T=rOaN+|K2U z!`L-zlWo?|NxYMJ;ySU*tgDN;wwmkWWq(oXch6d3KWq0@df(4B>g$f*fANQtkJPX2 zY~ReYc^~a`b2?f;V1`rfyA-9KkF-k0myL9&e+ZH;Qil;5kB+8?F8 z&y4Yl*|rYyUenIK(#!Amd9O3iw^N?rJ+S^<`1(Up-}|$9t}*vQ)d9{%$KD71A$|WF z!Wutdj)8ffWG$HchaU4DVcv7DuJnF~V`b;c;U2E`g44c-ej|QXOWcpuMzx#!n;)2U z$nK+Od?gvD{;>{pP)bZJGCyj_WJE-K-0<4(t2-@oBwgOy<6;!R!~? z`6>JKBImft9AK?}g zFuM;l+xqPO`o?u(WWBakw9NVwe-!d%bp<+xpEjvL3R!>l#d_?2cYWNYq>;u>d8FQdPR zpM7h5RH?nGe$28p&iXuJ?oFs~WQ=C54`Q43{$H8a%eAw23-er`i!05uLFq$1-^-t^ zd(3r@ZJ6sK&knukn7Ls-%T+I$x|`|8L3e2e@v|qA&CM!@QrF^V1wVy}zm7A5r@v+cozw++!Se zt7oTkz-J<6E}ZJ+Z+I>mHKPCf<=_8;F|wWM%uC;M5$EM@-j~16d7nKOrRO&Ho%+1d z@7!h^H?xhtYwrNAR~$=o9)HHZH}RTr*&1C&?b^OI`u*(O-cnzq=5s{zxmCaQ$@(+v zb0hQicYMBL=Aq*<=H`5^pY?swTIv!rK2d*VI#88=p4Mw`4xYF*4W2e#eOQqQ|Jh`{~Qv`@H_p_Vl^y z_ES7}nSEh={r30$>HfB4_eo2*HrO#coDa4>kz;YH`={+3=e~8(Q&<<3EBfxAp1HLD zdN^#3)nn#!uj;NZnfp9`H^-oZ_e^tN{uicw8qf06(r5T8YxS%2W!CE3?OMIv?YCC{ z$hlFY_FLwz{v5@&%;#&T9beBCr+kjOePsW!{{H9bcE8NMuX&DY`|mN7r|W@9d2dCD=5x~%`0yRtDa$j1Cj?q$ri zZ5C_TjL(s0YctNfvvD5cdQ`s-6pn2z8|P{n=jY#SoYgU>9Oq+&ajwb6`L=AF|He3< zHupJb#thxHVFWWeqeKyZI_-1efR_I2w#lpq`sreS%(*s<>(W_f?PmKH(QgO;`xI;A26N5g z_wRC@{Yr!Lk)^ET`aL7-#WA1ZvW^yT-L%)RY1!JZe`c5Pv9Ph_a^h~lD@aSzyEc+>pbV~58LMq_9bM${}P+Ic$v93 z`}YZIJ$%+<-p9>zj-JvQe7;lSy#AMLd{a*| zzGLP+nC%?ox_A09jeWy?>cf}!KR!qPGu=)t8_(aF@vyI}%)J=**d=q#;okbpr{4HI zm${Cb^Vb}oOHMP+o!L0M7^nTb@H_l&j=g#QVeLP40oiOHy4`ecyUq8H+;en0UDus< zy^p%0|Gqq(?aTkn_JuE0hM8-t%QJrm`*fRmpTXxb=2P7h+?gHo-?H}W=P}1upAEj8 z?aSvFzg-h6^5g%;vG`s4eSzH{f{EBhbM+gStk>pJ~EN&ne(eO3MQh-p*$-H(@fCcKkz*RL%c zw{zJS^NbQ?_gQAGo|VQc_!ld>#?Er=bBZ0pQUA=Gt}=J z3g0F5$a}vpYp1Izf8KeG<5^rgUEFi7V9aLibd{WqZr6bNIoWq^%6CbluIhh$zQN}{ zpET>3WAJ@52YlY~1I|CYZhQEQ<>$PnzFtOg@5g)MH)&5lCpOPU>F3904YhOKHTQ6T z%dx!2Y?E!Z@!EQJXr=dk{QgPTd9mMjy*hc*xj6Ppr{{OMCZ`IT6+w5%L z%=b+r*|#a#bLk%Xw#Ul6Kic~0Q_PdYK9t{Vo+{V(o#T(S_MfMtjLYoH<*dJ<^l#_m z62_B%9$ac4&OYX!C70TV$+|ZAVdTwR&Er18+>=)M?^T$!*}-QU=KiMaI6KUHn|W?O z)2#J#%x91G^NDYypP}*hFR#AWeWLJvX}-p@XLoa~?dLj^&2_*$i{4V{ebLMZ_hTIo zr}OhL?<@8gme1$;iE)lM@9R7ln9sOse0F7i*1u0m&!77Dtsm24&wFwGJ&rZv; znVW4Ki~4c9z0zww-#yA2;yJ!|0KfO0JLNO_pF7nX>hI0_2Q+7Y!!fk;;cNQO&Ba`| z&1Y)noc?$7%w|6G;`e_r`*|n(d7oQ<9{+%RhEZm`wP$!{p#K-l{VBhj_o^GscPh8@ zeLB~^4u00izED5MhVl0v7j*v6Y@eFB=Gs30jkz?nd(2NVSMm(?Z*1?cJ)SlAY0kMS z=P~|?nfuSPmXEXc&Hdx=cz&uM*FR&NjKzGXdcHYN?klx_gf-W9AIkRMs`sAsa~}P? 
z!uE=N)mS^WufNIqGWX}^-uzjPRljxj{x`0>4|2UVe?#)>dwu?n)#=A-MT>f-NrGhaV@=<`*yqb%`;1x*PHhqhoAN5L9UPX zn(`gVoBgr%zdhBs*{?J0dZ+KvjHP}rM4!{R*FNq3v`xs?`>?+C{#aqZ{)6>u-b3$V zoE-1^xwuBg#+>w>i+7!3Y*miwKGsG3Tx2|>-fTQQH_6{m4ecB-uKzK7tZ{yrz+aEXIYnabOd5uHuvy9W6pY^{H7*%TjFzeAC{|e`bd7t#%wLPVdw1?En02&ey){I@uF!yYHTae3V_sUt#~h%)LVTH&;8dbLFZ0yAHV* z>36Ph44AwB_FO5;yz05Km;O(6uH4UY>04XnGy7jlyC(LXD=&=iKaZ=q-kW3dS#x|j zo-4VAn|X2M^FUqm$A6a3n;Dlq7rt?tartjHE@x8TxbC~Y|G0){Yki&>m#nivg>{yg z&!NnHOg8^V7@vMGv%{<@`}e?d?mjE~Va^-7AK@wX!<;vR<-ECv`~K4(>q-62n^8CP z->-z@YtG@ZX1@mTerv{O&LP&k{od;>=6%4fXV&pO>~l+YzOOBe+t%+s&A1cJ0eik* z`DWv;-qLT}18(d;?t$4lG3Wa8#b=$H`kzA%=TP6h5U+nOyN|z;b7rTxhpgY*e6Qbk zFXr#XFLzx&ty?n(FiH|AcH>q>om)ZSCr?<-HW-@MN3 z*YCL2pK8CiWcz)&-S4cgAKJdy@45d!_j^dT-(O|hGvBb^>EC{u@7nt9FYgs*o`3Z< zzLOl~JoOv%lkbY1pRU?fc4|k=Io>g<+UJ&!J72C{S$(SPIG-`==2?z~c~3CM=6Qaa zXW+jx=h6Aux_QiLf5hw)_b;|~mN|arI?XHU)(v-vv= zr*PkXA^ZCJL-ubkv+oocTL;ggQE`8>`R&_3bN=&8WnJGt`mgUFV_aqTkL!ZnzdOzT z*|o|(yx;7fxfW)%ADer5j%&v#&I{JS=ZpJivpg@HQQSY)c5(myi}xh^ePZ9g8{pq1 z@Ezh)T(6!o?@M$4xpX)0y-)R={L0MR9OpyNuP@!r^_4$=ciZ338Z^K2eroeF7LgEOXa3|n^QLT+UeiF|2gMuzZ`A#T-&tXu{Kx9a+SVZ)1MV{wAozQ z=GwAcTbBFLvYAty+QM6lS=w!Oh|P9o*=m|?$+FiLv$Wf+YqM=b)BUWa*(q6eWHC#- z&A!YtNc|Y~WZAAXJ2%UoU(C{Ov%j+0%CNNGo;15H%R0p@?KbDmS@u7QS=#NGAGg_ZbJ}lPnw^_v z|Gk){-Ddx{&DOGPHO(%~vj3-;rQK#%*eu^SR?WJvrP+=wYyO_n)cei>+HLkBo2_Np zt~9$X%RW-f(r&XKw%PLVY<|=1^I3LzG24@8-)FP6EbE++o_jw|=jda_EbX@688+)& zl+ACNZOO9pi&>uYZT2>sZOgKiG&?2BzPFgA-DbzxY*&_TOS5yc>^qBD+HJPQX3Hbe zF;~;<(k%PdVwQHBZL-;(EL%&n9a(m4F-yD64zStU#c97?X?9zdy{wp}-DY3m8K!>j z^O<7RtgD_hTk3dZ=3K5B(`@O| z6*Ehv+QK8;Q=0Mgt!JJ+Z1#ILTg$SQtlupALNQCb%|2{^>GUzTQTX?AIr-CWGlZnK}U*;wGX18V8M~hk7ZT5fKY)_VT8q(`?mi=@wOS{d^vDr#XHos}MG%}sv z#l>tn&)#XXee;`UTe9qjirGq@t@LHHe%rEs-OicsE9Pjk;~j5vWiOlCtWTbgN^a-E z8O3Zh&yKR$YL=~~*(q7}wqllc+wU-&<=;?M&6?^;vvafTxMG%en?2KJ+eT$`n`W10 z*_L9qC(j<`-n)MO@i)Bnex0-GbK8++n~GW5ZNI;<+4ALSwwz|SW!VA6EbTVC$7cCk zzIwlvH2Zv(eTC0=Oug^Cr`=}1Z?ipFwk^$;_@2?s(XWeH+HH1AF+`$x+_c}d#VqYM`&TyGc4gXcIn8!t+0PWSwA*Z(&DOGPCC#=t zVSSFw=Xce9^P6YiWwXwhwBNQgJ0;8h&tjH#+pll4ZCSROX6I(vImImPHhaCz_GH;w znq8V@?<{6%x7o{Ww(Y8He$#A6maP=CU3vC`G}~R`@0RM@>q)cQvg}*Pnz}g`%+C_- z{OopkK54MouGTc`oSj~qv#eLl(r&YVU+cAGt5vt3!X zl4e`7>>0%@?Kb&Guy3N}BD+ zvY#zxX}8(AHp};()DAmsX}07ns~_i26tlG3>`a^O%Cglo+mdA$6tlG3?CmyNyCLnj zmS(49+4mQ-wA<_rHtXD&X1mht+$?)nF-yD6USYFsS+*z5F3qx2idouiw%KO8vaEAX z_Wqk5tMSDw?KXRs%~mI*V=kxJZCSsgidoui_86Z#rsw`mX||GPOV0B891SaGX}8(m z+H6;rZA-H)S@z6gmUf%nYqQmfX}{GpJ0;5=HPkEF-yD6e#B;LS+*_B zwq#lJ_mus{OuNknHe0zR?YEj{r)1eB#VqYM`)-@H{npa#+${T7#VqYM`?kJp)^Asu zU7BUvidoui_C}kn+?tNLC(U+b*>@GQwA<`xo9)W7&UxwlW?8?OrQK#PwAu1o(|*fo zw&bRBbbT>PyUh-?*=m-pq}i4%dwDTSyUq6UIc|FHPfq)7OS4n5>;=Uv?KXSRW_z-1 zHO^ zH_NUpW@)!s^Y82H$GMheYiV|Ami=TgOS{e9ZL{`s;jT2>k!2q!W@)$Cx7%!6_L*Z( zn%$OV=N7ZH+w26JwVziy=cn_VWoH(%wA<{JHe1a;3oEDD(i!Re-d@boZnMK}*2z9+ zsifJKEPF#SOS{dUZL@9mGnzCzCCgq>%(ms(<9zR3pCc#x44|53=VsaFVwQH>?~68D z%d)jJyEMz5Rm{?Ev%j#}a`tT6m1aA#>@mK#;I;X=PrJ?j$Y!fqwkOSQ%d)>MW@)$C ztu||)iJige{ASs`#VqYM`!$=bX3uKnG+R0|o!=i6v$Wf6&1RkYz5HRPl4e`7?6-?q z+HJPoW@}ltEzM5Jvj1Al(r&Y%%{ujaOrDR@?A$E7u9&6WX8+n|t68>|W|wB!&lj__ z+w5Q3YdYMPy!Wk(mYwA<{T_}h^BduT1o*3#_KEPG)w zOS{ef&SstL-0DiR9a(l@F-yD6{?cacIo*?Hw`JL0zPI4D`To*wvpa3Jn$5Y>l+JIK zJy^`rZnIC@Y`MO@Zl|1POJ}9?`}1O!cAMR3v(+qHNwY0k*8II@zcJHpv*y3YQ9sUU zt=nl!vs1FH`R^(8%hGPM=I@K^*`9i>+o`76xmk8YF-yD6K3vS!((KYK`;}srcANbh zo2?EtwQi>?&30tjRmCjrHv0jaZ5x(md(!N-EW5OrrQK#{*=$dib%xgGx8(5s0H5V9 zEM{r9*=aW0c0t;2InB0Y+53uF+HH2C&30wkN}8RLW$!L#X}8&{Y?kkDsWHE4_W5jo z-(JkpZnGn7wsK*bt!Dc>FkO=qidoui_8gn-$+ER9>wLC;tgb9(X}8(e_*;(j+#jCy 
z+m&Wpvh46;mUf%{z0FoGO0zv_c1o5t|DKiC%K4jT_t`Apqng+7XKD5MottHk^W8gdn%$OV ze^kuUZnKfic4gUWnteXYZY^eMx7jb)Y~_-)-&&e2mD2hBS}{wz&3?*eyRvLonr+Fl zwPKcboBgQG_FS6w+mmLeWZCv&mUf$+XR~}4N{#uwAf4YV8y2&)+w6O6wsKjTEvMO~ zS@y4sS=w#(4x8=CvXwO3;;gEl3x8S6(r&Xi+brLO*2mnIW~XG?dx}}wZMM~B`7X4c zt!De1WzD}AH1)psZrW}3Vw>f=(0aC(?QfR7shFkRX3w?Rt}NS?W;?R%m|~W8n|+PRX(#C}wH5*-19rm1V1Gc5arP zRm{?Evsc?}r8VuhmS&e`*=fZr?KXRf&Guy3t~A?`WhWN1wA<`?He0v@oBe~$*0L;r`&Iv&$pPv7o>R=yZnHf$TfR2! zw~}UCvg~Vocfo7(bw#_){={axvTR$LoswmLU(C{Ov$4%q#-{yN)9lY)6*uDrRZ7*)Q2_d0aZ?o;15H%YMC>rQK$i z*=$#qbuLQhH_JxFEbTV?w>DdOOWJQa&6dtc=l2W6EbTUXzs;uK4|07@vn^TnQ^hRp zHv8u`TN$7B+m>dhWZ91vv$WgnbepYZ*=m}dn`P$}v$WgnEjH_1pY~fzvrDt=dx}}w zZT1?QO~039e$#A6mc65xrQK#PwORhnA+>I&C(UllvNxyMjoCGJV~KWNx5_;44@$Ee zOXVBW?KvaT`OUJe#VqYMdy;=Y#JRt*RL!#GG+R0|o!^U#S=w#(u+2IX(tayxwk6A& z|DFr4&Bsi;%|36l>GzU+ZjolEWZBpG?xJ7z|1x*xVRBT}->;qtB<#yXkwr2?WD83~ zf+0Z6BtZiRq5%;BWkM81Q4AuWiO?bJh@{zNk?t9`42xlvuntB6SpZ)z^4?$< z8f?|WR%6)N9`@NhEbk5WR)g)_Ij*-ahOK$nC-ShoH`uETHl81xAN9wu_j}m;^RT=( z*b5Cdo*$eaMH|Qb^{_YRVR>(`rx|SjxVX)XS6qw~ol6XS^U*zdSl%1#@djJoC5G*c zVJCXn^YgI0H`pT#Hrh3Y?TTTidDv6(u)H_egABInVaqY>Y!7>E9+vk8`$>b1++5rI zs2ao8JnUEUu)H_e-3+$uVf$j(`#tP|d05^X>~;nl&(p!*7%X@=e$6!0%T-*GpJ%;V@u-oKed2g_h!S;FB&KP#0hutI(%X@=e z%03h9#@rHXV|j-D#<0^o>{@wP-W%+51{;qD=SSrjcD9Fohq(*kOl%$R4fZL6Wo@jh zw;IFNJgm;=&A{^BV1FLN&M$Sk`N8>7UkrP{hwab9^4?%?GuU`;5&p)oi#@E)+sf3- zdxO0?hJB(`cJqVq_k%HiTVnqHEDy_jgT2UL+ub}J{EcBdJnSubSl%1#=>{9m@y(Aq zW7vrv_R2gg?+sS_>*JWv&C|i(7HGgJ?yD@Sl%1# z!3JCQuvOpR=zKS3KQ0f;dxPEEVB6hX8~pYC^{`*f!}8u>#~W(` z+Z$}Bn`?u=n>zl^_ON^9VPl<{E8jiBVEa65dkkCiu)F4Ad2jWu3#|6j+qX)V&y2sb zu$X;M+Dm>1`SZMl+BV_pBu-YkS20L0&@p*{9|9xLdoeZIoZu-^!Vl zlXKsZ;d$6bE3fuP!*pIqv{ZZL>|Wo+S;iR$y3gZsW!B8Wa9;!y(n9^E9{2s3U9g*3 zy2sxea%XpnX3&Z=30zs|yTILxE{>M|J?(b)!~8vMaQAC+?`c~L%dhL-uN#-&FxQ5V zChURZyFls_?EruBAKZF20vsVdm(RQ>K6^CSC#W`vuHu=_BhWV^x_PGkf!f=5!()=I zg?|5R+JC_v{;=M~-%twMg{&;lo>=1vc-N1=50ZW(!ks}qI$Gf5)b^pW_plU|7j>0b zhuY`t0^it~vP@?(_l)jb;KrLRzl=v+qwCos9>JM?~ZLN1(6hBD9S41^5<^Lwas_`L2u3>{zC6 zf7mlCC%s?X+TLr=DSM{M>fY)Mb;sEe`gP1h@t$b(e3azm;$H=F64GKFz9*HZ#X6k} zkmUt+X)&yGLCcV6J<0lJD))Bo8%n;vBY0=+F0~iiK%p^ zkJ=aD=KEaYPgd6)&*8PRb-c%ZE$DaWA;lIZPEoEN?N z{iizEZ)~_7om!)pqv4(BcJPaKpZ!ON16aY0Qv3KHMWu@2)}C7Wp0v;780B!*p|Q2s z$s0CWI40l=I(+|2t{-uKc>eWiIa_aXVYD@T!oz>|O2;^n;YT9A^;UZRdrzr7MjzK_ zmGSgaIl~#sT2(aD{t)cNmwuVI(>XiGzG2z2ZuSk!t{s?^H@gZBGg~=2a<)?aLQm&aPiv(d-%Jyp(;j_$``eq+f&J;?^B2Jt)8@U=Vz&Xbc=v7k`=b9%n>HBGrp7XV zDK=R)waAx?Up>&?$1`yZh@5lA!6(rvMi=_1|2N^A>d{4T zIu9s*KQPW2F)NxkVpfS?%RH5-XQw{&R2E0xj_u+5uqm;H>WuJL`;zMm>&?T=gl>)A zKLvdoy}yXM8omEI^9+sNKl_WgtxF$C+q_wmHec9;4ktBf^X&<*Ob%0f-2ZB+ zG8ekL|4~cjZ2RBUQn^{@U^#het=#49b=2Qlxx=5ok@}h}9aUx~v^3b`?LRngvNllH z&Ctf$p!JdNKW%$D*C!VX5bF{DE?7)_*S|)m&Qi&KvvlD5)lHuU1+Tc8ID)t@H|ELp z&5tAdrT@^T)sN3={6CC%qKosMLw>uuJ#Kuzi?u8+%(TzPdd?m;7I;KOA#~^(gi-ym|SKI{7}0J8Q*sDjg@e z^J>OM3v>pb&WJh)U3dNaeVkX&vl4NyuC=jo+^+w^sq@q}!)_;MGx7XkWJIw?A8VJg zW3%*pBKtN`jq`XizQxBojt+f8IGoa>gmTW=y|Dq6?Z06H|NjjyH@W!flP89 zjm{0O@(q%`xVQ6Zx+lkFj#%hn$>~(xv$oz>DJL1w{dnmUI5c0Rb8W`yO5J0*s*L)R z_^#2}acG(Ipg7ztKF1SWqktjTnVjpnMR*Tx+0W!Xb!^JL@M}HZ#t$0JEFO#fTX?3E z;y=Yb=TqL%?s?BAo;yBqZE5HHAIyqHfGC#SlUkppzc z;pZye?@x>)-TD-GLS4$s-e$hsZcnD;!{sym8N0-LZ|FN!d{a~34Ur6HnF!Tp111zA1%FK7A*g3lYKqVZw&TN&u+xGsO(WZ`D;4+?KxyX?brF59f53Wz9^Q#;i(Kx$;x1r zbLd8z%+aPZfJtSnR5>NQU&yz+!~336<@@3NoKoex;r;Yd44LWu+Hemd+;6>!a@&4k0@+`Yu@_DW_ zwjF3sGP2`siRzF|kN94L*^^U{CH;4HhJ6ucOMXJzo$Pe3T9F;-rmy1vU-0h^54jSP z0qLv$$F`@f_tVIZ_#~cg4K4J1OI=$_mCbdfzL8vGUJl-#==}PGm*8AKya)H%;XQS% 
z&b|21MPG)B570L~V=G59Vr88rR!BE+9Sn|cPp48Dno$3zB)?pZ4mINIQ^-!fM4|$p0h(0-)r5>GMuR7kxv%86Pv$Ps0vrb--Maiq|nWNj>th|c9b!D^( zy;opUJiX_Z`eS;(CVGv`(tGOydavv0eI|WUdCeDz-hZW?qUqhxH>CH6u#-++dG~$F z26FjC(OHCfdVdR;klsgz_tg6k^@_Il(kG+$bjs-20-klPB)$8wiO`$%@$ZV>PYlSZ z&_=D_OX%G`)XDdS1@wMQV~NRG`PTXYJ=1)V=)JA3;5^gQyDLZU&Y!3B?x5_-(t9{C zA-y$Ul-yJAk)Ga%1@sec=C&-ugaHLhr7a-n$ggdqx4h5A^h2 zr11{)enMCJy}hURkQ}}D_v0VQ{x2xIvh=)eSb}(!d{kSniSI@h8 zF{;}8KHzliY0z%@IkjUW;=X!)P+VPNj>g5#v^}B^8doWXj-QpzdIvpn_u$feTo_Z| z<@rAwTbylgFy7KQ3s}V@=MnEK_T@}`oprgxVt=-5Jz5QhmTP-jp3c>1`5EaBb;NBd z&P{8qaHzLSDeu#e@@;!w6k%5>NcD8ETPn+M;+=%*YV;(oA zB9>Ag<2qY8kk)>OzQ#KZsOjutoC&p5rLK!lhmcElOD%5wq>ws2j zbCEsJd&x8KOB>}k+#ECWGZVBvV1xLZczN4mxX_nm?SY;Zm9uAgd-DQzK1-K)&4c6J zl~a~QZ$CRs`3~hhXme(+t`A+YavHC@_e0a~_w_NC<=$UAW!X-|=2rF@Yz6;c#kN=U zti*fwu7Aq1pO_8inXR=*=4dot(b>K}SK?qyMuV}2XdCIfsgp~Uv(P&?M!*L>=kpjR zP>0@?HKtHb<*BH06TZvNw!e|KR=1%Z%E-^1N~~vRPj`{e(%I@dPu^e`p$CfVInR^6k1cgIIYQbxXcJNBb-`dz&`s*;Jm1C)U5(=z;pPls3k)X?coN zhfSV+ZYPbqydF!=Yv)CkW7KEpsrwn~v(96tz0%3o*lNnQdm3nbrG8ID7F^uBW$eG02Y(p(cXN8T zCH);}FJvS3ksarH9QI#V)q7x*{OcarpsehtwnI3o@U7ANpEr5`mXGi2+3HOi*e=!I zIRJj(c<8^-*sUXBJajiah}#?Ik<4D{S*ri3ewTJezx&2j+R4REd}7z%V4u&DZQ^&X zbZDCN`~Brb+tUuox6{>5V4__zFzB(<-y(cw3+utEzEsbB|NQyWI&&HwTW~IMTr#J` zdDLbP(=pza$@feW*}ozhXZj;QQhPS`VUn{SD@Tva?~N}t8Sh80O|DI*Uxhz=Aj>7r zIE?sy`rG(b-i?w?z?O_sEVZNefq#KU%4g_Z_Z6MvR)ZI+*J3cmSexJ?g|7KmLjU854AqhE(_LQX zRCmAVKzl#Ib#40TzG=eqKXMOG<9ptJEVvi_e(1(D!+W=K*FbNd9jLbtLZ3Jf^|W8x zl=@xGx2rD~LOWfhv*+3W&ZshmdJ}uW`&+qJ{i{=N=$G?8Edut-lrcMSHE{ZWCD%s! ztnstTy`T8fV5dOmMlkyRO{4dJY65>E`r8P8LZZ_)Z;-W7qS-fn|Bh_ZzjoPZ`uB|V ziK}4OY0|zm2E+&1`q#iuG-`hvcDvF0nFDO-b%`#!yfl8jhA-~MclBHpExYp4XxUX) zM9Z%3iI!b+4f3;1d3)B zSH<-r@~LykWh}urvPPtHNHni&$V9HJDdG8*qxmisdfSa(t5J7#*Un{SY;N@^#vMnM z7u2}lQQti1DOC=Y|KghDqj{%%^q)0G=lOn&DRe)PemP&cZ&v3bY(=6ol%L|txAl$( z@AG{tF1n9C#`^dhuaACQ#5_P^JL7z0N;hHz#s6*&Ncx9S+y=ta9^H(o-AoL!_2 zed}&(*-OejpL_mQ>|r}GnQU96*z8&6e4mZi@5%mmgG>Ds>wW3WY9r>VZGo)1CG z(L7UK>f_GPLDy^_Qu`LtGsJx^GXKE@Ph0O64o`Nhk+*4=Q+Fdhx*Y!zzAyIuS>4t5 zava^T1vBKwxK3f*6XMCYV+FBr_Py_~UpFK=xp@_?!^t=3{$s!>e$ZMd$;2hdh@SUD zW8sSFH*oGt?b*1A{kg)&%JpKbL+?%dxI6U;=K)RPzOOZj%Qnr~$kUnClZSeyThl^8IIc z;@Zc&SUa{z|Mh+$a@EZ<)BQv5Np-)AF;1xakGe7Q@ZQakt7f`?|9yGgFV-8=y+|?9)sc+@8siv4dnW`q_9(=W)e+HNv2YpMJDb=F z-{@sQ?cdVl3j1Mlkj0ngugtDwe4Dq?)rC>z86QJkZ!*U-t?mARB@r`_ntrIJ5YyG~rA^XO|<7uDMe}`|N{@2R`74hVr98W^K!-fmGkjm`oC8*xN20{5V=#?&o!}U=OLpi=W<=DhZrAv zjnevox9ZrPtem9sYxLWQHZvA+>zhhi=LEeK`?UwQQNEcxl3SB_wAw%!3k$#xv#a4#!^Av+qBpP4>>(Lmh?oGrx|%UVRrrm$0Ap z%Kbc7E?@s*Y&G=GSwdhGL9X7^r@yp4qZ*zIA|SGBxUYpHTEvSj1QnqnTx z94z0$yI@NL2@?HV%5r5})^+>C;DhET)aS_aMf`O!75w-N?F?h83+|>~!4Pwmpt=4_pIx4k|4(B3 zEPkGpz_z(u!aIJ9Rxb`SU3BfDZ`Q6pWX`Q;)-&qWIugm8)-qpkXH18rJ(N*jhpNnnNQpJ?SG&THHwI`~qN1r6?iR=}OKdRJO zXu7&l0c zPoS@EOo80blntXV?%U^nt|9axMqBBb%1IaWUv@auy?nW5yuARuPU)SsIiGNS$gW#| zFT8yXTU%6@lQrb*X7pF`DIHM1bRE%-ujYCk^wG70@hyEHm({6kyQ6l$cg@GP&)<>P z!I&Qdj%Y!&fxEqx@bx^W(eNwv%@Z^|5_ev_Emh@Q8i3M=Ac2Eur3DGe6_z-8A4Xai5K}eb3mD41IrZn>2dmgs^mo1IoMhDP0T2B)jnWy zkc(V=hHOf&BigQfiey#y!?5$}KRL@qsm={vR`BV2EOS0hd4>?&##%J|xfghhqU-pn~tEbE8Sg6ZK zh(5GQeKcJ@1p4USWd9i14Q!3z#Fs;KZ{xj@jHejun(dhg|GLn_7S4K;K9<@)7VBmE z1m+G}dN-Rld+P4APx${T-$C8G)BmJRJl|by;u?+-$Er=#Wo_Emw?X5<)A0pu^2@x} z`ZVDl?P;3G&giVI>0B+^UvsFEo&5vZS-?2ebXfZn!gk2kXgyhtdek>(Yp7rAz_*m_ zxc{lG`GjPLXV?4jYOinGuqLw8j_jDNxg40#)-0p1l9hj28QOAwSf;g9(fqKJCzC4^-jsvYjzYI#bX~IBT&wb z@2z|oyC-dzj?zxsw{i>hYrOF=v~~2SZDY}yFpk<4n;6sD=A3Sf=Ns3M-hnHg`$}nj zP8{pX7AXh*An@)#ywp1TRxf+R7>jnunS6(S zI-W!4qxm25{551y`q4c89T?AA&i+mOgydd$?o`a=bZKDDT7CW(eK&tyx4(>!l-sfR 
z&-z^uLoC|`JEppY+s1o_zo`6S$WYPyFjw(GVyL1rs$(zwroC(0{wBE!#it?v^X;9I zdvEpG_&uddmN(LoA;4NJvQ&8iWT=aI;Vf^0cuBV6td@B_=y-d0#)HN8*bl`*AC*q` zM3q;dyTi}_3#e;J>+n&rTw!-9`xdrE@tP~c{q>ZQd>n>vGyFATDUE-S?PTq1i}bKg z{F(MQoeM9raSPCMvvIqiW1+mSiu`45Tsp23f7PBh10LIXMVdb}8z}k?CJ%X0GS-ae zb}zauwqJ^0N(FhypCFr#UeNjU1UB|#yJJ5kE1{m|<>e>PSTT^vXa4;*=z7t+Q1)L! ze~Ze9Hq!&Tm@E#kRV$&p8qd_LtN-@Ky740N*GPAsM$XJG+ygGP{{?Jc2xEO%8UMLi zAY1q4Z0YA1lQfd8b$|`|WA)diVFUXS^6W2+-@<-{d=JY_z=svlJ1nF1zeQz4zjcB< z-^#Y#%C8KqmmrI#$9pga3-$N|=u#tEOOKDDjP=FpuS@G(Y*mkj+oOkuGl{hn9|f_N zY^dTb#WjkH6nAYxtgCy;r0n4Jrq4m#X>AN?WY5H3{%7L8@R`{u)BAnA&TqxFaGYR=V~wg(R#~Rrn2j0)i%v>SX)9m3CFR>NIhB9`%9X{ zQ6>1Ma;ii8cK>E{{j+3%4Dz55#Drm#MvS4hKzHbLG~zD#}nrssp@I}JXhZPBv? zXB~PEr=LdeZzyhs#y|OKmfjmH544i>E^15Bvu5r4Idrr3eI8q?_We^d4dh{t_gC*l z9yG^EY`?z7vC8vU?nrs0KH_!bZ`7%BG4HkJ+CBep4BJ)!+wb%8(sTAfWne>j2<6lC z!s@YjQEi-seyYx*eKhOg_e3As?B=Ue{m;e@|8C-wb{ru4X&e*A>=qxXoi#t!)EvSJ z#z$_gBr-!DV3h30srVpWI~tr_bOZjco~>*|ld%nG;^w~xK@-tddEeV|eK(mc)Kjlh z89E@B8b^vpHz=Ofnhn;mi!bxIhgNQm2cGJAu@Ly@+)CT{&-~&7UE8YU|M2z8iNhra!hQ2i zaT)DJD3x0r>=%e@l;gIsQ3TxY8K)$<3UCeQUhwZ%Oo`m2xv6xVFI%mC<=2#s$FGF3 z(kaqo%Ken9>!%xE6_3xOYD?uR<*p=GoUinn_p@5xwS=~~=Vj)N?SFez*&H5)ye!u3 zoD2iEK@M*D%$_vgqf|AkJI1H+Sv~i|ACT<#AtLxXA<*BRDPd!)No))Z2(tGx+ zh}XP(UUCYK&6I zPvE-HOSJtJ^%&3SUks03|FvQb${YOM9=-*5w^tB8T}oTDSMUD;WCX_8Yx^ zbAW^Q=V;tVou?G=bJk$_2~EVCb;TS0{|eq@_z8{lewcL3_~~V!RWKtVEibHAHa&A*UHB8-}rH$_`&^;c`klR{^LEIt(6Ul`Nfwp zJae>Jh&<6o*{{C>=blm5Cuvh+<9f*d(}v`JPN^~{yr+%Nihk2M|0DB!NM%F%Al#1| zZrU{_#0{=*g)rbaGQ1yIsyyU%?eOq9ZTu^I5$-#{ZFZ;j!oYapKx|BGm;dbZ#p#(* zE{4WW7`%%crg*!xuv9r)a*982{#U*X8C4wdAoj=6n=&sVGxmIk`JRTp+|pb8xR`S9 zM?XSa|1Gt+_E zlz}>Q8nlVWCE0Vh!g)8FV>pI3O{XoQtMu~&qB(s~>^B?PP+i&hE#33&^fAtBr*VYk zwZZW>vm^Z9jCy*&8Ou+cdp%pWRC>Yt!$dpu#m!?O6DL3qlcQ(4d^e>(O_<1{xmBo0)5==ZLS?X%3r>gvmG)I&c4;KMy#Q zo_grV0+(arI)S~r{agC;9q3D6>!H83*b+H_uCYF-yvACR0p4fIh2u+uZT4*W%sY~O zma=a(_}vZvd$0*t=Ht-`5p^>zg@=z|vz)JE4DdEOZn?|fAh+?J#o;dIlD!;`%*wu2 zcVvGVG;y+m-POC_$cLY{>OAd*)VQu)_I50M;J%0QNi0Cwy?`Bt4Apop*iEUoMp^mI z7i2rJNeS;KWO*-tuQfLJz*~b~8~Dw!aq7o?_#?^go5XyVsc&xF@KE9WXjG0WPa)3< zZzt=__hO!ee9DiteV!%BTTZ1t@Y>}8@54sv{TYg#;DhBY7o%H}!IjHXc^wiSY=fbu-7m$eoW0bM6OmynJ)V>zt)2Npmh?9|f>5ua>$jdDnZ>s>D1-$w}Bryoa=pi zYNy|u9BvZG;d(haTq++Q$l;FyTh&Mo5B6;~nK&4k$jjkJ135gAKIP@`fg8jRcnV!Z zIozs14v#W;%37Vb8$7(o%k6Z}Gb^k4yOov0I~7ZU`(4Q4eC06e$>AX@A%{OgpG*$7 z_kEHsNDdXBhI05r+Ith@Gs)p6y&TRamfWh29EP!%Pu1~~CB_DHXJ$w~<;df*F^f8bB zA@HZVoQsq2rF+oFyf4KMu5`?^GCVnpI*ljiOE>6)=&3xBWZ`$nk@4gS*@S>6CnJ+a z>yRhU!N-s%4`X+%obluya1DkhGv5PGTt1@-Po_8F$z*7f=Sdh#hI368@5--CL-v$2 z9ILnvIf>dCn=|(9;(6DQ^3)ydIl6U%@@$kLUYLqJxp@n#a{zwuM8Q~E_*Bma1AMB# zPqMFi!T$g{q5s+wBmZc)EjDf@V`038Zyl%ybI5^$e@%3{nGOsM=xz1)lgqOhJ|nx? 
zSSzj9YzlTk>;IQ92GMyEoR8$%%5;<91*+R#VOSNt`ISaC1kX7hoXU&;IE zeW01eiWkzSypR6#b<|bhqbC5D_qm#Ha`1tVe$3zteDwDyAKRQbR{SkCXJvi#lNu-d zANc5RBd>#v73UNGnUDUs@00oHnzN&6taugeeF#3t_8vyeDBpE4zIuXW(~Y^}STWI& zq2c-qKNf!%>(eG&pQZItejHVAecDiD&iM!I=!X;7*f(5Hesy>tO?Ag~U->igW?jVex;})xQ~s<yH>7?<3N6iE?cp@GzUi&pRly7rem+I)6ty$&*CGZ$qxbXS?x?G5M=H=kHU< za`S5o2T(J<%beG%7&M|G(~x&`O)#&{YE3O zIpT+Y?!Dqy^f(+x^+98u-RN>1;64}J^5yQHI{txNNS;4UY^HrB&w=v{Y*j2LaXu@Q zYjC@~we>r1gTwgLa)W1k9;lv!d@eGZhw?h2by|MBZ?Go>u#eC#C&SJk6v(jPc{js& zj?79%vN9XUyvj@Ff%!eO&X!BtWNSYpHwOjnS;&}2eaiMFJtN=xV!UxNHsdzMqslu9 zUyE!ZenbDy@%|*9H!4;B3ok4dtHDQ=Io8WanX*o-d*FGeL2hlP%UfRHge|ot;n|=Iun8>qhj4Z#4h*m~0k@7Gl^2+uy> z>;Dsz9^n6X(g$b1c{bklCvN{1vfIGC4Nln$qd{$K*6w!VJ353?COb>EDfko{(48C_LAdo}F1;XDDHGw7T8xrU$Hod|7TBz}qQM4Znyc^1u0 z&x)Rf;~%Ft*c#EHpStB6j)iWHR_M!g(U50*QeT*_zDlwI%~vj8UA!L2$r!wRMX@O~ zj1*IWTl1xv{TJ9<=|>A~iTjb|ACadg;?QM-pL2N0#6(q@1KVkOVFF=&{NanF={_{-pPvA{X7evR%C^GKcjf% zw2#jdZ(R({^J9pA?ps1R)#uAi@#PZv7@L(3>CD%}_aLSY`I+fAdXwaJw4Q7Z`)wu9 z^rgdW3jOTBj)ik+Q^B9+bqe!gsV%Yi6+H9B7wH)Pe~Dh^b1(hUJ8Y8PZ>@epE7x!O z|2JgZ{BoFkIW(6K`#QG8%9WK90N4A$735wD_r!~CnbDWn^+D7t-@{xqZ8 zkbrJQdRO!;^eLea`wcNv7;71<^QZV$?%f<=GQP;!%bq#WILWYVUG6({@5bf^zU$QNg2#i=>n#$x znaufISG=~Mpmh=$aAN*dImD9(I$io`%$M`(B^LOaoui=@+SAPri@l(pd3{oE-arvm?`0CdM z`gkJzs*R@4BUw+1Y~}Uw!9nWdP4Lw8@oV7R(CgzZv|aM3{>;TrhWeP-B^NX0^v!&6 zIK~%`HYA6j@j?j0e>WzRUs*4F@Aq-(ec7Kg;r4V;PO*{XLgz)cql?Pp2)-1;`!+fJ zis^Xo;Auw)zb^2W$63l4#PON3|KNXI2j|CPgVp{s;I+L!mH+nsC;q+Gd)oU0g0a9g z{{0`o>HG+|e#Wz|RU;QuJ!Q|S_ilSmnU*SkmuFg!Z2kSJd@%16!>y6*R;kDi^XT&K>SBTi8)u)gm3UxF9fN4z%o zE?-X@RQ@IPnd|YEhrX=vu(fNf8Mrj;$d3oPouHp_u#qNwuQhv=XG}$V^8t(W^Cl=6TZ}63-#7j zsb90)hUrd*ro{AaoFTu2yh9 z{#WFeeg99wxBg$$aVvbaI&Sm!-Dnl|Lv&UB#>+hYMZbR04<5)T%O|?=5BJ))nQ8l+ zcn#oUu{=2&8WY=)IXXrz?{xf`L`sL1OK%Z(0<9$Ot z#N}?y9DJXT9=mx+WSqIrQPSnn*cjo-<^a9U)X^37d%5yZ(pCOHBVQ-~hOSC?_d-A9 z7aHl%p4jGeKUeemL^r3y;FnFiB*=TPAJIM8(LmcVW;nQc$pET<48^ESC&5kqf@jf{6YbEN*z2s2##9h5T zN%deL4Q&po6vT9=!kazuf5`kOXRFs`Zy>ZD@oDBfb=96kUHM+LyAF@9QCC#?oR9aS zKED0z$0yo%cLC#8mF-|XX$NzTrL8>dw*LBIe;pGodx!CS3Hej`QeSDD<{->Z3-9*G zr`|p9{nyLC)dmXt(?VSsZj(HpL_banDEdSo@MY85o_>jF$ z<~bBc2)~P8xQ=RItMa@t*_N$W{H1;#4)3Moscnzt(2X%n`~M?$j?0edFFAPanN;rRZT!E($K15-`_$gb5__vYqnwh*iJVV8 z0^Qrd_@w8Q+nq9s5gqUJ47}$c2bpKc#t9lPdR#N@e>=94cFV?{n()_R0x!ot-zyr~ z*uuU^@&{~39M8pTFvfYA*^)RfwU@>6^3ehQ`{r~{r}xE~804r3yU*I_ku8;Lpy_?& zezo_~bQOOyqw5oKu2M3mx%+4*=H$3uuemwq@!GeEuRp*qX}&+&KHC4~67zVh1&Frg zndb9$i1z=ho-vo#zah`KKC1Gp0qCUs2-X1fe=yqr&nokw_#8@jJEL#MX#d~yj5eOD zc=|bHTlU@BXY=xQR+Vcnc(q5ar!{hG0j8moBeqTF@>QnKk1ZF$pJw=~AB*vw=_0V-<$iCY1@>v4%`JfEw>}p>82;?)aV%5( z_u+x`H2R8PMYKK3({|4~w4GBx+Y>x(uP1IuXxkan*5y~}Z-%zo;|pznjZQYA?Ya$U zdqF_km%3xxZslpag{SSwp0?V*^=1oli#{UrQzt3jrhct2`v=cW51!_}ncQ}HJy=kn?{oy)&62;GW8ok4CT z)9-UUDpj8Hc`dcw=@hc3_iKfGg(Vr*@O1?#W=pwY>RPh5#TrrIBUnYgQ53& zF^=~IIBM`%^6@S7Tm0U>j!b?P8YgnPFow~3X4%aO^zgQ3eu>yZa^m!zYiMg6kCF8S z=wc&V^Co&+PezqLF+2WvjrvEHDr~AMve8yQjzP-pTj%b=qCz zv$iYf>sG$62Nd*mPuaG7U-@RK>OG-JU+-+t*9k#i&-Z;@M7~CKJ)tXo-QM^0QSPsx zuZnN9c1UrQ&P32vXO`-!wL`jUo?BP#>(f>9___}9SH*z3myLdh_*k-OafkNgpCX#_ zPTwP|4qaoOeCmAczLksCj^l(~k?|2@vi7))%@$q8@a|~ijpDArZd<(3rM>0o#~+Aw zo9T!7f-m~EJ>lDJda3qLBqr))jMLJ$hV1D4Qs=7f8kL8-GY)<>x;*u_@JoH03q5VE zz#i64?7!*DmEOJ{R!4>|6K%|{FO22xBEi73U!bdrzH|<8`f`W(n3JK2-Z%f9F-9YO z*}8!Y-AK%2`qJuUXrh;)30{USmu#aiI-hrK-`7X6jd|X+5082G3%x6d51Yj^pYt?( zUmcq5TR^kDJk5>|Y35_auNTm4wro3euQs9C)5u>v8#67S+3i(mGqO~919=jEUegsG ze%jOQAKc%Mz8J4Jg*IlBKZy?=tlZA`IKC;`zKd8f^0d7dIu+Tpx1hPTvF7FRb>v2U z=?H1-W5wY`wDt11XA|0<(tx(B2eduZ)An*t+ep1@vdIbe}4m==;xeetb~&;-lrM4?_#VpG;fLUbuNb)BWkO?q4ig^Z$T4{Fz(8 
zpR+uF77#Nh_M$zu7uQSvkVn}C`EQM^Jg^o@)g^@%^4HW;M??#oWQ7(oi%V&c21~F9MnsBC!Y&jD$jyH$v05X#vY%0 zPv15O?~&7Q`+C>o`S}5!$8va93*o@CN_Y<*jh7`~E8zJ_0iKuXd(xiirOLl~pWM^F z*TZ}8{xiG>-^<*8Gr)IEu3byQ^1weE-qWsKeY^g^^FsnW7v%6f8Nz|*vG5)|TYEeY z@_erV&zu|{^17Bc6pYHZm&%Lm+47m`zG_>C zOup)gWF7L@IA7)F53qU8X7`bo_w$`syZD5hAakGFs2{8aa=K1G$Kt;&25VmhU(K@z zz(MYFTy+I`uNCZlm=(=WgCHfc6;@Agyhe-t!%o<7wms~9QjS8P-o z73b@Qu(pBkVT+%X{Wfs&b!zMBjFS~V3QqT5<(=w3f${S1IfKU5Pj3c(_x%rOtQb>e z6MHc>YcD#972hvDWY$yqd1PD1tsLuEz5!0|FGI%(KJT}%R5^mWiFP~ z6WV7d zeRu8XryV@+o*uX3IJLv$&^b%;6aPZS-I+_2>F$ZkExz2tD~G@{>&I7e{dk#j>eo%a zA8!5ioKocm-A^x7uG4ig`+syDS*i?$rkfxW)~|E$_pTqn3{gM0sy^wF`tfhdi@x+_ zZq&Y7(vH;WU&y`x ze0Wd4J{#UoXT5TGKe<%dC%hjC{b_^tnOOZF58wBcDig!|IiKnk}hhFc806&vREl?@H@3&d@z`ZR&T?d{u~Rq~SU+i)%2j!wq)s%EPW=u;rD9{g=k| zqW{y4VEvqz=6u3AFXqRk`^vYKZ2pQf6{Vl;$`OcH@NjX@+^N&X_a0rE)p2RZ+g(p} z%vz-_nl+-WBZ@xR&$}+(wa;q5G~!(g@7j5XZ}{X%oIM%MYT3A*|C)>G$7bs4=3SVl z!H&538|4x9Mh4tG4Ypi!JDucZr|>LVrMze@=p*^CHC5x0Z}qvf_Z0b*`Oy!u`xckt z!y@)bu8?mE;Iv-V!Bst6UlvaBpWv3};Kn194(?kX?#L{h&TA6fk{sM!^6hFT-*a>A z%ze*I@t@#+lY{#P_YRJInGPFaDYKw_CY>|Lo}C*`NHgZ}2Q!Lt$~1=Fy}6iR04P zDxF()dWVkw7+#t#D_^SrE$!F|-bGx^PFzo)_3m6>-gG%rm-ddfr}?b3-q?O6^TQ_+ zYkB+SYz6;S*T(Xv{7>fQtG?Wc%;8=cfjgW>E}Yk#pTvp5yygt@kN>~VYxYaVS8`r+ z8)C?CobUX)pV!=jcg^sy7l;y;pS3**@83c)bG(0I&` z*XP>2W}jmD*6euQa#e5Q4_!wi^OXsiACqU|f(hx3|i`tii~piwxl zd4lZbAoalJHIERTgZy}?2OB|`X84NcHJ``Mh4Y$kOLqLc<__?s8P1}4&1aEQ#p##B zqavO89e5L+K^HV0S;Vs%|6R-<%xkU*4m~U7<~1j2TseK4=}#{3c_qX0$1q*X&eG(S+~)xp~cth1~yN%xmg=G|820sr*_vuer78{t|L({wrox{3R=}X*(c%I@U$!t!BY+m!H$U!50 zc^x{})0bDtshPe!_9OMJR9VBz&}v?WzT#zQ74B_b^FsKO=bg=Ko}+i~VqUYu)9hLL zUu0u8ETGwNPqRrO&HTLPP6afxSn+F3Xm)i2n)L)UD|?#V>*tZ~))n4q|AqAK4({(~ zd~EjOMf|4O+-xp+nPuBp9XUh{ltA$##D+G_TqXkPO$&!6@>{5hzAKcDsdnG^EI&uboAz#p5} zybc;SvKKEvpL+Jl zr+HL1r&-R;Yt93|;GHd?p1*i}?w_A7v=Cg6~ck%@$eoz8ZS$} ze#!Hbtp0^9mD?BtCGDABs@xyK(Y|}ad+;s@@4H z@I0K?JT!;r(hv?jS}&8d8$7Q|ZfK9@pTl|0eR6ot2;snUN_Y>R#U9UzJP+qJcg^8B zE`$S*=AXm%Jnr#)gXiJA=GHkpUk>5Gqxt6$kF!_w>tLRT^O_sv@az}DfoI?Fo_<~D z+p}l*934IzJyS03QpPruh-boi%>jFxY`;R7yH%b_^GH{c|N8$dcl%@XT6$%<+h0Kg za+-5}?sjfIcY6}CKtxV-ZVv8ZXy)MJ+%0EiW^=GQ4@Ph&<=}p$F^=ksbGLJ&T<-R8 z4|hxsZf|R6oV%SH<#M+Ndbq=LaPKhAb@j!$+qwBO_4Wd;lW}uP--I|0BX{d!L7%&Q zu7mSiv7`CiZ5bP(-0k|vh~;Ew`e$ne&yMxah6T@N_-C!bvm^bpcW6)D+-=@)UJ1^3FU&&L@D%@9c4f>!1h2J-#vi}I;oDos!<^GG}??XuL z72`Wvwy#n7HFc@{VYJiYqI`MFk?8y!x6e#DMD`)=2@jJvk@I3c&b`(VxU*XrHw*89 ziW!(o(f|DZnIGl)`3?HnLphZ#xxIYUW8XLBneBbj8E>MGJMUg&JKpJ=W-dpqeej&& zruz<^F(X{s`)W8fSHt_{o!`F}^488*&^~0zXp*ajzKoF<>D=DxoT#!E{!}@~#Gg_} z2kj2eIr;$O!gNoS&#%|PD|sm?rw?wof20ZS(#qflr`o+0JZjeFmx=q*HW%*$lO0`t zq31s|{}2ClK7nLNHoBW}T5O+fKUF%Xp?=@BW)^Bqv_hUb-j*kML)zh@R>UL+nc1BM+9wATK6K&7qKDYF4N4}>a4*F&F zeNSWWI&-bUyM2@Qf5gr=g1-~HT<^X5uQ_4WGwh;#x!fEOvTFOkr9-bWri<~K{mIhl z#R2%T@4uA5+c$F3_hGwk_qNV>n&_mraoW2Wwrkade*@bU;=L>>XZ1Jk@BSL^1@l>h zm8}-b{Xyf4G>+nm?R7M-zZXAC3&Ds_Tf%GoFPiUj=MU;#r=JVtYVx>YPDaAEN7$KE zH-huma`WE(E#zM~&sO%hX6H4b@APFU@Ah~5#<@QezIJ1;;=0><7vX=xF`oQRzj8~I z)%>Eaixp23r@f+YdvRYw=5_y~?uiSvHYvhRRQcWya|o}xG0T<%ZR~ycjo8)8p5Di6{e_oT>CT3VA+V47 zKb4$#{v4HiWKZFDzOMeFv)=2hQFsIWbLX%@PmOP^zHVsi*2Tc1naJkzv||ZzlJnKj zRQ7yx=5(t3k9}c|mVw?m|A9QjK9xQ~Pvu)ABkmdYvANC+)H~rF7UJ>x(i%R?;V!ZZ z!t*a=D`}6%liUx$QE;9q{msbE*x2`LeC_r#a>mUq$ffA^EHPsEtq;k1m-c+{Zg<%` zf9|Ez6`s9-4T_-Eo7k4nXRhLP#=)a2%2#N7r5IPwh==#TPwOYa@mp{x2C7Yt&whMK zy2RO!=TcsJqcRr|AN-JeS1)bV{u2kod->?U`8hn{E^B|E`h$*Jzh)?BOgr;^t7`5V z{6}aECVA%n;TAtmw{>uByf?|4(fMwfF}nxqJ#Q7IqvTfzMl0&hmkzzCp!FB z{s;Q#TgoOIYeNT}yDxd_2Ht4aNBt`M9QM$i|1&mw)?g=cIUO4l+bKIw*XuGqxj*z5 z=#b)N*@qw6cm#X9@%fGI?I*!+HgNe&e~x#7z01@!+Si5Mlg!MP3?Y|qSsaPXYh2TZ 
z{McD1e}-PpCh~55&j^FeT%-icbt{TxTB)~ zC7oR%8dGL(e1O(GFE)QOdZ0gBL-ciPh2yii!}vIiWfm}gvN&!L^~N^W;+iy;5dGID z*0os5e3)BzG^q18D6fA48I8;rsFB{0#qm4W7$SG?T0EsQoU!u>KdvDI3skwaK*^nQim;w9$QQE{sv~vZ?iG z(m(N3awqwmNgU$+*dmj`Bwwc(DWH9i&)3~fOy=r`zPIpSu~O(OKZPHO+hOBNAH!lN z-V_eEpCFb|(e535wB#g|!OQW@CWBqb(qiOB@-iLz%Ql8`bu_VbKksLe1CdNRS)xsL z=3GXn{>QQPigUBE^<;FbS!{ifnuUlbpl%;e(12DW}< z{GY|vdn?X1{OCjnauw!Bo&bMp>j%b!F4qE`|Ehc;^W0X}=-oID&)UEn@C7D+2JdWO zaQ0f}`QZ7O?t5u4M*0Q%*G*qU8_C&|_?%{ZENko+@bN{TgA2zEyv^|GA27gLM}6)8Vw=OgiTz;IN17g0R7<%pQVx3eXkiUEn{EFBP{LHz2JR& z4=KR^0gwNbpdC{S%4zR}`ad%$w?{!a?T=Eq87k*&;NWcVci?f}2JePlDzd>X;Lh9N z^^ix|;EjErEpLMh$Ab8v$18r^LtDk?EvYk(l`L13?qv&oQ9tsY7jg~?ekc6?*xd8D-x2?`BW=-G3w~D>*W|uc+wI$IagFjR`R|Wy zN1SNiA3GKs;9^nm8q7U&sf#)^mQt+h{?n!rz`0yJb=$XU4>x$q8vG3gPq}>ggD4-) zQQS@}y|UjQyGgldaMx(F%kkHI|edZTaE&c&`t&eV?0eV^0@@kjd!_1wNydkXEn7T+s*orDa@h98fA8$s+H$Lls8 zO8t)Fb;Utyr@kMrcxVK8V)?UJJ@pHO=lkovIv{&W3C3ig2c4`)YaW zWaLG#3lu*JAOG)^%!BtW>JS|g-vI4GA7Az{y1`u!Try`2{EHr5=c_+P8?&)SV>*6= zcC9QO-zFB#({Ti}Eu!Ow(p~!cJ9ut%d|UpPXQJcB;rjqOR)^Q6<5K7+*#F}9jUIPP z2cW}VD@n&2feY!li--R;GBI-{>3A13$hjU-sGUBGr=s;l=%h9jwKt11-3uL#+j_8dSLWq!LPQ_qZ^SQP zT;%x781c8*Jj>-QL@p(R$#-+GNt;Tpyx;vV?_D00v5)-3KV@4_TXkM_*Vt#YW3#IG zmKygx_<>k9v3r!2|JHd!@>#l{MSLVbq&(Z!!h>8bwm5l0v5Z{@4ud}u_%?43ZXg#R zx-G%CoT( z>yI_lSS%Q$loZ#@i7IY=C2dIXQ*R#sPT;rkV_qMka#T6ny*HmcFgGH)w#}d7=yNQ# zsay4VS)YWx$jf@DD|SYL=&|CQR7YOHhlp;Wj)ZNq_(ArhgE778FugVWc3ygh@1912 zlfIwn+h=7o28y7+#ZH~@>hI`CN}oWkg8YnlI22iMu_St@^|vNdf5*1Rde_!lgTE?! zrF;bc3;9YL^-UN%b9@K%qC}fSBNq?S?vs(H zllN};AbHoEOI2s9(ssp$Y<-iCj0 zZG_$j@jsN$+i9ceWgd^s4`k*8z^6Ly#!vY9fcf?20}e%QV*XnEo0$)opNyfrOiCVC z#nwr8q`R`$i(Be{1Aa(mTEb&##!ufzQcDTBYvs?3B?gszSk+X|KMc-gPpjhq) z=FY0vr)}wn^;dXJuilAuZJg7!_UxYB5PvkN`GIWwV>V|M#*g;@0pz&QcI9lq$;6_i zoDG-*?UQHk-Ug(y5!iser9FcVGjKhpk-(SZljI$@ND7n6xo2gG)9T%1nb&>FVe=) z2AB*Xd&C84ypnl7RQ@xTNy!H9e5@MeV~AgB*j~*w1@YiOUSdp=+JIF>UuOei8*6mT#?i$#U}J2! 
zwY4L#0n6zFdezVdIQ|VS=2O@P^*iNXaqKX_Mq6y^Z8Y{fSx+>Hm^hT(RXtClV859B zn0TSMSJzXB6~bKkY>oYLa~|#Rx{9nMw1yX=Nf?J-M|@ToudI{vF@ZmO&Ex@p_U#`) zZ{sC?CXfF-@LQZ^G;BoYd^~72wIn-%FGcQMj8|`6nbnb9M>KHVaK?Y4ZyIMgThbF% z4#EaAo*Jb&Y{jb@pCo-m*7JQlNHlcvYPsQove`p!EL_8<^}j}!Ps5@OAn4bIeM_pZ8!BN;%*2JpMU(_CNML-zNq#-*cgIY=ewHXVT8l_e@X> zp_n!2d+vv4=6mifh(EueI&;29`{P32b6Er5qw#*|dnQt!`JUBtzDMhG>-nCiJf0%o zvkCld=6miV#tMB;I4;V_bfB+$F>Y}-5q&-FtU#6v^z|C>r*iq;;?GT>xvRGz{?z&} zi$Cx5<7&&rKY)KW`VH_&4c4PPO5YmApHHJR&En6mh`#SK{=636T7P9DEdKl=@e_Jg z=zHqfyEpNpX8Sar%Epsv3@V%0Lab%uhs-k)JwFV}f8#_bJ(d zoS*uSvj>g+R4I}B!nyf0MhfJ96X<4gf41@v4gA#Kpts4r(Gb`M>t3@wSkL*nIG!yt^GexxgF(sJYCKHKZ9*RuL|vd##VHuwTU?3O?$?BJ(GVid`0=v8g$9qM`z!< zircIlv$gxZ^kr9kXV{mqu`hLF0PD*~?Aaju^36JZvG#^BRUS~t0c8fDc-*<<;a#D4d1{;A6HeOP@@Lvy4x z@I0+tL@Rj-{{MmhVfj;hdDe!@0npD4NiBmmjAO_al2Xu9Cye!tsaZ_bC5etpTHYszE)G>fssV=&8>HdDz$C#T)QCqZjZ=I^FKqHg_T; zXFy9^t8Dag_GR$7medx~Pd9H`kZUK~`55*F$xEDT ziG2&Q-9aD2T+62uc`1xx)Ak3pT%3OX;T&`1XEA=_xuM9> z8mZig_7j42+>~|x#pv$jbZBuNtWmDwEyl#o&OrBN(8zKXUnGB$k5#`woFV-mOsu*K z^q0;^2A$2vk1ZhnO`iRaW7Ut!exO^bZ;EWhAY;{s(5cW4?u={|#j2MuUb9&BE0QJZ z3S-rcBulwm#r?vC{@13*b7fi&WVX3K7pv~#^9UBJUP8=gw)%DQN!{2>$&_+i>v;K> zp6Bsw>1E z)2kiPOY~}-)3w}OSs1S<@1XtEmUqa%lZ`pu#vPP3{8m0=Ya05*Y|Pt@9aG&Aej%3O zmfqKtV^9o4U-a%E(N1OJIf!^4JNuI4gHm}ZUQ?a%x9Fw($dc9}qr%+nkK=rB%$I_E z@Nd!I^t;1x`@=OvqV1l@jkAyF;!ECNN*^a9UutKpH@0Rs%{A1p=jz)y;BAfNLChD# zeQOjiW^vTOq1aV^!1-?OA0kFrNA^y%gT7j8@h|VYjc*nk=lgR6F`)HlFaDc-`6pxA z!Sv_zz*v9&2tC67)T?uU`fYVCgC17re~^d4)VVwGR_7nkrLazu&pz$(J3T%}UGa$I zSmk(MxYq2SexBlnASajjOxxENe2ZK*0Nd-2) z_?7Z4lymKRbc(m*Expqu^Ta)RulU&M8~>$;PS5#2AH0q(&SqE)lFEhSdpWAKK+BZh zUQdt(SEucd_qyR=oNq?|dT9T`sIrCP0AhgqZ7&RU%f^JU&{`QC8AKhNx#D!(*Rj?h z>Uaa1G}FU5*f6)=8@iX}Q)y?o-!;@jlL_d#tWg|qhuoTPn}4%ee4w%6&U&TV0h z_ap7@T$hx#*SUGW#szHfHI<>PjNMC~G1RpIzJr{pwMBS8 zL4AsKq98`^%K2K#$7e23elhLt=v>Qv6TOdf0<|Ywcy4ulrH$+Iy^%6|7gwx^+pT-P z@7(`RaL?JzUEw|7M4U*S|IoNqeHF}$d}oGzHo9)heC~+(!04Xqnv92-qugPe)wt5O z8-{T`o$`#gwWnspt?~a6ebJ0Hc&Gg{Bkm~0__#XwV@fmnh38_ftoI*rWXXLWVT{VY zQgVCbN4(*$Pn6vEqqpN-M4z?KrjzUTT-&*B%e9N^D6VC$TXC&&)fs5wa}=#z8Ohk~ zBxi5PVbMp$Z<{9X*QiY9eRuHw2fTlO@_tBVdJg_%-n)G^;8Olwd$NUh6!t~yO8co_ z-}%rp$Fuge6P~5AGjJ}oimoPdhmADf{H4Ti6#Gq^PkD#>8_D0IIgQ^xKyD%npIt*< zyCNEw(Qo2*Hy)*}ZS233|G$SioShk(o>f)#WA&&r9!I~TG16(R^OUbRLVo46A@j8M zt#(D{xWAzj+7B#TpVB-(?`qh6#f&|?SB$E6uY3P%?Wv89^|U=0d-oEH#&&Mt8gKFD zMr?Jqzgc~DHj8q}`BnOlE$EEmJ+!VZJi8nFDtw1~KV&=(ZB%NX2J&6=@#@oS*tfVn z1Nkic8{vB!wfD!=uX6qPkZieNzSHN_jBe^4Cc7tvb!(69AC(v2a}P4-<+`_5vdcfP$}^Dxzu{TRUMQ+jTi!4%q9>`=s+ddhdW z*bCWwQL+Y(L&ygyAJs2=(zSL)`80i7>s0K$`uHOBP#>Zl%TxbC-Fmi$`U1>qTt({= zXZh)xKg(}6V{*~!ev2taXJZQWPqxO5)4=;CwqAb7-GhH=a4){xgl_5VyvelfCG0^s zj#@tR_Ib-&IlEHv8gk&;V{y;EsV<8r8|3K^joSj*I1bquJ|y~#(O%zsh~SrGB<6V> z+wJM)Bx>g4%tlc6ThPqTnRDN8UQ(*Op!l6@EGuojM=6)V{cZBup8qFcm-GH~B6#up zuePwKNBqKSQi%pKfSCen{ z?K_~LeeHq#-U*y&D4FjH=%MdD=(+5b+9to-RS+kXwKvw+b&ZFS-0Ir~D`cbmyK3?- zK4bn$?rYFe^16d>qw0vz2|fGjNh>be7`twD%Xb^Dvw;<_T)uXCsq!QIp5liLz465B^xWcxlM8RaV4wFflX-P7Cg zwSaGZfeeiEWp!4a+Vx$=e9j)xHm3_COO+3~++C^**Q0x)v)W(Unf_AdpYT!V$gAzI z@E_YSs;-Tpt%_I7?)Aszdq0AX{o$0n4}EvOzS;2-mU-CrWm;3|S@jCTH(K_@f=SvO)U+7D| z7~IR3Y)U*RU!w8#=OhO~PA)U{kA2A#$@&OCPocJ?Iucb*3i%MmhdO5~r+d%hH$wmN zZ4WCwJPf>+JGhDXG0{b2CJOvaXs2`@NYtd_hrs@NK2Ru@~)y9lZQ{T_1%dI6s_9h{Fhea(9A4=TR ziOg=BsVk+Y+O9EZnRq5UK1y>~8Tn|C3;dnpsW=xH_qiY!xVmeHt(_i39l2cKy@RM@ zDL$fEF7QR{x8(x=!v6?*g=6%hT%gla%9S|(O#e&da>Mv4BYTuf^*KxL*~D?($sg(4nOt8XF3~qMBjujB`rOluKIM$~rsgZU5^MEi`z8~==zA#< zzGO4~*Y`aH$JKpWrT&$0Yk6^{-P={>lAs zU$>%Mll$MbZe>gRUtPEIA^YFIE;4K7*Q@wk6X!9kSJ~K}m)EO&!2Va)tGv(t_pMjK 
z+4y?btE^-HqxCCV;}N%E{mN?gzia)9VmJ4^x_(9TukL@;mc+-V-!|^kddZBOo1V)D z<7;Fa@Pp&bH_GpvBmHN-Sn-JY%V!ja4kDJ)elW@0o#l5QOFgLgLox1>bX=tbpD^+E0%f$pQKny z=V=Z>XG7mOAWsYrhC;KfjZXE-=E@Y?Zs%$1#tY1QeFHwZcokcxZvkg<`1QDc{NGFt zIt%1rtpYhXWRP;u;@i6Odys>V7s$aI&@_>QAg125Kn}j|<)BA%PRK!xYa=<>a3$s7 zZ15T1g|S!Prnex}xwOf98 zea6W#d~tsGLdHT*A}@vhmNtqm^5JS@yKm!wdh76B@v-LCZ_t<&o8O4%k20=Ed0xB+ zz|M%&KF+yX=YS8=9PcX7NMpQi;yUa1@$f-wsx;5?81kfY-N4m&_88;DF~o}U4Znil z(y<}vRvgnsAosw7LWyj|*rVeee=)DO%Gr9Oa?bb8-^8disupLHU zJu~_iJN1sf3J3E!J+K9bmy>{%(6D^1Sxz~9HXVj33JU-Yb~&YK6ct5`3G=jtrN zbqhM5?PD>nQWIPz1FhtILVVVzBH7lsRdgh7DAcD%>8EUL)$dbyhg#Q*gJzwa^^aJS7$_zrk~qj zgRsp##Qs-eld8=X8RBJI|Bky=B1G2P-y2U-4sgZQVuz%|m(6`a7d*=6N~Y8)7yjuTM*%jgtl0M9BjDyBpdi z&)(&lUy~i-sf~@m&G}l?q5asl=J#r1rz(6@Oyu&i@WJsu9M z-j6HpFW8^97@MUrr}N3kVSdf8zHel6BAu#-HoXNe!!^GzqI=38(UuvET{HXBu2L>G zI3MQ%?rqKQh1Azg`y`KQVq7K~8CYz5Qty$*-cT%V!QeyCLcd z`JUCy2!8Aiezj4&HlC+CH?aJx#6W{B{|$0sDXv)l^M30Ncw~O^eq_z)rTGQfyn4RM z`3l;scw;lYtduHq-F81 zi7sym|5E!o&{yQoF}Nm(b3*^4HeNq7?ThI@G(}HRyAZ;tkLG9gAvTR>jo43eYV

tvPHc*Pg{0Ic%>_`Y_{|c^*Lp$r`}ofvL_G04=3Nm#)6OdHo*pp=`@dyJz0!x z*f%5V78}=>!}|gr7us^_wK2oV_@r1TEY3{*kovW|i(|3%z^IRESG(qidAAz0bp7}5 ztpAc%m8Yy9HyG};&4OR-_-Zj+`Yx2YL-4zy&JN%McwWGkhBBG-nOH~l?5CW+#v=S* zm;R_-YM;qe9%rc2mqDMDj{|j2c#M|6Y(mSMh`n7qJiS6%KJ3Q;MrWhteC*d?X!*>n zu${%U9LM;x87<#}ca3N{2H$G5{675|NXy1F+Bu1RO(wGPwzK@JcvV27)zGUVdE1y+ zXD~E+GZQanX(ZWrleRVL`*OtyUXIqLo%Q;j_hF9{yEq-EPZwkNda(EEhjd%*Zl&JP zk0thJCBC~gRk;P_A+RIoA>&rxe00>sv($Mxyo=-3?02`$`<`G~mzY_D9-LzsjOVfE zbCYrSSRbn`WPV&arWlM^S@F~t>1ViR8(Jmbq`Cq=O7{D)BlbFUQl5ZfFwA ze_QXhUbh`Q_E7Pka*`2OJ?jLAeUs_}FCXz9%2MUy%FPfPolAMQ4~2G(A@=?u_x4Sy zL#R)@(0j#UKM3k8`X<$j$dKBZ?^{(i5&ZqUceLmKpZTwTXid>`Zaq<3?;Wy@v`Ooq z<2IBkzf+yylzxVNKO$&LRq-c0Iv;!BXxHSMRIB>F??*ju4=wWtYRARC{(SI-!S`{Bkn z*qa6L$jJvW&;j^(VxYP8a#vS7`))QfSjQCTrt*@Rx)=kyy08h7nc04z>Y0xE=_4%eFiJ!}Sx`mT>>`52f$9GkmTRUD1_Oia+@1xEO>VEnRwVe5r#$|*Tr6sGoAONl_RqjJimfz0(%9%O=o9lX#uM@=nuu>w$YNqm0g`F=ss)H=6CP*pjw2lJN-v%}h4U z#+0C0e``E;noxMoepV+$;};kUIQ=T5aWNfjjm#`!KcnHtX?s2TN%r4|JQ#m6?WVn^ zSD7;lMn>ZnlSkug*&NL?IOUbLr=38X(8fy;F9W$d8o;$_g*bn%1OpS&(!hR#VBpM$5Si`xLd*Z<@0 z&ExDSs{a4JGYJUm>nAh~}uC6+D zmd`nLYH6>h%VHp= zQjdmJ7XL^-!(;j;8k>Ph!u%EBlj+eqfgarkCg%0%H1tDybVQ&>|Db>O;@ef9;#cX> zEcpCxFnt5%6#BuZjr-A~MFYs$KI*W}Y%|4C=|B;dS}b-R<51zbl_{DhdX+da)%SN6 zS5)QPY}e85%svaMUP?c%f}P&rrY+D6~vRLW^^QI6a6bX zhvEqQfb&VzrM_-2dr{jB&!Na-=f>xy*qPx=;3xA~wh_7~3>rcIjHlsR#)fevzD)B& zhC%vXhhflMXfX^LjDI&@b|%+(4Eh0BBMcgkZksP_L(WfJ7LNm@6Z4Ua>8061%D1dM?)q%XQ2j$ZBGED!J6k1?&*9jTu6h+mZRQ_jeZ z^TDGsey2Jva&z4~(WYB#MA>bjp_}Jb#@W|$U;nq^e7#BW;Ux~{X>&R*7a#-I$Xn74A4BzdKmPzPWNXtOlIyaYd=&!Bq z8}!@aD%ppypd!5lyIO^z!Z7~3zl!!{V1J6uTU+Osqe)Ns9O+`Sq2fOLDA`k+teKcv znwVKL5!%ZEQLJa^3iPlR0l$bpd>VXm*GuDllg|osxjq&>ukJ%R%6tL7I2)Q%>e6}H z_wZjj{64Tq>niTvM`LQf%{{oX_!u7}VL$F1i|1KqVc$l+TVq(>U5EO#zM`A=9A40# zY+x6)$@R~`NzpXI%fsxx`+%5dZT1as;svc)7miws6>@B01T!%CwJfSkThf_w%uqs5RTqO5D$_w&eSt zip_av2l_+X8;*Xmb^1exMbnVc)$Lag?rQr;=e_9j25k?u(ZA78sjQ>DcSLe_$$2fD zv4UQo%YQu|B|G2@mrm`W;r|`*d-Q#*;nBVv{$BwPL|-TPuvUrvHlHp{uW&tj7H7Lq zhPFqHeu($A_lEzjJZHpo^6cN()M(Crf!1lGpQJqh|G~YvH7QZhV7XSVa zzu)Bd63*w*^SAi@8ox`N#iHMD^800#_rG7`_Y3@(^@1^|i_P<}?_n-LP?td@k z_p|&~UCM7%Z$js8D)&@xjQ{IQ9K#OV8|-v|@{W(4BNIMu4gb1%w{P_~I?%PVUgazu z_xr+|-0x5FTkZTz;|X|{ueVcn#dDp9B;M3K_qgQ!mz(2@*i)r<&KB*rK=Uo=&H!kB z3t4eKOHZGtvG3{ld>89$8M(UWtK&ZO{VMldG}GtOv{6T@?e-`d7FE935nZ?N9BgWs z_&CP%*YT@7D&g1fXj`=B?=Kcj;$t0}`e-xeb1_XB`BXMe+PhEf-3Wj10k^};Zm#=Q zE!;@5I*dAbuh!P8n>b9_YNtxa8Sgw$+OzFM%uI7JL_{_^Mr|9JKvk||9JR+Tt?=b zCeMxv?^iZU?jIiB@7+APe^7Wo+A_I6HN0OQo!s9yy}zKn(w5wx5Z>=?Pwwv;-jBv4 z_s50z%UdV+w+Zi8wn^@f4)6DFo7{hIct6@cxj#I--?l??f4%U2=h)=_knnzWTykF+ z>}9Z^y?4jt{(n;0ENJiBDY-8UPRh59PwoqAllz^!B==tm?^kzC?h9*^@_oA{_x~8) zZ+l;I|LO34=N`%Z$HV*8J(K&3!ux#_lKa01@3&1%?k^1Qm-kNY_k{N=`y}`83h(#6 zKe_+i@P6OE$^F~H`_ZK2es_AGIDzxAwkHRO&$-Fpmyb7Gm_l569rwYXtE0}fx~jWJ z>@^~GnV>P_{=^4hBx3=^W|5!wG<*(Yy59Fh)6~YrR#%0(7-Lja*Cf?-fUE0!P1IFg zH?He{d|jLMuGMuj^(<-KjQP3E=ZmJBo2h4=xSkVyJ@1W%bbZ6s^Iq!N)YbEqX6oV0 zIrZ%jU(d$nAzi<7^=wQ%o49&D*GxV5OVu;c*E6y@r0XeH&q(SSMP4t7lp>^?>uLXC(D_yWuQA#%o?*v`<*$?aNlhXOe%(|G&ilzsUc; z(Eq={|DVBs8*jaJ%2Z;_{fIR`IB(UA_{^PTPe3|$c`V6E+S&->#XwsOO#Q2k@`e?z1*u@h()hwsvR1yCQl2$=4|w zo}{k+%0EH*;htvK|IXH=|KBX=%daSJypPKdO3HWn@)~bjyW7K)JU@O&9d^#K#scDj z%KZJ)t8ZM$I>WUX{}bc(R3{Ouj)SFgZM0-D~F?;*be7FvwxoK zceL`zVj#SJ{`yMCWayP0blX`i<7;y+f1u4c=VIq(`Z=N?=Mv_fXUcBTEt_|=JSlx? 
zn6En;Oo-nr&dE(dPa8eQ#$8Sq{c?FZ{f^qA$7fAPmx!hO_&3Slu9|UAoU4^DBlptB z8IE%})3lp@X`ZqQPZHmDOho=nc_DIN?2*v_OFvb6wyE+MdKQrvYp1+BkMOdg^X3xw z+H0l7*FA?^ z6m37n^{wc`+}$~A^>EhYA4$FRqe?&i(mH7VGWaFER6V~zcGb()f_|4X?)YEkIjZ!# zN}YdqeM|duuJ6w?iV^7N+zIf8obznjm_r-R*5S!jvQO1P{bvbh=+h3|i(bFr3E6(T z&Wz@n&b840w14c?CzsAz!=ARG;2F=vbN3v2#QX5MbjoDieGFxJjy*mC7QRtNmo`uI ze^H!&z9cxiih9|PJg;v^X`b|B$uPZNS=_h2)}xo$&nUZXArIaOA9h@N9HBToO{YCXJjiq$jSCr?80)bhEsh*D2Hu2nsgn%Tx#J^CkY>Hcu?`gilcI-I#4Y}U;;uV3m~ zj$PP(*zsJ)bI&$UGh%*c%cQ4NpT+0tdPV<@EIW4}?3%sM&C4!G@QZPrlbd)@iMqbe z+;BK9M2^!Z$G-XMmcI8WF1}jl*y(@d&!C&VU!~(7$;H>PmgbGT%^!mf=j&^uzJ<(v zEo59U8{Bd0$H9lI;mci5>_6{s@_E?#C&2-9YogZDA0%v87|&-ck?*7}cLt~0Y>T@7 z$lMTe8LP75&un~`{Ni2AyBe=M;dLAL(FcYTj6wD(O|0%Sd14f-{dKfWfVOeaHWk_? z;CG?z6P`Ag7w~PqLZ7YfHjnRaZ6Gvk1HBGcc=sxJpgLk*9zOpOVHUqPpp59L(w^fT z`XpQq+g(T*@#Hq~#J8*SP7@mo_9oP$eQ!_u4M80{_&T1(HmwhniD9jccK>{{ppSL* zU_n$}%f)Cd2UR+DH$6Zm8~8eY7POK0L7orw&)sg-5c% zH)Jz8UdexmSH}nae9gb}5;1Pl&tiS1A46O}rW{=9C?)+Ub^TSoCCHUDu!#jx+fY}> z#nkaSZN~g8bxmdt#BAak;)8Szf2nJ>*2zq2P7i;DE~$?$2QGcsE5_RTvWe-LeMqI_ zQBTis-jSWm^!%1C{#i8f-}v9|b@X(~I5}Xi>Pt&+YOgarEvoAT%E{g|uRGiJM^DP0 zxfj9rwUCV)2k_1j{+-#hbrA9veyy4je@ia!uN!JAY!KiB+ zVgq95_{;$K){fqW^Oft-U-K_vUmeYwmmC!DaUVT2n%*wF^L_aYF_ZN{eVdAHwMyT4 zejj{H=aRGQwW`pPU_LZE%!gkNUwO9_;TQN%yYBlN7qGrGp@sAE+BA2^|Dn(#nyAnH zKLgwot-_u%bZCs#tFrvRM00YRGj?Vi2FQ-6sy)f@63!!&kDxs}3uV{b^ZU=rKHnkt z{I}>%_&zaTTz?z>g8C0qtQ^$;UVHA_e^vG!Jl`<)e2Mfwc&>BwL_a)VrTV*MOTlv& zPw;y@eDkq|<9E^8OZfvE10`#@*#nZ1QJ`o06WcpIqyE#;8|hd#Iwrka>gU)S>DoE2 zz1qFZqTky`{EaANulUjK$F|fm|6|1LMKPe(G{4z8lzo-OwA((v^We3x;RjW#xL*sZajnUF|7R}JF z{F-P!!(&8$^pDEXAKG^qd!GU3hJGcr-Rw9(K2Pzq$M^*;ZCfk;T2Sg6>i$RNbz+~h zeWl|_72+P*Y#;xJNssj$UV&L1zd@h0ZqeN<$M@Dj-h=I4XVkS^IZus+ zcrT?fjdN?{Pt5D^Mfrz3?VW9fv_HR9rDJQ+?%&DN-V4s`C7O6X9NLp|*z=acVe(YU zhr~KNe7^8J=kFJ^}8L-Fc#pdN}+&C&Y16#7RzAKL$@hysVs4HGG6?HvJ zyfb?#>zs$kqIr{P4Qa;?~Mi=h4)u7~B1(?mMYlHZ#Kc`w{cM4ULDv!`R>d!i}B7 z^}1%0+S?+U62E17rEwzd3tuLYE3-K(o5QN0E8@$~6;mh=0S(uOeCdt(GAqZI@_Vrd zVJ&^w#`EQpe)w_|d=XzIljAi;l?-Kf*jvP0mHy9?{ox}k%sCwmU*r>K@vN$TAn%Wv zyc^8LS)aZd^r;wn&MIVaUi(?)9FZK1Nd?m zcBN;|kMnyUtzig$Yfs8n@{Qbcu>@sKmao-s#TfiQ4H~sRLDwzv$M8Y*DIfA5d_?D3 z(Td&tb35~`Bpalzr_uFpa>hC%waWORbDe0#c9fO84>aCTug)KI`u8B;_k~Z+U()w@ zZ}^D$YeR$10+pRaz3V0C>u*L`wPUu@xoxFmZ_&@as^{Og^q+cHs_*P0E`8D2%+=8m zS(_OFABCIVU&Q+WbG8!^D{Tw?W;>TrcW66HX+!US(D|Sd^Pi?pD?2k+_IJLl=oNn+ z4a!c>l|4KBOIt&(M`PKvE}LnTl^fQ;!)}43dNVg zc&;y1EX-K5dn!7B9xR79W*6Ox%_(!X$J76%tn2@n_#6V&*TFq2-w76IJY_iu_~+KW z>8z@5=@@Mst+5O5>)p$VgN5ha{H`juTW#%9esw81MCDkO@05N#j;tKL)N!=hYvUc; z<32u%*JWsUl{UiMz41`-R2nDM%^#(F%jJ(od%qh6ITgh%y%FEmY1>GBcwPA;{SWg; z+hDtHtX$#y6|zB}Kl;^~G4I0q+NiH6e-zQDFn=_ZILyi76uuAs3F>T+KROH@rEE?9 zXiVVOJ9DxdNdD;e$WgR~`J?0Mv*|@!E`M}AIOk}eQ|fvf9)x4_y8{k)<(tGv2)Ny+4D&>e9k|&{Lz7)XR?Qp=t71!l>ekJuQAU_ zvKMT6_S7`Tw~Y72t1y3bqJQVuruvcQkESR6D0LkNUWo^x4j0*k<3*4^>I~{|=NWmM zXpuflD#bZN=2kb7Pi``QG{MVqjL9;{AJKlsMq}Be`J?xHdMtnR3i@k0dQ?tFyA(g6 zyH8Wb$)O;B^a}5Y-^w5TALS%BpFfJv{^%!v)a&2b2z%(EUt#`eP2t(gw4T}QvGZNv z*{zPL!a`zHM;b#2?EO*mczc(!9u#|83X z9?yOa&qF+Wj{2L$vsN$5x_DM2n-tH6dU_1c?nQqM&)Rc3dV=f?ot1y8hiCWlj^WwY zDK}6&yTZTo9BqAxeua3pYR150vHN`78DdSMvuz{mVwdJ?4ZF0T(&Tiiw~esAdf4?% z&kMt@>BLthbX8ctC0N}EyZ*{}wFtX*Q1olW7h$S4d6?&v6$j5)e&OXp1zJ{r=@)ev1@1leEXn}MS76OuCnO? 
z*tLbPV^PpXh+QN7bHgspCmFuq3C(%zTHBWwcDeXAhg}zWUWM4D{eR-sKfyYaRsDXX z*!7;IAEmCh{Csez!*%Njuxpj@)y2N0t`EsydF;9cEDy2k``BEw*!3j3Bw5zMuE$zp z8|B+Ge!NsovFrDq9>cCLdwxq7my4!AM|bu*dM;(^Vb_;EEwaxuDQDPqF?$f*S;zgv zVkh`_zE4|6BkvHq1`>-^!aa|G>8&ElEjtvsH!_&OGRJmpV_XaAO8gPyvW6(y3XG{EZ!?PcFp2;r00L>ZRP~UHTdEuFhZ*#m- zzFYka@$4c0&I3*LBgM04>BQA3$g14`fPRQvFkV3uwmC;IlVa7>&1^Lld;bN?D_=q&12Ux z`4`GIi(QJh^4Rr+uj8Uq(>{dQwa7pJ1@XQ0F}2r>9%QiVkCcyXKI;0huj7oMjS#!; z^v?~uF7`Z=4IB=?GrR%2bl$1@E$njfZ4SG(^t=kO>+AlVYn$pvie0xS{U~)^$C#i# zcI9{xVAnT-I$oB)@;32)usp=BL#5kIV%Kyp%lffcj-CL!luwXs47;`sbo8sDDZs8p zV7`8DXgXSu7u(j;BJ3JMxq)KWzp)>^b13%kCUw1A>>ArhPSvq>v1=ET6WDc8Ag9G% zZ=1ocVV)O;U31Vy!>&H!sz%uLk$5huCa1qMIdQYScY|Hakge=L#IDonv(=r)t}1dd z>{>gg7a#O`q4}PSeHLKX-snUgyY7*H831Ru3}R^*zcPc3tl4m>9GXV%H`9xnb9lo@dhMHgqAw8)*JtUtZYtNw6&+qrS%b z;#G)UXZm+eZK@wBcFj!sQR@0Q`PI7EmFGo(U1tS#+$VqKv1>hSBE+sOu(@WjYl@d; zUF^!!6JVF}2_bg9h5nk3c8aC|yKV;a_4|3sI5`wx*IT?Jeha&PN4bGw*CPMU7TCkj z=~ouJg0se$!_CB7Hh;@Js@6E{gzZ-^n>w$0*&w(7qMLd5h_wk-=1bpX?o@N=HMB8L zIt3dFzY#qrp~3bz)zI)*ehyjl!KE_m6`*6-!033)jkOEtxF|=5jj53p^S`tQ-EJ)$ zdmS`~ZgaR+E}Yjbx|giK85Sk;4CDX^V3&HHiR&c*!#xC}k+yTBhs zp5lXaO8g!s9pd+gbzgNsHyG^Jf~a1tx6!>_PYy7~*RSU@{PWKelhX%DeWc<4j-2CP5qJppBFLbB969!5*ugmoYqKV{c-1m}(VwHTEKx^_*# z!?v8Rtqt1AZG^h^EC1Yd?OF85Y-JicWIFp(UrxGqCs+{b+7JD+1x?YJ>e@r{ zmtF@n=1Fv|rf;dP{lvdxy7qlfulRmOj{h5Y{wp77?KIN019Q4IP)r)yKu>mv$FXiL z;C%5l(38LT-#Z$eIX(H9=kL0K3?~PAaB+;MUh)0l z9RDx%zaRBD)*mMQjMy>6q=D+m7lQuh@kIOS@m(&4I9KscpeM(BI%gZ5IX!t&YlXOX zaUjD`PagHpe~Hc1;0b5j)WehKC~r9M6JN(Cf;K`uxywH{J-L?t7@q8Cbfa(cd^zdK zx9DexC$s&to13CD)swr8&b*$~^exqs+x$DGC#MJeACcp~TW^Uxl<#f^Pu}+~_2lnC z|MPnC60w8%ot7N#Eym8IH=7upIXyYp^Y>pT26_nC z{vFekF9-aejV*+BdXDG+y#a1D@;mdX>)qhVnL+>ada^rluIb5s%2xz!c?SDVFzf z&h@^W^yDkpU8pBl`De45qBGT#s?nL(lbXJzdUCCQ$Mob0Pp|mCOlx`r{=bO+>i5pZ zf7)rJCvERiPx^xX=k?@Ou)_3YtoEV=bpFiKImqbD>B*j+zso)r=s|!dJNxH~2Ws@B zZQr_jQa1jBCtLVB76omDdNR^KH$Ca`{gIxWX7)}!!+bgENtDx*LH^m>;7tE?rg}2U z=*;U$P2W;IarrlN*Yrf=L*xG=tOE`Cul;ZOJ=gjX>q)$~vtZ1m@kAC+((#ej#eV}I z=hx21Yv{Fhp8M<-abtH|*BsH+7#w57PaSwdL)M3t`XY2~Qe* zuPWtJ^^~aVFT|q4)kZu>_GS9m;@sAPB(F3fiucoLt#ofo=4_WE6=N`8qa5BV$8*h|1-Fmj!Kmva%=1;r zi|Lv63G|XX*I9to-OHRq!S^G%uXDYlS7tuZ&As>O+!T2FHu@u)?TqfzPT(9U%DX%v zeAC{S>*3)DT~o(a*sD8pfjaD+mo%2rJIcG2$%W~f_b%f-mnY&qx3`D?8`=AmwfBE- z@AEugel5y}^gV+P%%PlUDS4W-M_grePqHq%Zz%0|t?ubOk%wjDJX60*I$vGav|o}f z!YkuRyPYNDZL{L{qjgV+>uvMr!St~%UFX$(U$G$d*m!z2^@cpA{N=(K?RDB59B_NN zDf2j3DB47W2zAMKpdBdIwQn``NyQT?tp8#7<^l%W;l0{@!?zU7lBi2f4lB z*s7cJB)1_uehUBNun_(IldzS0Um}n0_RVlDy`Cufpet2u|8>rU%j$GsZ*qGaG9iEa z`*y-Fh&|%+hFfX9lGpWMOUkzbEQHQ0Z0-*F^)1Z48{@N|(Y@)Sduzq2!?{1DFhuKd%-Ket{9^H8h8%z6*wUHUBUoUDU5x#SCnv!Z^}+Uv#X4jBFXr(+Hr}Yi z<3;F(_LcQ92NM13!Y87rbh)tZ*aCisa|Y%Q-FM8efo`28M8BO6q2KFS`~=3wdo1I8 z|LtDS;&`Z-@7vPfl&@KyU+Z+a_-v}jkX7i%L+q1ID!$bJ??Hq6P5k*Ma5Ss;+4g>d z{Wp64{RUWOageYxTC1|S0(}lwkt4BPkBjg4UV}S-YDBc+@5sGTTM-!R-uL^zQyvw| zev^Le>(Cc`Ez-`iB?fa|IsciedfcKmavG^v33ub@m+-LF)8)DDQH*CfUpV9vzmBqa=j=iDy z=Lgj1u#e~8#x5Q9;SZ-0=eTpu@Q+ObEz7n1*5c*lgkC#MUKcJre{Osn?USv0l zLDKl7z*fQE$>_a{Rl!adecjLVe%Jgf3j~{4bZRDu4EETgY z)-|z=^N)XGPd;&sbfPLh#kHPkKeDdJAqN+iI-fCo{%B~7zvsdk2l%K8Hl=c_!2edw ze{#J)vQS(4o}Y_*#&ABA`1z;!owE0b3y5DeKIvwyfc87zjLdA@-$%W(TiKhrl(7TP zx6u217yP|ygR&oo4o4$8^h@x4HuOpFB>R7YgZ1he=Ic@1ot*JTeQIN+Vp`f@&HDm3 z4?m%HJ-{vahuCi8css;}s56v%)}A8t>QL$zjj}_Nx!G~19?k(w^(R};#%^pJ+oQch zoh!5kAbls+x%@Xr9}8lWU%)ft*O}O3ibob-r0qI9h2~z~w{MQho@4#Y(jt7+*ej(a ztw;UV_fm@bTvXm--*tj`J`<&;_!+0 zt*$LXcUp%6KjV@rT?(7Kt&z~LfCFv=&Y-Vv4`h6g|1o7OJKlcDPUX@<8*_ou+)duLbqn$Q& zezRgn_nWp~4}U|0^ilTmJ!sT<5jrpY1$dnuo22r~kCpxa?wTy?#fNirh70|ej?c2# 
zzep5`J~>lb;Rt0F;^cHqK}8wuyk3=VNWJP)j8-dUn9C3&5F{yLFWdVIe+ zmQp*q1ull|&JjCxN;9udg}u}mv~F1i>Mf#gSb{mc-#c#B_xAk2A4l&*JPrv!P$^5u zesU4loK|{kZ-iOG*?3~0+Q3+N*myOS^`Hz+;qW@v_%$qfjO>|T7W{@anxuV79{?H#ch&>|aEiWKe(u+z76iWW zMpITLZ59{!%J-+isRr;N`S}V>amSIHB z&1&oS9^@FpER_4oJvf#by+Qm>{IP;q!gREBDdepdtByBV;2VFmAZ1Zl5?ogbJ#jzq z#tBOm1{=?=R=q2oCRGH#@<$Bv1^QNDMQ}j24*44y_{bkG74KR46(QY0l(`JN4}9m3 z8stXtDpLkc;YQ)lyfMS_)9K}lu)ZxW!0PFdv6S23f8vcb8$88}D;d&S8g~zU6u26r=?8`-(6g&-mH0F3rGsYax{>U3|hWxqlm7)|Tl^UpB z8_rO(Y|Y=ReeO}T3oGo5<=Q&fp5Ig!m^Ou z=GzuvqYW-FO@>YT&>MFq!4u9dqF~9X3Je#E3qH4-bvQw=wLd~aI#=J20pbiwm;}(? zSc8-n5^kP%3>XhD0=wJ`oHjvc5oMiIg89ue%#C zApB{s_Wmf&e!JBk%o4o)cANTr$N0tgL0Bf)m@u9%R&H0p4`u|G;jJZvKk&zi*PzU% z!u1h3;L!kU4dJF<-^!Pxrljmt+_x8-EWVtyc3!Va_HbL919nLlO9gwi_eY8MU(G$G zBu#Mtvp(?0AzE){7tH53qc9!bqq#puaxG%5N#8OHm(*HPKFAn{wVcy1mS2QN_QJv% zISf3Evc}2!z#oU?d9?}rnH)}N2#a>4wDo#dK3S`Gc=bcGmuQmIhF-ra7BgAABtBS) z#Z3O{^`ImVU8*H+tEmD=O2v3}MDoaP?Tz5BeXSTgIe+cRhF-slGmA%;O1r7FVx0pQ z^rtgt&hd#i)-1_qX&N#x-Df???Z3gLNVyBNAj}5D8Eb=!-*HV!6itqRo{RN zni_-gg@h=jepTT_Fyo%3XjoMV0MdhwfKC=qTrSLBE>SJ6#a^SiRvT@r&fGTB+-4bN zMMJ;O)6djF=*L2Ve!b8f4r^P%2J@E}23u)epSZl-?4m#@^^5{yXPTpGv%R_8+8ERZ zSC?9YUBJUXl(2}13RI}PsEFFF6pCuC7iwM1Wl$)fyE3Yg*1Gj(X}2`rl0T#EnOnzc z?Y5mQqq1eGI?OSLf?y)$#*scwSt<9^8C~u4DpGJ+(X5+$et2D;eR;ttR#bf{d zNfaZR&J2E@jSzI3>l}*nyv7SC)Z1-_CYI;iF{hm8j)>LfC~Gi-V}TV0k&XtROIz)E zQ=WBtqc%k8TPn8-ge6RlH?cb0Xm=Xb8X}+zv$aM$8a4;5TD$eC=Acw+T`8Zhm*|XU zqcvDU3MhMvIN)`Pgh^iH2@F;^4isq)MkralTf-c>Bandf5#!ZKE!g9Ix*r4N0YruU zt{1m=eptH_sje^&)NJn7+URmRD$djHMf7U})orO%^hT|InVaH199PG%k-baqocHMT zfJ-UxGMdCqN*%iWmGiWelc;mt?2lTg&->)r-@?hn{>owQZ(GBCJvnxB-?(|aj+)$~ zX1Q8WL=bc?ND_z-fh3no=kWi)#-;x1`gGRKjdAkeGb=Lv1tq2#T!#q^&s7!*5s|F% zmM$b@uHC$Ha0+r_t2si+?0Tz%!tQ<4)EK1u(cpH}}aC`miM<`czro@jTcvjK5o9BzH6_*5w3L4ZcP z8SU2EJ59yDPpCgeBTxn{v2UyqcY#i~F$NbrNqC|P(;raK==U@C%g9r@)cwQ;eiB`L z=b$Q$1D?*c#Hz~1|3Z^pf~a>OyuYS9+x}l^vPuaSBf!2$$~E2E&H=afP(gq75XKQpMb{7v(;GXX`uZ*INtD`2&Q_+Cly#iw|9H*C?#B2v$W;iOh#?e|P zCe<#^D#RFrSWj%|lS3TP23}h0W(wA>9yGD>Ptf>KWHU!UJh8s{N;1=)$i*iQZt2Nu zgTj=QHU9bEoM>zro;4kJ>ew{6HW5eVX2) z(~qqUcQ&FjyEh}e!$v-|WA*w}r_-;23gF>GJU_TaChA%Tro+K-y|&SAPL&(Y#qB2E z;_LOBUAX#&Q{r(KWxO%h26(A9s_|czd%ep${rHE4LFZQVh$+RO z#s1FPpjV&Tq?H`&Rj1ne5A@&Tvjl<%5ROvBT_B?_!bHi<*px1+&C$AgcyM8aw*@;L z3{P`e-CU|&#a}B+nAGKqUHUivdU|-ZOG~XTR7bVJ2vh5H;83d6ckzd%_V8*4|6S~L z8%wl%J?f#_?5y>MBmNV41gYgtzli@}ESfv~dTWDLk6K`qCN-eFsnI~x7mckxj{6lFu$x@G};v_SE!8oiypmM_R0`d#%+t zt4tX9aTTY8|12u=FS)7E7TdlCCndm@+h|VfgR8eyWMeP^xc%DXJEq^^0VIc^bhOs$ z(;shDh7s)F(rw2E&CO%c9qVQDoS!y17(W3%;qljN!^^j|M%!{k43thWeyvT5pmp-R zWRq!6h8kTuor@jxf;P7eVAP=FTBy^`7xAa^5I*FoXnna#)!{B8fb0}KmxW)jaiuOb zzOaFRsRdMb@P4QIsYAmyZmX#y1s3Q%YxjmtYxUM2!K5RFsQ8}es__Kx9S(aDTadVO z{fjzW<*+{88{kHX4;_szTy9|6oz8lDhw5Sz$j;Lp5H>^c67b5;9C&4JNfx~Av~w^J<#3)$}U zs582$F`c+TZ;#@B$n5}n(atc@Lhsf3cC(?e8Q%#Gq2I-w!2qwT^77O-yj*o+^!q*P z==eS}9x9qqg%GeVa}$ik#MK_l+-NEWr?6bU|M<1Kt*<-nle7d8(pELHc@>10o^JzrVtVQRoz4Umu?L$RIuh$ z3q%aNN$2O$opW$(5Xztdj!;t@UZzk|{Dgt%sH$)$^WQ)k@jv`oP+r~nZgaE%)?=V@ zp7=VZDf0Pn9R_*)nYSdSc^A82cQh9@S((4u5wp+guYv{Wd37jfs!2)QE087LYeJ=! z-UyKk_*o&NOn)?3?_I6{h_0$v!Nk{HU)iy6(d31PGs6 zd_k)Vi|uButLUTonkN~$gNFVPBU-|Hv2|i}>Ud@DYD}8ECV)%rx}ri_a#}roni7Ro zEj+pzwC)58+38-u$*KKYY7H+FLRR~=y1`;5NjzV=@!U4Csi0R?^@9>s2-5}Wp&xq8 zb|PBpVxhDOZE$+HT^lqTQGZ0?5QtqUwz>#7!qdmKKDjUY14w%H9tmRruC2Ki!CJXSa{)fsMWHIpDdsKQRS^~#;5dIlPSL317Am8IkS`1av! 
zR-xF$so*Bz%pgmjBm5M3)AG@kb}4Q_;nU%%%zfCA1Y?hzSG7k2$SH{KqtHI!Qe`6= z;qpgau*yKNA0FG5Pn)Q)JQuAY^r0i2B1EGfH8&tjHro*3%F8WWtC}xo7pM7pLQQGb zI{KEDO3NDs;JZ5I8GW_(F;PF53@T~qq`KGi>^sFSk*2C5n071Gc?$L7un}H}ihS(E z5o1>DpG2T^5`mB%;3Ni^gFk)PJT-@B+6x6DCnw^))U=~a_6pf7P>~&IHtbqm?o5|3 zrMuO2Ulqa<-yT7i@V0?IRI2p+dv(-rm+Kg}fal={I7D@K=`|F2E?W=UV>5w~&_UWUWiGFs&n9OY4-8c+ z1}IDkNKEHK_>a@UKWp?~+N!8ZFYc**LLA^$cPFB=oybfI!I{irCO8_*MByg6%=wTs z{f?thHCLdWi_wg(tl|s>@F&oMc<>DTP{beGiLm``&E=4oPW~YWq zo==d{!)I`YdlLIv((KOK@#9ehO>nCV4&P~ZHg+}*UAmj>O~=9U!20!OnfPRl0pWOg)6fZ( z6^sOlB|a=1G2+O<&tkF)VzCy-&ZgY~Oj|GCT!1jzrN-5%P}_45aps~p8gEh2PRGc3 zv0Yt?VB`W$VV8hdph7l%y99scx#|k2EYTk#frVKpXIn}sD??5mv=E5&Dz&BfHbgm1 z*tru#e^NTx?BVXRRd`Nlo4z(_d&H5!dBq+~>p}#M)o%C5YDuBH<#QEf@Z8;tRDnyX zI~{X*ta4g}5t3S~LrX%=CoP3UwD^>Qfq@s>z-OG2zGD{h&UBmi;F4iwH5KiiB?)Bj zrjK;9AzFvCvN`gC!m3fR&~G*C&3jtICd=W(ucHwpHP}1b&GNZb+HTa=C{wUyskG~G zZiH0pHE0Hi%X>)7%n~7z?2Ke}PMPSG>TTQ!k`Trs7g;DySZK@k$(y#^DU@(yW7%N> zxI|e7bBYKOJxE5m5=zahn@@7cc`(eWVY9;q{N7cUlMr2)=g@qP^U=YXa}6e=LK3Bq8l zHba)E4zsSEBV?qV+L(9~lt@W=NE|S3m~+5@h^EByX)>=2X?V@iZM6YJ>Mb%N_qwB6 z3rzOZ)!TZ5%jHH%X>9RS1uqMB{zPG}-y6ckPAMQ9mdm*mh3Od1nRslMNYfg#cJw6h zbGO!J$qj;Cy`Ll)Ls&5!3WALZ$wI^NDP9O|Gl@b8G7j6_Sa)=^;!M8e)(v=_+cb8AT2pmA6*cwKS76ShVjV6Lf zRJSh9YIdm9JS9pQBnt8s5)W;;G7d=qQ6SiO7K98GEsi z?4jbEaINGSym$755|;})SJ&ObQ`kZ_xZzLaztGYH8Gy(N$A(}zQ%iUoM@u!Bu;*mS zr2w-ITmLX2+s3^H17KKM3g&MfEa#GRD9ElGnWvB;Qn?!)X8E{W(dFhE9Ux=B1*a|>{t=G*sABqYIRqia5{NduUcsk}e1St1n zpxR@a$ek=`@P$hFSqi1GHNOzS+RLt>tn#p{R>*gPX#`v+wpjrj zMxD}HM=5&BQ5oxbL>qm{&LBe?o~04sAgbYlX=;0D_s33e$drf15cI~7O6Tf`(Sq$c zbLh!zCeACNDTb|mWcF9bz5!K(k9H8k`!!@g+L+ ziB43RtL$AL-{u*j2{z|*Q-icOy1u(1W|)|lY8jSKtr=^byRVKZOMN>9;$9t|# zzR0YEQ>N93t?y_}!5Pd5En^L_9z*4W>TxMxF-=*`xa2e}m$~NDUF+PfwrqI3)<>E_Lsi=G=%u^;6N!q}Im7y5&nZFvl@uez;f97hWN$mf zk~ktb$iK8Eo;0+aP~Hn=PK({JaG$BTx+oc~mCj#d%9%FB$!SK0EFy?W#gt5yhs%gg z>ZdNP}kdbFt_|90T=X$wZ}e zZZS~_O6a)&kAl@fQ!N1`IhSA|;d`rsO-~YNM$ZEB8>~tY1e{o0#6l91IQsv%haRLM z$`=3=1@k?)V zMY8J@oq&RBt2SquC4B72=!jMgRLeioi z^Egrp9nE?ix~@*W<>iXnS^y|BB2Sb4GqfKL#(z z#g*YN&F0Fs6JRWFDfz^ov;pUsn=MG{So3sq>9QS#1vH!Gpwku-;3?%O6mVLC#SeME z0h@+**$>0>i>xQ@`K3EHt8|Lk^%LvJ?pzMJmKrkaP@h`Lrz$simcX8>90sKn9IEFk z#>zd{0aU4R=|E7pB-50tU@ehunjUc(brawWcLry8LLi5ZjKIVpf@p`CEH{H7V@V0? 
zE^MKWz)057urmtaNGOdP~CAhQ~ zG{%_-*&8g++NoTgHGBlg{!~;+N!XqY(LGMCXs$FJZMJ(gle~Q$?0Jp^CZ;@!Ri>OD zDB7iH6^c9KhdzjG8g1DY!yExh5V1xfQ#d%XK9nmdH=wi{4w_llF*B>^EvBb;qne9L zLM_oI6hCu*Y%wHg?!YV(5j1RR&|7C{$b^uau8&GgPqXp~In7eU!d5i0!{LI(Gfo-_ ztj$Esx-y0u24ZF@HyqPn%|Ui0_D>iEpKY({V1^CTn|VE*)-w=<=LfO z6pzD$6BQLm4~$sqVjAnWdIJ{{#!{u{#S7P7=tD5ExNk3*GT6LZCVCZKIuh@b;j8I~ z9^Vd*(5%pc4;4)Z583ebQ(qQ(!8nR<$#%Aj%61E&+l znMsFHI|V6#EZDK6i-4%ixcUo@w$&eSb{Fzn$tF!Ka{@_@g(4X_h;XxOoBX9p zbn!wBK-F|PEcTBiN+l^RR}wn4$yA$5hg9%Zl>~Pce)syPm5mg340J)_I+b2Q?dIMl zFlh~`I*7TQYM`dJ+c_iG^^6+Ocz%7*9Ct2FieR(RSzsT0_mtjFXP(Yn4LuJ@RXLT0 z1WTm)e-QtiNF(54;e;wCwbHBLT?N#*#T(Ch7hPnTjFE57yt)^4QNI8S;8J@1ubHrD3i8qtc=|LryDPr8q*LK7s5(i_SP+d85;`o`gkO7Lf&{u@>=7Fri zaA&;0Vx%$P7RT_UBMtg7l3_Ab6$DNRcV=M8Q+1EHIE}YQ)Qh+*5Ky2So`S}1Oh<9; z&e?>tb)e9CAS6u6D=0~Ea--Ly8o^MfgdV2sAn;(7^XstSWIo_phjR0*Bap+1YCBg{ z(G5iB>OxIa%tM`+Q1P3bD4KzBbicPdoHlL9%&0Lh3s)IwMqojyKNLjE$a_T|4DH1Jt zxY(s*j_x3YAKY8TQ-P4TQnQr;rh+^3F6W z?@;OMoRxPJ2HIA36`jC=n+TxLb|(Zp*uL3Gi~m%vVF(+a7E=Y&w4ADb19PX0cJj|A zLWkB=?2yt6jO~w2f9Q@KOa@Exp3nGU> zrk1)ck@DDyclI`Sfuy0mIZAbo-7qFW1l85z<>lMgpp8uV1BpllLkFP;5~5oa&$0QK zIqYGeWvXF$3{|OcRXE(0^o<@QeNHTgT)%NeYqBf;IEzw8qxavFb+mv?mxg&O?u5gD z(>E-$Z?l{EPBg6&sN(0KqNo6;0<929K(Y*leRIUQRg_md_>|iyq$W}zITW)g^oKW7 z+{4nh2Q@hZ8)NR6;d%QB=Fp`V=`fVT3)_elC6ddji0?HnT$dg&Y3G-lQ;qLP4mo&c z(sD4>8(fIyag3;9Xar#-FHE4wypjoYf?cpS2MB;p;qnnhh3k2xfFYydYVBPMc&L() z;~ZlUGf(_=?kCVElx1d3F^}y=vOIQ3(M-J;BxiclAz>Os7vyk6Z+1!Iwckfrcgvt5 zY*b1p=QBA=0aL*iSP)L>RBjlz@f<<2lE0h&Iq}-Kd1aBH7@~O@l^ufpizEG1(zMEJ z&%eEU$eI(g1}CmK?DkX(7~8(&Zo46#giWzLA3`kP2Bv3f*@i>3@@WwG7Mv2y19Y~m z-Od`94OI*As{NaDm2Tl2uarb43I;QTDqS66i5)md%Tx;8)U#gdBXSB~v%MdEEsO6q z-?S}{Vn_;A8W>r5`d%HD-F%Hq7r$6jf3s6FmTN-q>b~@SbDxWVvOn z4y8vIVB?O{@yrHlYs%q!>aCZw>2$AI@HI+Z(SsE7WJa=Bg(=Ic--imBW=Vc0%aFgx_50j#M3*<7>}i%z)fs!4e$(;vh&m+&~O$#(=>9BQedu9 z+f~^&vMT}K@>TYRlLrDks#YPp2G!)0Juy@b*{e~)a$j6Uu`)I*3`$Q~R}CEATs0%u zN-nQ0Z%vlY2Q^RZ6iMRtl@CR*qnU~?i#5*_+#SF6Pe_u8LonkS+&Ehr8o6C@Eq!Bz zrE=$jMB}+IfZ66k#}uvp(`~Cs- zoiwgbLodIZ<6Kb1HlA+^%Og{{qi{iqc7#__t8D~M>q~P;%dqHn9@ggQ&~^B}Q-!E7 zTWhot4ZdJAs#DNpenqxsHKRPF_+fsoqBtjWJYpd>qq2QqJr+S zRN6!VhjvUj2sbzT^nHDImwG`V7L#ERbWB6Gv!dx9>fl9z67qFcA=JxV3$BUmSTiT=Tnl+Xgo(Q%{p1YC>mXzn3X6^t z@vY4u+@?gY8>(C7?ckG!%Su@|Tu$QQcc_=zyb`L__L~4N#28Ux6!;3 z$vbG5hJB({xXqOIL4PBvEt;ieaMQxEPpciG51%S*U`mcqv;tI*U4k@K=wW;X4K6!| zz1-uBk@;zKCZJI~ax%?KF&x|3JPk>8DEttNNvG#R1O@#yh^T{}@tjS&okYlEg;`oG zDVjdXezFl92bJSpQr@u`55`5@gJ`-?0T$xYv}9UI(;Rp==8uLQor8=_5T!+Ib z*T{|iPi_fTrFRYzSac$iO~pR7Sy@@+T3Y$3kcbvjScqrRMK zvqIu6;+~XmOY`?2Z9`}rv8gx!Zmlj)IIANHc}De_1z$dn56xDsvn6p|bXy6u_Ij?V zOa#^RG!>81nn%j@!KNxYW4S59QnM9u$jK-Y{1saREny%*Ko4F&Wbaf@&~}ldCgMVk z4?SjFFnS#J-2jVA+*)YibZhC!ESw#sqDvf$xwY$6oZYzZ38{_R$HSrNmLl!m(^2WI zE&_Y-?m7jh`!+4da62yj4yg&s7X-nO=z|1Zrl9+Qk#Tmjg`$RWoycoKH66@ICzvN4 zf|(vC%)V*Cb1QZ*pU50Qg;-N7s0cP(!=%^9vREoXEcw*x%6a`@4c-bvU3YQCv$hdH zp_u`m&w{s}qsPFB5uGT___^<6#exuoaj%D{IS9?@$rDz$aAJpxYHeK zM%AFTh(?WjfIp=MnJN#Oj<|FqULlmk7Iw>YoXy1ZMsgglTh9z)f&quicmds^cd5{C zvYLwf#1H)15K&mq+isy!F|n6AHksVp2_{1v#@d8E95Z=#fn=B*=IQM1){?3TKsk`k zOU1jSte^_{Ot~yjG(xuv2t(1TNCk@qE+e=0E_+3JwvMEV`FgEitGD#qmpJN#J~?^) z%U8CwMVhKcDWRQ^#7Gd$SHh8!y;%SFf)+~Hvs8_1cKhH)LxCoy7jTZPE3uFg5=AC* zoVpW_9L$}>tR!}9ldf=Vdx|7B#q3?dlfLq)Wch1TSk4Z}B1Bk$zHoWOdYSBBRzWFn zKv?F$(NgXSx*)rGoK1+htaN+~2J=vwUl=>aj zuhjURSb9ghzJ1Fri44bo!Zc^>o}wrg*}pLgc~te7mAwiQ4CO--B2+b zvJeEgkC$b;J8h&~qX-%t6(HHA4IH1^br62z!AI41(x>+z+Wut>>!0wvN@Z0QVMcfo zOISLZERpcCm!g{6E(850{l!z5au{Xv-ITm^7PUVRi={MeYxgtV8*$KxIo()vbyP*t zAo1oKwv_%MhbrZzYsFp+zofvzqAA`6w1e1E=(n2n<~^-p6UQu%H$)>N1F`o3k<`lh 
zKrRN}91;k2QF^T>uT6r}!^3Jp$>61JLhi91Y82T?yj@_wlx%jp(?aN-*-N=8C3xsF zSU|yZ(<>5(ICI_zi~M45iIRz*1bi;`=06E!VaR7v`HvEQ>FA9b9*|h&l;QEvR^jXM z&;hc3SzmH7h^pZrO3-?i$Onk5xHp$ZqIpkXiJ6h&f^ ziND?2!4uU9%9BR3>B6tHQFbqYZ(wRR8G$s=zO-Akrhkl{Az`rKm}pZ11@SWzWS z=Mq7`82u)k=rou=!1(+^l_RG&j`zaGbpFI@o(7tgx#Bq^e4eqLVrwLz- z)K`EeQ7^0xGXQP{S50-BjT0?|H2cfypmqt(yc56`^0lTFR zl$=+Qz-AN%*uq%=vP)HiSYppZrKgoWVjX;HGKH3)h-MdSp~(^kq_J?iHM#w1Ng5J( zsPm+}nbRAFpUW9EHg+}xwJU8#Hwg!YX6m%^BuUq%k93r?j@B`Zjg9qT!eCp&tYG7JPXe?uCqfU@X5Ua^mbvWQw!;LIR(P65LQd0Fo z$+SBioorsIH~T6wS;?p;YK>hLo4+UJ9QR0Of)gvXR1q(b95sB7Iu_%O8$<=73a$)C zQW56CmOVu{5|;j$QDjeG|AA~^*S6j%AsPA=9f$>*!)H$*j{v9~8`|DcvMGAN)W24h zJQ$*I=wb}QXwVjkXJ*xqXju3?o{GglSUJk zoX+LV)~4%$Q9>9+h()c<@}5^UtOyXLhq0>HA0o7`47D|CzH+D59#SSTy~PaD1Ah^c z*Hw5dxL-#8p`rp>4O1rboB{e+ah?GEB_}}t5bP$TaFK%yR~qO8VjxB!u~mn4hJ!y^ zT}*$pI-C=mkKpg0>D85f}H6Fym;z(GpBPT|0T zr9I#{>ERW(1}f13?~BHB=%{%=jrgKG@?kkW_s#v|KvJ+JKjn|3)IBP^a;lTYQAboiiznjp^jU#6Hzv7!tjVS zK_Ar0E|^53{EDXLBp`tm*cEFGs^UjBu4zU&X~h**1IlyNPF7`ZKUe91Q({iLAyEht zdn^jsv)M;Dt1QZ{DbdKvXSd7F7kx&E#CrZc|AQh7P zqe1T40okTGO+(zXNZ2WKBSV8WFf$6HpaRLNZU$W3Tis@ZLQjK%nPkJHpbP7zyQxj} zaJ5Sw_5C0^6RUbAZkIw`Rkj-Cup(UU%xo2adS zwcjMqnfJ8!KG$C5tJVi_W&Z1i^Mn) z2Z;eGAxbYAw6?ZUw2H2MB5pu&Pr7avk#{WYbe$95fJ(%EvT@Nkt#Ystu@^975wR1s zl0#Al?T0=L{F%kT61~<%A}(HhuXp+$Jw>Z)<>-O)oh#=9JHfiZxf<=^RUHKy6^|88 zFsV}xtHGdlRXeQe%bk;@62&`$x0NajC!_Ppdh~&XJ4bI02^1BQs^MBO3M2_@ZiGsv zhI7;uuY>U-1{9$tOWc@&5Pbko2;7tS0V>+QeMpd|1RY)m9P5=1w!dikCK;=yQNWx=a;G4zXyh*ahcODH)1tDL?}6Hr0JWSWNCmfTrwDd`*eFib&#aOBe;&&eXp; zHtwY`TtdnHigs-I1f3OsByx2E<^fe2lC&iGO)0;Vw-^MaMsT_^svFT3BMKP*tf~L9 zq6&Ms$^f$rO&eKeGuU8a;}RtNl%q|f&5Cx%=WfVz(9+>a?J26~p4=PXvDD>|m-#+k zikGYv{4w-)M#}o)C5(-_%0{X)msn|{d=+Jsz|p|XN4AAf$Q!Q0hM{O^l#C4;cgx4n zlQo@lsc7Q09^3%^$}C;tOmQJClAF|k73@WEI#|te7oL%7q|^h&Y4vf6QAx|~6gL&x zK*QfN=yIv6ROC4#2(%8o77O=Wg?PpuDH9DYn`GSv|p z5_JrJ7AkC)j$Yv2p^zB`1*}*xR@pRwqrxQ&*^QZd{gSnFe?j zQf^L=7O0lf%s83og%bH0nFFYb_e>urzUrcW7ia~Vl}@!m{F+!+-4}u;{cu{q*+wO0 z;3>=wK|W)LAmeBblhUTgavCO+P22ZvKZ7AYkrknFdk+-LGoDfu9DALzz$ONqIKGhu z4-qkVu^g&KstL~|e?lI-ZgC4Fh}rd_rEGbjOz+FU4t8EUDX4y}BE1nxlQ##WU|D#N zH(O%3P)Z6X&?O;}D84n?=1NI!nWTM-!tfF%J?iGs2rM?4SQXdw!8xU&L0-G9n~Czx3v0;897wh6hR9TlM%4z)eB9%PG2*}!W?_8*lC5J_@I01 z_;6#V)5p8W0bf?yiR;^FNMFon9Z_fu0m2~)Mx5Y$K(XzmqT#jN>Vz6u*?^I|*`!2% zsEZxMe^WKi*!9LWACw@xZ!bg8EGD75I@Ko-?nOQ%JHjmBiQI=vNtt2k&5#b}YgZ?PT<)Ij|9nmxw5!Cf*65Cq$<(X$P$Hi!@@if3XLMaLWSh9g*?YEYqP|*mWE>YY(RBN1 z2L!R417+v4z)y)(fQ&dO5Z57uD0IgPriz70h9n)PtmVgt#*eS4cwi-Mor#6(Q*j88 zd#}S*C~bB~CrWw6R?j3L#gnxi>(ICo!l3e4*_MF-MJb0vu>jV5S#mPwsBtez*xoB; ztRT|hB5L|IA!T&VJYZwxMiDMqKHdos-g9!P14qn_y!;uhf$ z$SIVllFLj$Z&oZV7aZNYqAs^x0>MxYBXp*U;v>!!w=xl(7Y=Mn zxDn$8=xo)QL|v7b`V5uGTBL8miSU@8vKZ8m#zca0ZKK_E%CsGuV`PlA5p#*-!=VsN zh~82`3Y=bE+Q9l#kZ;@#50e&mpTe#r3gb&w8x6@C*{CVO-04D5hKH$%ZPK@nD~k3@7x8Zft< zcv2$e#KH(q{?wc#Z7-QU?z=jtE2i2>GqFe!Q={}XsX#zmX0R9RcSN=roX4yV&zV&L z>(KO@gC^d|LzTCU1n{CSf7TEU39E zascf_s7_pMq_cxODa7yKMK`}{E#xpA0*@SP9oCU5b@$WslQF}fxt^y!q2bcPo1d_KcMc%ulz?b0S; zg~+1%jBGlsOBHvKz(Elzu5;1X5Z#l?ucRZudZk{Hj1a8dY2p=c5|&LMz)~OYN}8iT zU;S1s%Eg>grO(F1WKT0-Hl^@!;~>Z6csRopSDkpG!VBGsgi+VV^^h(XCaXr{%||8k zO05OghJ`x!R`^JK4YUz2&V!rnLfcb`Zeb2`2r^lrfU2Pg&8Sgs5~p0yen06&f>>Emg2LzjwDM*Xp;&7hLF%8j`0(yL?xW8-f^#S zPqeX1Iu(UIKvh31Ti$HfKID9_vp3o*y~6blT9S$tKLAf6 zh3HuFeNZ;0oC@=#g6WFh6gV6k-aeisfB{dx3KKIq@=qGwCA2o8#I~%H5)zPj$Cr{d zl`??78AZpI_QIrKZ#-AxD^*$DPas=iW~trVqAE7!ZWj-3!q2U0z6SQigSU&1O9fc(;d7g z><0t`xq~N^w<#L!K{>`8^&tA<-b4nz0%3a-l2iAT?)Vi9kl`3{lZ<`bc^qVa56Hcq}?7$8S)h1vNb4o2h zAl$eY5ZWD<%j#L~D z9f1rQM`lX8Y8;=LnXZcNR3b5N=7Fc-#p_zL2wqH#A4lwx$O!YauGltE?;(`W-)){% 
zwKQ|I&5HWiBgrMNLLO6l?Fv2wqf~6%Imn81s1>#)RGmh#AR%1><67w|E=dZDqF#}PCcg08;8pkR8H;=;F*-iX(_a*5vh@hf+R1*cNr z!IVroscgoLCU2#SMlhnm#mni3*eNa?5MAkSDfcj$?_Jv#k?<_CtcL5y`P-1-)_3|8 z2!>ZW%gdc7uX9S(q|8~P`fy2TLfcGzVT6z^Sr2>CS=*?H1eun|9Z>++7DUBrO0X|X z>nA2oD|s!(u)=Ju(MH7TfUC0(%8lKUwyx&2*5W?HU)<2f*=5qaBbdU@EU%tgSdO9< zJf|vF;8H1{$u^kyk!GDCP4UdFcdX2vX`&kNag?QAZ=ncKX=c=-XkklOF(gmq2vbxN zvU#rOX2LRjVMbDocHu0A?wVB-2I6xp)GO>TUPd#&15AcuS_l=AzDmUH6lUOz8tn{E z*XW(ztA^%kS{cjkC;L0YZLal#&b$JcXg6tssW!rTJ0$F2&*uF2hNX*Z^xhUq3xizJ zoK&IKm<{@R?X?R!A@d`Z4TC^eg2u|FPO!PhrIsvATB8A~r4P$WKuTqVK^X)}vWdQ+ zx1|y3Kd&DUGAuV6i}MN=1;?L_a8ZmO8{2c}kFg98H`md@M=+-?p{3{pH4p=Yelx3G#oUj7RprBBV6HHN@I=(}*(dnITo!$;p za+f9e?z4WA7FXldXzkXP_X>8QsNgk5l6HJ8@n>Ah_*F0Cm~^ADKoce>1P2SL;kMe( z-z}(M%e`NiuK)!=z0RVDJ$i8281V@^Oyn9;Rv;&8#S?LrJDH4i3dRb1C+x{v)!}{x zE;6z$t=BvC_GBb3C8}PZy;a*(tJhJG+tp-n;D=Le0M~aFbdJxGmCKE>Eu>+J3MFL& zw%epvZ_upoZVq}njarxS3_RWKKV~m-O;}CS(pxq+$n9QEn#NPT%PKi|UfpR(8@p6% zZh;TRs9}2$G8Wc z)KvETF-5LDOr;58I){y?SMyjVs-R|N8~~sY8qaIQOeLQx5nz!j!B{C1d`XP0Dxm=6 zH;(J_W^q81oyckECrVAw%Py2dS%SNg3X~JKvcx5-)UPG4Jyx0F}IGn+H6;vO1xuv9-{K@y1MV(5bbfSnT7cEJ_?ilW~E}v5Kq^PP+%5vRRH6 zrK?yCuMS6eEJ+1JwVZ&y0v&OTGFlEvCJz`xUy}31;x#C>yhTBFBsBxn3`sr-PsV|< z>swPWqP}y8h#U5s_10#q9^e&00g13@Vowc(onjDFVi^p$qNK1>S~Y?-?xeEdwYr-< zer#L#j;4n37J@6`#!2-J?>r_MOlQOgZJfz#nxO-{7CNpDu!>1rNy-sIV}a7$fNFq; znqsv(M!sa|xxJ&=IxkT*9n}<5m-u8HUXZA$rHpHyMW6bMt=9YP8hf4KN=_|)a`Z_? zvZAUfaU~F^=@<7FxP|)n%__qO96PBvh#zD&kQ5=`xJZkTm<$efA7WiVn>P{B5M3Q+-Vc{G;)%oLN=&Z2dh;Q?hj&Ryu}^t=0sOCn11BFQ8Y{JoS8N znT|Dowx8uwtn_rAovx zhXo-p{fTYK_6X=YRP8L@YQx<@Og7ZZ67iv^!VBYD_(Iext`ZV_5;rb+mg^``6vbq{ zp5M#jF|-r|;GRxcHM=+l(?~&iQv^~+_gcM=pW9+5F5~{iYCi-`)PSUP&=mTe&?)!8 zP<_?R`MU%#EZv!8d5Uo`b0g{3&X)HTlo;i%(e0LmbbB|+-VskZ8L&$NtRw1&Lkts~ z0IHZS4ydxWVR<}Gmg}{5~vX!}cio%T|t6tOv2TGWlgG4&bbq zW*6n%8dVe_p-<`!omk`(UKPi{9G|WZ#8wz49sH91WPv;pPjk@@n-ue&;YAZVE6%LZ z_A0#a-H1l5PO}ksrJTZ6GVfq%9@@RSk1jpkN8n_>LN6dYNpX+N7`7SQpbQC9C4>r7 zoqc+dl1SQ+oB`_3;Se;anL5(hip-(c8z+2aa>n3rf*4f}k2B&ul14r#`<308X&E?K zJRy3nvh8l1Q^h60bARf2XBDttE27*7GGZ(q9ulImkyUS|QMo4)mjF@+>cN2tnz~0G zeJ7eIjzp^d16w^ud8+UWF>azwijm}6*%NGQ3rW}Tn=fgLEip0QT;?FY7KqXEI{Bmv z15C`RvefzrbbP&@JJEu2J!k$)WN^p*MYL}Z4 z=lX2o%$STB*V5S5pP*A4d*bF3QF;<30%6X?Ff4kNsdJ1=xKbQ0$qanMJ@!~`9xEW3 z8?_n1lT{BP^TbWZAht?r7{FKBSZ@l+614SQoJ}GE2_!Z(Ru*(l-3#3EsH1YI*j*CN zS1BR>Nf%i>FkLZJM1`5_2nlV!beCzI1t7HMvAly$LsM;cxWZu^EQ$|yHkznC)*Uss z;H#4Kmn6T?ym}A!#Qx1rAzeYVQ>_RBg;LmYss&r+5ZI~3R07O-7}d9J?7`g;_@TFB zFVQ7cC?&F#j6LIo0cE#IMn%Vi?XmZHs0IYvl%z+5#+UX^c!YVBX0Ek%oCvNcvhaXD zxvpp~W`L*!kB%c|a9k>`&OSkbtE2WP^so^mDfExPhw29SGRC1|yQCiCP2^#-(ei~y z+fA1@$RSS2B|Tyf6vI98Q7mLLglv(f_b7){mOO!&C?ii;oYAZBn|i(W2nUjjYYRAJ zl`}M!gf|GWu>;z$k&sQ>-;oryo3&_MFtccnzMdc=I6xr|Bbt!AalZk)hDGmddxHif zTO~+T7bJCHG9oQR=RHJP%?~ZdrWE;zlh`2jlSQG2r;)BY#x?kclswq4wFZ<5Fa-Z5 zL{@(%$rid~qgk(AjZ|&=(V*2Qd07|sUP1J28MibBK%<>SB7u6O!9uq93X#gMEPk0e zFA(I@G2>A4v`%dFdI%x!a!Ki!IH3FoCa$sGEF%5N+auQo;{OZIQbcBDDIJu~lC4s1B@#*e31Zz$}!x+gN%ZeF2R z=nll+P$jco>qfA%wYpnzm|GIX55b_G8g^%y)Kx9pRJvfcWB->4h+t=rP{6YiqlwPO zMv_C@dP^-zT7D%;S$H(?Smlrb8OD%Wa};kBcAFO&6&@;#K;MTz~zr$YDjKI%=(q#TiXf7uEVDV9=dj+JoE|s1-z&XY+R}<&Dh?2 zSDMA@GYwv3&PnJ>xHx)E8lSk_swH626I6Nal`i!uPWJ~b7D<}d~F)$8Lmu3mS`l^bk)B2~aB z$_GsmvXpQ(bk1`sm^LX}0?34n)v;pO>eWYWf2A&6ow&rC5>bwLOxTo|SC12~AnT%$#gG}Bv8{G1pB zfBN63cpT4^G_Z}F3mS8aXjI#pYP5U(W;cP{xZC(&0;D#_;ja4PrIY#$`paf+C(Td{ z-4wcUqC_E1E4@l{3u_0n9T?%!oO_bOjEVC>*F9lyB9bRuG6PqCp$#v*a^@17m9TkZ z35jJ9%#kfQ>2y}z2#C~;ECXvxMxJ7jglA6ZoX!|7q@B&ZRXU7Pc>;(Ng0WQ!18L1T?%+629sq-oR~=)uMt+1E0h@EMhiaB zrm8AqdB&8$!*X6Ako{U+vZSRX`?b78n7h(wwGfuZ`xG0f=5!&`P3Md9+eD21NjG2Q~YyUd0)J8ANXj# 
zUhCKDtF=8C~x2%t!}fSAC=NbI3Yz_i}voCiqp+2;@nm$;b4bK z?3IyEYaQwF1gAKdZ$Ocd6)3cY6VA$|a=t&qkWEk@lI4mh$Tqnr?rK8e4NmGpp!WKjW5*(&E zqtusJ?;%KH)j$p@q;4duJ#2v)#cyE}lnlN2G!LNKl+Pn)O7~7lw4_n^(xkYNYIVHY)O7mCaBdc;LeDo?0Ido(I(svPztEo;@e)J(4t# zIyJ%{yS)*_I~7*kLZXwZi`#BpZc>;8>8u4nRkUq<(_45$oswEfc%Ik4B1;OCh@Mwh zKgAZIU9qx~BdZ$w(Xs`PkTOD%04Zx^{Xvty>}}7>f5nD^^}}s|;QqKge1nw<##@d; z7MBCrfS@qc1;=5(CAqj(Ci`2`wF}QgqdBV8x5=#L6*EpMu7%^PJEQ*22xgZHs25h- zYL;44L)5kD_Z+biZVz4NMpxUy;qM_lg@VJvh?pv4D~%Jz0gBpASP%1|eZbNXTf#}e z$39}SX%I4aCp}ku@@cfw*O&=o6T}p>%OR$uF_3U)d5-qhOrdS3YNlBvETpmG%*J*p zs!S)m&jw+CQ{g8wo|rI-P-n{j9wZL!B(b?s?=s3&iEHf|g^FE+2@g$pc0C9#J?M|awTfsFiIF+QykS-RHq#ogyl#^KGak5!5kjO=8 z+!P0;>F(sV(kuq8H0XkMd=KTAoMiE4#^zOt;VSAgbEJZX=+1|OtXhISZ%y5?TGmVR zo!XUXhzHXcp-t0n6MQwerX01+R7MVNv-Zkx#&R)`v26Rn&s)(o`3Pu;Tqh{#knF{& zeIFj#j}85QaeN8A&H4NZ{}u6sW8##i@gSPUF=# z@=_#yPq^ydc=PI6(;;pOd93P?e^rG-6|6Ub&@bhJ@_M36f`X_;DL1{|A>pUC2kKM~eL>QW_GSi;&{0-_vz{jejjt6&@s*GK>@ z6l`COl0{JflF-!mm&Ti&RJIMy=Oera@>A}ebsE?AKTN$6B!?dJ*ejSD7-~YBCg({I zEejkbnHtVkLOq;n(}I;H{K14x#T=W~>yKute&kH?+;LD1f+M#5Gx z9WF;WrL1C1a1Isa3l_>_$AYo`dl*pvXLa8aBUiR%WoZ6?1VSx9z<>c0N{+PC9|@tZ zDo0hP?Xpd#-|JFOg2>27XVi<|i-=6yPYCsZ0U#tEjF=#L1QH`=3_wUAF<^pvf@m>f zf?&u5)DjZSTKk-Re)pXFiM)|ErK(ChG9vE1=j^k;Yp<~U(5>Pg?)iwvp9zNb&NW!3+be7|35)+b=7z^m&%|bG6)nca0 zB}t=QKaWboA^m&?+UbSL#-4eQrp~)^-bOpkn~AxdQw&V88@dBT6LIdwBy57V z@$pGdkB{sYR)Eg5foMD(Asupx+>h+_1H{a3(3IL;zec&I?k(J8HwJEtT+C1pCc%fA zAc?I`zJJSTQ6rRRYh&~cE)r%ziPt`PIfm{-Vdw60(XCA`*{AnQ3{HnvY7cDK}~zs*Vqyr9I_wY_b!@# zJtxjTg{Zbm-<^%v#RC~AI@bJ+(YB@vivLf?i=5iUu}0*C>@z+B&v>;Ry#_9C0eh8} z{=zUmmkv}vJR4=yVwJ-gS~$fu?uuT+`1;J5*DQk5x?1#&nHCt1o>_|D!X_mFO85B! zbO;P|wC+ZIG`*Oeyy>j5v;G!BocJKJLYaYl7^?azh!g5@II^x`9vF%B`@!(}zd9q4 zK0cxIK5V%d8fmty5XPt55{&A(T@@ALfP1jbHr6B^h;$N<3h(F5+mY#41eG~`5t^Z{ zR>&KntfR*3crO@_r_GTiiFK=+GrBF6ctsKA!Kg$R(He2p4SREkcqTZ>0z>r*6F5_K zo1x}#g$0r@()_NpWj5vNGD#1m4vXS|K_Nph-zg36&f`*`L@ABQ$EiF&Z|)DR5z zt<#}aWIW;KR2MyeVCz$uan`+n-T~zmg)o49y)q8kliMJ<&EtE4+XMBkyX!Sw zP8ap4`Ha6xkDa@yJyOXI-f`oE5hpDY`}Kq|^QWOCNCsb3m8Gl~*75*j4|sxY3RO~b zC#*uY8S5oo7i_J-3Nw%w_d(CCNufY%82XcoxzK|onW{5jYVau(LE&`ZKNgG6>SK9# zmYCTxNxjOW=4+m%N0stG_U`c;C2#S2qMk%JE*55IW(V8Qc_|D6)SYXfrfD4(4P|r< zI`b}(|6We2g?8v-6P1Yx{4DiUD`WYbXV`{pXMauPgKGUiVT3^~ecd32ZP|S}P4pa{ zB({{?bl-W;+0=GT3iGqbatTBTCwe(GD&rNbf7F9Yuhg%CQLOw4d1-C9*bQTgT!AWOO)^nDhdr z&jfkJ*rM2f)*WJ;TOU__FY`4EMOcDEdCIPn;j?AxPWB;xJ6v1E|0h>;K>+yu&GCP`R`TGL9VJJfD^3VERO}BIvC}OIGDhrhL$ppW z-2|xnxrdVY*0!t7d$9p%v4SZ`OhDbV03Jv~^>1)&w@Bm!gijTOhqs)Wgg~PP=1nFg zWAVTmHZ+%k$(c1ok|lyjaR!_>(6^hB)dr^vK5S=gKpCp`nDuY>i?mXLE`}FuMA|ts zDW3FXzVD#yIoz@y=VZfy2LwLoOt*dQBU5C?a%Q?bsx{{rs7I>0A1X|BYD1jbf`Y~O zR}+*Fba(V|n7uck1@|L_S=T$Xdyd!WP#CQ@tKDj{GIMH)KQMhGi^Fw@Kq+ANvK=C# zFDz$>?+}6IHfeIVr}``h1s}$3iDaUprV~WqIeE$HiJjBytd1@j-2WEFX|s7Y-A;x- zNMFWo^RY7d1Qu*Pi|afpWXLXJ%AN$pv$zcy7eq{YiQE;P-;;07XO%tnfPM%}EAh_? 
z7Pu!Y=i&`O0d%va&%&CZi}{2-# zO*d4hK`qC`vEtB%lkGJtlFuF*ug);I;iGDU{8`H#thNb~GzG(uV6?d2frg3LD@~8^ zmeX+)lVLzC5T=20LC=c}FTuqWX8^tSdU#z^SmjJq@`^sJ^I{sM6YEz;7tu%Dw~0Nu z!JE~}!~kKm-E*r%Yp_2=@2g^41n{CkjK76*L=gTOW!kIpW;=W{0R_Owx)C)^<3Z9x zb(z{E$Vx7^Z)i|->4W-@C9QjuA*~B@p3(mLbKrb2+H!<`K#omzgDvJ?GN6N)fJAAAbcQS%ie)m=z+Mhq zfTu4sD@NU^b^yLz4g9V-f8?Jb=i#WP^)@`4H8)CRu^6wb=hq)is;GUmRo9-5`Y~=W z&1U^dH`calomCq+6g0X_!R65&fi`&CT7cw+$0v&rq?5D$y1Y{PTP#fU69xjJH&1R1 zBV3bd>tIm2Y>_wYrik!3E+SO?(r`X~P!%#IAeE>>1|E+H6kM?{U)IUDtRcd;V-=R~ z7ZvX;G=F_WYo6&ZP0KF(U(kp0?kB7QCt5fM7g_T>%3d z)aP=tsOS>JN3Af=T~%5>EFl%Ag?*vVb?cjr^8i#@w7kP=%J3<5-!VGu9~$buz6DD# z2N+t8ijacU7RK4j!A(K{SH&)`Uz+J{>7nd}DoKr#PtUY7L7BcEAzh+L29RTGa=6HT z&o6e9IeM=qa{y*I7lb^Pu0;%v2}qdq1_nljUFgtSh$0&QKCUsT0Mq2Hb(h1JS0SB(yT}%! z3gmIUSt ztbg8%+A<16;V9~3X^hlRYKIk7xZeoSN3Ld4(Id;q%0%Q3Veg%7(km>Lc%)xH)9&LE=@N5t>SIEBejH z2ch583BQ-~)tHq+v0yv+ogB4(dZ>{m!^U~Jo1Jqcu#UtygjJTuLNof z_khgFZHGXqZWZU`kmV|dMiZc zPy(N_po~)640(DptG?`B;xx$e2fiU3t;Brcm0?4z0dKM5SxqD(9#YNWWi1zkAX~mC z>lshfs_V6$bD zE)VFC4_`)W`L8AV&`o_tyn4CjfB1&U%?-ZhHO|y2KtV>{Te^HMK45-P{_<`i)G^f| z)hMKeO&nqZ8+5!y;@@7jZ~{1#7mo(5!1B4D5#GHOJbNCv1M~Q+nL)rBn-aVv=9Bt7 z_Tb)Px|pD{D~R-_7_>iubonUVX30D2F=Y zK0%J#jdNu8kqAnGKdw|Sf^XFt1VxCk1A#dl)Da*-o|^s0up{*ST_TY&&C1UP;Wo4j z%3-qNWSa5^eOjkVW;Q^%yZ|YO9nXjc0-ctPr!(n~9}JHF2U~B(wmlquV?mY`P!=K- zU-h+P+A`VR0@SuXIY{qbJbWVEkRA~Q_Vx@n1Q)qd1HQMoUS_MTdUQf{gzu;xa;QU> z7OEWanEAySf-dam_JyqkSV<8hB|9 zI?5p@)~r;=>2A9pW_)Dc2TXBLwE8lh|Bm3MHyO~d|XPU1gRn(L)I$R?V72yUrHZTms{6;^IBv20AQ}fC%bvhge2xy;VxSkT(LUR_ObjPS z2zsc=?WhqBwKhXVq1Ql3<}pgD*3WkAVg4+S?K$o-5rA)lcITRfsZ-0|adg%XyRK=C z(%Rwm{xtoAf~FrnvD-EkkmKUs<@V#Un*4syilUOqJhSP>whCJA)R0TQYQly$B#YGd z_O>(&^g_58(WNbB1|QJHYg7utE}yw@6{AXd1Fi;DnPxqV7Au6Q3J1bZr=c<$@jbMyRWrq`_;!^S#5rD zVyB7uGdcY!&=rRfqZVEEhJ88J*J$Kn6@AKS#E=5NmwDJQWJQW$Zo|FRX3#smM*RDDr?zPmQ{hc z!fUF0A3w?9@S{?!1gABwO(1(-?MtIOe-0*B)Lg4|GG0!JEr1C6gqE=4dx-CvnfT(? 
zzMW)bR@%scaKa5#VoJ&ZN z)+hNtkvDA-GM_~d9ms8{hLd{sBJu7G`{tePlX}GT!RxAK`3^~w?6cUdJqXh?O3P}1|1`y!}@0}-g{ zipd)*t|CBm#*@Sz460$MDp9=qH$>8C(7rtGDu;?V0OKZOXCEL_0I zRM~%O^>{#P7<>0!MWMJIqJ7+V(@tr|k{(0pnZydGkA(MU6PEs7Yo8T?lSg3^rvMn{ z12dF)G?9q^d<5#Q)b2E2^%nX$Qm4?=mWYlMjbS_uW-k%jTomqCpxwQ+$^vMcj?aYF zU#9ip6m66c@R5$u?%rd8dcg?bpY9}FMSbfZ!zXmHw~_tMgFpnUv6;QQ3R>g?J(Amy z1EsW#ha_|ap(abc+ilely*=}rXV=*S0Ts(wI*qm6z^Ud5a4J~dx{$yqt~wwM_QL~d zf+bHMxU5L(Z#^iQ8f9L%V@rV1+DSw{8x>@#KKygevK5Ms=M`HJJP~sBhLO_OVQ??y z4arF}u0?TXvkc)WC=Q_74-y2Jy)coVEZ!oh_Gs7u9v^iUO;bAk$etqq7HrEDb7ZSI z%s365_F!UY4dspUi4{9tOfE8N!gd!3rf-O#9C70PJs6kqJdc75_ouU8644oj-;~u% zICa$^Cm;km+aV`E8HW00FoF*?KA)qr?DFLMEhYy*asfzU&YDk#>xj;VsN?2oN}2IH zC$lz$I7)5fC7G5gp|YWRkIM(@9r&}sxjgxs2I#8mTpb95IT5(AtTWMY>RPLS0dxRn zo*ImZoXiVj@<^$A3NpGk;L2E_D|x)TxzyyS3wG7H#NFgHsOY1*#%-f>jx##vjI_xZ z9fS>HL8ns8adO|zlgK1skTu4>q{ck)ViF}(zv^&r1J{iXzO=MZuVrL8z#9g(2r1Rp zdlz`tl+)VzS;)S76BRliduS?gH0p^a;I|r;*krG=QAsUdQ&Qk)7m>l=&8ueUJO)QAaBr?IN1O5T-Bf+I?#j$Ngzb)c8IIs) z++*LMT~R(;jx#LyNJ2^cJl$?rmqDhn-Q}|pCSf=XmdxhFHItVMusT`RbJ$>8p4B~L zgbBY42~2c4aF7oCo56ZEolM`)wo`T+qkj&=%||;=b%N>(L}OM3(RRIC4d1ON-W%-fiLQ&s zv2>xcBkPiLG+-Hp55U-0Rk;h(9fYHIb8m8$1z8w_+5oi5RZ&cY4o#UOULtrQ=SJ&| zbq8VRlkrXohCx{B=^{Df?4=%*`kV8*8jc3^E$sm>Rahpqg%+;DX#`OBrm=VUkbhMu zg!gi0OKrtsR@T4nW~W!;1RtNJG@D&DW6l7^c~Oq6m#0o9z|f4<9NexqQrJ|7M8L(W z1jpSCe(u!O8+^DGfEUAyWjS#^MVo^bc->B&>=ACz=asZ(y(BfCNs!zZ&ZOk=OGnZ( z90a@vq8+G}KZm-Rq|+9&Ws3o2UH$Kz+T^(ovAs`}e#38b6m`^O03fST&;uV$l`@1$ zy|%C!&R=5>Q380daW1yYENGSHkg)hi@ZM+3cQ6MAxunL=*Y*9gdsT+xy;^T!D@M{w zH2vxIc)n$B>qNY8nHlwHbizKP;CBoPPT>WM-*vWq2-5v=1}2LD@}>hL-f4@KyM))>B;o+OW| zg>C77R)cM82{rN_D#D*P(M^XWKyee};Nb_nCorNZE<|Zh1~I9Q0lwO-!lzt&+=veC zgV2O)#%5*BXqFruSOC>~-s#V}TZwJ(jd3P^0ocaF7!C=06&u-JzHQ~)Rob`qz^@*vMG+!wx zMR=}3^00M)Nqvi=ijcoB2=GY1s&p6-l(a50Ri(RDcay4wN&+Ptj)(QqlwCx+7gG4B zMmiCQzJK4jOkv zk`Jj9efVuo^lfF1{ca!~gAEhLNr)DhOM>R-e$oK=azY2Wl~TukI|?g;naU!H&6*O8 z%FQ+9%rw9@yMF)`*hxO`biRM4hCrQJ3)zLyquUk zBy()34bnk;T>UF#iD4)Oh?^;D6??a^VsC#8z`^vxWV+_GvWnZzjMW|qH0>DmhnGNu zuDg@>rqX%8-Ej)fMhJ`gkrqW%_?@<`TTj>5+bh&Ikq~?>l8J@PrZLbF9ub2IedR=( z$m`cW5gb2-)VMD-Im{qy?t&tzkYi>dI3MTJc5V%J;qP*DVNr+K%L~yn8jaN7fh9}}HHEcfhhR(6{+-G)F~Pj}fwvk4etp#116MdId8;?; z6l9;~`*<@2rW*EIEtlc4Ps|m(^2^!sqA~kY(3@17^v;=XVB9 z*?W;=WJ!d=P5t>`Y+P`Ha@X%sk($w@67rY>{Y2I^E<+w2eLG#U#r({qeJ-9&FUQw& zTe7TpguTvc1Y57+SyY}Fu=|CSiip01Ptg)8o`Sf<8r5JCYu#kXRm|^Ct>!@Wl0EOb z-^ECV-hN$E?@lSXw3e#_v#h4AQW+!dX(ohI3tL>Nj0nHp&ZpDLM2;Xb?)p$!Ym6va zVIHhO1MES#2U3OlAO_ZP3N|~zb+Ex!miJ_Nfnkcf>-Btk^8EX)L41(k#{7*Xc!R!a zs6%+BefpG&LV(ZB!?6Ll(32gKSYwL}?0@!!#{c^9P+EZ_iOOQMPfmOsf_aYNLc zIUCY07nE_bYf5s)D8~E)?t)s~$WQH}Ffyjby`rYsdN3EyR+e@x06W$=HTZO0`bpD1 ziFm?yGOY&bvmL?@d3?TFp-q3O1iwiz(RY_VQpCkvQJN$cmJlqVqT&g#mr;=1NN8p< zdYQl`a+T;KxEx=N8=MMuzEl@%G(LcK0Y5fNIhB*B!!RBanWe0mN#nW-Jt6i8xjJJKGW zI#q(+TW7hvw(cb^K-y}=x$-j1BbSwdP#E2N+vO84A3lJY%?zTF*^*GV;R14{V0=G9 zH?w50-ddf*azC{-$JI7hn}pv7V!q55gm(a~e83%vlF+cq8$LStP$Z$$4yD zZiw(~>6wk2-6`SHaVuaFe{dz&26Up`I5jor(4Fv`gA{}%cF#a;G(p;sria$s6N-*a zm+&Gk7viNH=my(eSXi%8+c_QCG;hVv$rqq}Nv-QuGp>#as|!ZNvtIIQJ2qqOQw+o6VN2SJHqAG1Rlv zpgMwMk<}Sm;wj|T+Qp~>^zsn~RxbEl6#^LqKVUnumP&{FU=%LPqf$_o*`m!=mZEIR zeFYY)8O&B5?8*IatvXHhOt|lhpwt$e*m;l!i74q3eQ#P@`tHd zrGr}^2K^bYmA;46^u6nzR#c#*(&$9{S{iB~{pc%290bK|8%Nbt4I}tQpcZm@!>+fP z&G@8gOPg4ZJ9YFZm#)|wcm_t~c;vDPUrURJ@N0(d27xsY5Sb{^G}-n!CCSubPtX3E zHB-0sh_!tD<5w`gXuSI{KA+tTh*;dtkj+FZh&1-fY)N#De*OLTUw(Bm8hr7ru~E2(^0h?rW!yQ8Em@B@+bIZ8eQR9SscY~YT5piY`G_k`k$`^BWUOXE z=Stx%(18vrc($8oBPzHos7VHB&s7EODgEaU74BKl_`JOci}dN#0i+?+1aHVs&*4j! 
zjGm4+qkMrRk1-|is}SQDPknUiicA2C22Ek-m#LN*vTwG!l6D%$|ZCeeM&*VObno2LW1Z; z8(8)t1_v;|TG$AgCO?7grHh&|z8xZ?R0PGo>IQAzvq7!AMb*SN*ev=nW6S~B>^XRI zOp)1{=QtcF`7aq`92M^eC1wn-O&Xr1Z7}alD0H%F2-YKNP}=I}h!zW>=6%;*+BS_; z3?qGH3q>ce$IKYvyPi!tim?wsHN&}7Fj+O+9X$f6$i-@5=aD&|bF>@3 zvtE6{-SisFIWUz}cHSRKW<}MVXAN=-YPml!b%@`4HX>dmX+3sIRTtgEXZ0i%JT9nN z=q?o2ArDuGTQXEBjEqXl+!hECOs=d;#p-jEB&)wA4Pu0B@pIhK!=ZTVxepoxB&*+Bpr5r_qwVWVER7hBWMm;kHIoKKfqsvd)}OQJ0b z;`?{1ieReY1K2rTW6d2j2u<4{2PF@}YD8L)m?nZlDoa>i80^sO`Rf?vjH2!{ya=Z9Fx7m62UM9v#}w zx~Mi%j{AQZ0>xDdl-1!kBao4rWo9@$lF6a()3D)g?K&Qd;bLLqXb5o#iUlV2ps7I~ z(DK1CxDy}q)Zi0THONQs3D}){LnGo8lkjcO&3#Fk76MEv6gQPk#EYg6IEgpwejD&mK4L3SV`*m!qE&U7>ivv<%Ps7S#)b&t z2dr3r>b>)O?85cpkt3GrHrWvybGBQ|hy#2pzo+52V4K#wx>K03LRT&QfajC-M|h(Cw(|s z;5nri!uZWJyE(*Gtnq4z3@gh8r1s5tKD%(KNi43e87>!Fd6$axuQPPUM_$9Z3J*1X z{q~L-(n&aRg6IlzE>t$VMCLo1M48I7^X6%u;FmM#sgivTb){biv)a6f)d9(C&%XW)*t?#*mH`NIn7^%tAd7 zg>^1DfCJPT@FpzI3V1VBKl(_Zy$R~<6{#&!O4JV?b@>XMSLyO1%qT4AS1(j(^&7sH zkZUs)_|9f}Io(WI?3?dOr80F;S(O zc8KP|!F^045eur6JZD>-o@$j!*v+GgS`}ma+Km`;(-i5I2cQz;qTU$3?^ zf`Udn%>nB+7E~I0$Al0}N7chE8H{}KBU_juP3-dhYGY@&msD9pP|{n%r~;==b(}%c zdL8!ybCbnQbUAwfNNeEAp5!QVG!G)ZNA^Ts&R65z@B*7+NlSH2OJKRQX)^FBOqZZ_ zAmYyt9=yKZq0jC+TZWz)M+gNSHt0J&H9n))Dhi6)HfBhC`2GsF8aK6>uesB-7w#Fv zPDlaoY@;K*)&Z>Um&^e35TuDMG@`kDZ=H9c-dpK#o2p8CCOlu6`^*~Ls16w50Sfpn zcqyvm!g8tehWx$iRqt?SZ3GCv8#h!fqK2$H5OKpWSSCT5zu?3%u zTSNYc{lUUD4VLxyttI4wvtTuN;FnfD>QfUn+AH)->m2BQwlMLk;qSCNIBhCC46pJP z9>M=NmUn<`Wl7N;_lo^|`U9p`jJD$sBO+b|GQYtnx7!3bgQF|pXW-={Mn#(wI~7>M zbQacRkj^_VepZ~BpgG64SLO=BejHCm2&%d%Xx%y&v9<*8ApH4G@B)j~)+H*E7EYn7 z)eCsZ&Uu)vb6K`o^MtcJ9KR_v$0sXw;tV6p1FfJu00moE|NMG{y!On8>wMA>ZH`4q zQVd*{jD@tkhdJ|zPq_0!qaQ7$=0{4me>fKpOIqoevV9URT!Zcj(x&P;xkDdI+Tm_B z?~O_-^%+Iug&tQ(N@*r^Z~2zT93wpiL$Rij;(o$U1?lkXqaflJRS7tLo zpviM$DlH+2AJYehq0%v(xUH_WwCaeZ?;bqn^mz;}o4d^7Ep35h?{2nB%=cdIE~DJm zC5XzR7Z7RSw586fy7N*Qs!YSKtv^AB6Yi$5V|}rzkTG)~4sc8OUcD>+Q8jv3lcBp% z=YkiPqoJ(i%g!|awvk7;liA`=_GGMd+);&$4A3B-B7RX&fl!|4DNXLKr~U?M8q<1qB-80C%cw!7=)^y}4Pjpyzjv>}`0aI&Z*fy#ncvTi2gQ;GM+IUCy8=s;H6Wmf@ugw6fTEK{qFV}^&W zOXiG-Qa!tXZF!a~=Kp>rpgn+?`Jo1;Lc z-4S0&g`dBy$E{C91l=?zJ5I!k?JU<0z9TY|{M z8#hQr&vNCW1dp(WROQ@rivV%H%igjUu*uPHUXU&R(e=^|hr^5RuB4h7z}NP*`N-%X z>#|v`{gbB-Pa`gNe}PUAAD9b8_Alxl=Y<*-qd`P6ITmh@RD+t*4qeShHnzi^MVT@7 zdRliE#NcLZdbmS-VEiM%V7d=h&gd!C)$hB{Z^%CFaJZb(;-jv@Hs|>is$Vn9L8Ej3 z0_h*y0P3GQK)pmLMjo8x*HAc0O$7pl4ncs3{|BiTB~VX6N*Kaj5N@bd{3Og?I-8OKt>t`7!JY99KBsfSX>iG zMQMw>1iP!*#M4Ljpl)N3Ue4)mxUc_g^y9RUF6qD%Iobcfn zF2&>wnGMVNO@_x#$WoK|Nym`=f@sx(L>Acn*Vb?{c-C>Zpl82PHD?R?5Fl zO`_3l*Nx>k=yF`|+rgI!z!C_$KbUW%TN^6mM9v4m@3lEXhU7P{rY;j}& zVHHo|-V*l^q2|6vnEd?g1b8q`(6b)7QIB|Q&QQoy-Wt6*JZCu?)($>?syDaz4VX?y zsE>*sM;=Xh$)G@z9XhJn3CWq%m3i^#9-a(!CE6ID(j4j!5nqP+x+h9eXUx0dfIwSh(NqHsjfDOSB21s`LZ2x3*icTg^*z z6j>~AKRO;s>-#|un81Q@Hl+_NSn@{3d_n*SHB-fx$1kHRGr1i4Miz0?Sx#5$`Scn) z6Vrw451N-Mb3izQ9J}203ZQx)c_$090bK!8Fxd^ZyAvLC6?LS=XjfS-AlO4{S3?k! 
zR3mx3MZBU=X7*kjM~4@l=^Aa(w(41Sx!N{QmwSDTe-Ww0my6G`o7Dqg&R&^}jl;kA zN(r<8)5J9^ixfnlCR2kwIJ(@Rb}EI`%OzT^Z&g0!y_iWFPu~{; zF!f4O)jx#Aha-jmoo&n*_JI2gza*}f7SEf6wTJTypdkvOl%IUCI=_$7Qc86nz}BM0AIo5#GO6c>9vo^3c( z-PJ?9?r?; z)Q5S}TekYx^K@2p1s~9-x}2(Q=iJ@a;}mZ5Qcj9mH&D6>_i8WdHt0l*YR1X2sF*Jk zoZbV{YO<(MWd!$}H57S~3XUbhxrP!;Uj+`NBcS#S_S0>>&B+Xoz?eN+U5>yE^=@iX z8{h{X11P+^+N|DZxPYUh(0}?3YNI0lnC+fOf3W1tjpM#vouwi-m|D0?a4lx08N zZ9Z-;Sc|O&c?UkwZAc$9AQDHJ#`%^}aDekNWjKpywpnx>rEu`t_WgK$OPGl{&yN7Y zdq3V>jK*NLH!~io4}5M!suXxZN!^MbtSWMME*R%Vxi;wM>mgz{mX%!!w;#+t*Gi94|u5LMiooxKW25i+6;ysEKFh83czfq_q6=hG0L5R2wg zy(9=a<#!kd)|hOOxMc$Ag>bMg(KOVe3NhoSlDtswiPky?My-bPUgCOoX?11wGYgz` zThw{;SskiC69Xv!x~iA@3$}3+2`7VqW;oO4*turHG=S0=9+~p(^uu&=vrdN(LPo^9 zI<@Xqa|?8XdabDkq*@k_muOje;1uQY#AJ7jlRx^f-Hml1kn`U{i^ed`1;#ESa~sSQ zsqrL}Yj;Uk45mN52INs9EKni4kv@aT)p&z3j#9{*3?EN`lf`r_CSQRyks8(A9FkJV z70z|gbK(@)J|ve({mFe-pcKcG$#k7=8I~b6l;XQJ1kk;y!lb(1Fy*IQT~Dl~dwzViXZMT5V$=hGU(jVuff~Lk3^o2l5>vc%{@-fBrz`9I7i?RAmb zGyy$CY=hD9*hcMf|OuJ zfY`i5!S2XFqOi{Xu_CPvpPhRUf-nHIO>`K!paf7iU%mGrPHtYRaq&*UXt=9+8|hpK zI_;GY26m)ldL^Id58BKQ@3FTHS1ZDo(t;RPki?SO%x?%t!MO7pctk`Nf?ZN;V+$@J z3%}1SAl$nvzoZhPK_66FX>FY5n^eDZ1<+r11yFD7WNJ~p($IBiQXNT|o85UtkYws5 zZ#mXpNGk5V2mT?AJEA1io~O@VuJ?;dL-E9osGMJV5E_yjDn&1m z>Es`TN?Hv7g4DKv(ilH6gW6HBn=Rkr|I_gzH?bWN9X>~8P{Fw1jMo&Cy=rww-0KAA z4ks!K2i8WuO8!wFQ3(Pt>?jaX4Lwd!{DW0 z%E-Gy*cx>vJT#gWG>--{!v%*oHbO!LBrpps+;Buoh`Th>LKZ3P$nN8M3Tjh7>Z8%A^p>(2og1Hn)Z?DcZ3fPiUS_MUNV*KX z<@v0rEUSu8+t%C5VcV7MS@UuEVr_Rhh$4S*EuSt-@)#A`Cow^P9R2%|ptOaySAse` zw<><2{ef#&F)X;W^+|asj@T}zN>z(ylE43-)I5 zgVk^uBk@a>nQS2`f}|?@_Q(F3R>?n|eqfFgUYk`LE=Ei8cpVohFRY@* z8j<~h{xSq9v(~TxK9_h{HbG%X)H<$3 zMqt8j3**Sdt@K8P58zg>VKV`doL(3wq24R;PO#s`J@OSt0!l=AXFQpXuF#k^GII`P zWDTtOdN!R*-_L+ql+#RmKHB-9kug0i$TN5cI1TtyUK7I3)Ox7+y=%Y2S7h6G@OsJE zXRRJ@Crn0C%hOSlB4Jg}moDp%Q9G59SDDeBa9TVXdhyhj~2@(aF z1hfo9SOW)v$KL?tQ_+a6g*#vf{3L4YVk)o}*#Dq9+#QdXQ<09WqEJDF-IG2-xL*(^ zOD1H*hh^_=s z=YmN7XJ!O5LxP+*8_nzO?%1O1j5#RU~v1EjX(-N;Ffl7fgN)KRmxnz4m~ z65)Fsk2jm~$CJs+#Zw=1K$3T>>k5f_n92fLp(d3D&EPtHxM|9o!U$t5$3tEX+c&t& zc4V@n;X2!Xnz-3;2nfV0cxEqW5V4jIPmw)zw&Rv*Ok>?F6?!0-S=H1~`BJ0YCRdkb zYc*#z1V?thVqTTwL~8qh9%TixuR$YnKA!xkz8iQXQVIGW%AbOom|bRfck1wTAs@lir{$4Mpu_1XUcFN7XHbZD}yu14gP31n{SWL*7Mnp!RMAUS)i0n zNzh-llGJG~`N^_nVuolCjD!sp>ih=&f?M@oBCJEgDB{rPwxm>a{?Tvaqv^(`=N7q7 z;c2*!E=TB-87z-C!b6XX*~4q4yhX0zgVA$*A>%Rm6+Fb_I2}=}<8@tw+eB@(G(Fml zEh$0<6C;dFHv>ivFgHCn#e+^VlVsoV%0?H5o^CC&Jc&4cz6To>eq&(GPhT`>vZ^i1 zY1CRzAQ@X{pkIA5HZv5d&|U1QM!Sk)4L+XkD@ESMM981=WY8-nq5hbObeqh`2aR2C zCBHGlCk}hxth<(o;7IA_ABzK6KQ5`;=F9YJ~_ARC%g1v|z-e=rRy47(P%ous&j~;OVQkNLOFZ zHd~~gSoYuWrS&lv&HqI zp)-SZf!Sm;alY!ibEzpKizE;mZxM2Pqi4xyRWjCi_Tl6OfF=kq zA66&dPJb#sn0`@sD4StY!b*%pmRH-oz-X0(%Xp5U9b=z`x?8x=ry zQ=ix#go$lx;s9->bURKctf-Jay+Y;t3DhibTL8M5Eyhaz7<6Xf!81MCCCe?A-6>Un zBJ;kub@ct0vs@ajuqL(QY*o}Br_O}Csz1JocMzk$Oky`*+H*`LcNBJ*djTs1Jh4oH zUnDtPf5pd>ReY&)z!W0!KMYtUzh2 zBo&KOiLm--lWMdpN@0}4%hkI_-%fXLXLi~Q;wf)@u+G6p+bVZvf!y?(U>}k!Kt|#E zi}ke!<}jEQuep^p`aIRFS=5!o*%THlm_cimmQ8mnb0(5)igXH=Ckgn!}$= zKd8l^c|d~Xe59L5qQjy-ilJ!As<2PvUvP{JJjEu-d}yF~g@$}i+hWF(xP;^jg-CpL z9_Nq?16``6zC>Ohr)wCcs#?y|OFc+Jc!@UW?Zblx!VvM8_Ggeh*-5TkwdM^GT zWv>$DS{PTr36eBfg;u{5=>+?p)`+?u-R!UqSc~W;pW-H=ni}CY!aoRDtVP?PJn&Uq z9!MRYsD5J%j?g4Qfsv+na+MM#?Y$3*=&MEsi5g&$8T%n5&sYZU!x)(=unthOvlS%g z$QnXt_vq~cpGTR<2e(5&QzZ}XzS=pdxl)i8>~JcQLjB!OF!#|a$EX^|wDk@J#|u8U zP4sG3Po{Dx9w`AGO29Sx>Lzf1q0upJ66YzJZK}!Ioc0K=zNc+r>9in5X%w~xV$SRS zbej)B1W~H3^BJKsOwWnef<2ibq>vQFKtrvW&fNy#gnDQ0P{il$a0yBAEg_CBYc2!& z&^LybqXF}>7w|=q|4Yyp@$Jd&yY=!>S4JB=a9$pScp~-qj9-+sfGWdG0}t2hPBqAU 
z=K>G3rwy;s(qPC9lA4A3fkKB7D=u+ZtH-!m7X#bzNpx}1fxmSQ#p>^{vYUHC-CITv ztyOBwLRN?Q0*9&tyiuRJxV0fPRF&F|Z`E7hyZ7&_=_)rQJ)5sWV4(RbEQ~tdE?7=l z2IF<>>a`VTjX2Sa+42dYNbwv9TEKt%Ng()=Tv5HZ8ktGByap zc7z4N=7nvI2!BgOMAil3E+BZ?X-6(I-3Z2v5Y-y6-iB%6%#>ZSE-F`d&z*aC1T_#^ zqzfW-T7U=Kl~WUkeE}g4Lb+#gC>LhXatPXb@l`4J9!@SBz}~-z(tSN2&JUzpsDD6K zx@@js-l-`nR_U{a2>MoMGkb87D6e<-KWRRPPAqWPh)Ke)HPS0AU{{H`4Mgx*rbK*o zJo)fpFrf9xg?a`m7@q#t>KVe+jsT{EatN>+NK+wg1aR4NQtD&eDIrIvJ{^aS4rtI* z9-rs7y93XN)W-5+`azjHzu&G+eNp)@7IWhjOP?HXpj|3F z&K085BL_+cj(jr$PTlTK%(&Y(JJqckEezGI1bOJSSWo6CcUB{~g5Y@f$4Oj8V)DLW zD|R+^Drv%fq;tv8%g!>SkfNb34+*!yirVse#T1Lp zk+ep*ZgSKQGCAth!Hbfscq(XM10|}w@u|!r;sC%43?>qKl+7(G(2q>n89HbSk;Qq$ zkz;~jy4|iW4Ht1gthN9U)R)~QBSVe=9-Z{j4~vR9US2QGp@OcL#2HR6hK79-2fMCl z;BT^BZYXaDMUb)O3TnyM61rY1E8#$!0{R6k<;Or5eyBx95A7IaaQ$>krwEI1>pIz&xvPe{VV1w^d7wts`JxerK2D& zalCafmoIu)Jq#`I$>Y7Sb>Nv}$Bmu8UX+C9a5SL7!#{+2oNj;xowIRidomwBKmeBH z$F-()NaC=;kyX@}^VN7aynqs;)hhe9R7FX~3choqsOU}W(>lA;PNip3EeTJz{3#$`QjQF(kFIBVZlR!s{YuW z#AVF0J29`AQeCmF&DE~r+?Lx&mCMpB5?cXlMTF2$7E^W96jCx-5;PkdmrtRcExgP2 z^#WW7Q+Q?R)Fb^mzu$F1)-0#U`UBwOAyqK_a6Jv zk#&9nDng-O;N+38R(~J4=SekhKICnzj`j11>CNQLz!7Oeg7%|;tVeo~1ZI7D4jAav zlJ&SYWyy0m`rS|6m}_VOQ0E-N1iZHKlMgOZ+DxoYnw<*v76E$-yggPg>nSo*Y=l{D zbA_kj@nk+7Z|G+r9Rt2P{0I2Drc%EDe)QmrhoaB6c*B`t0U-a841h;&&@`dC8SrIi z<}RhA%F>)!KL}W%7^6Z`w;c^(C+t?$p5~QcvCEU?DW50`1G=0>hBN6d=@ISPBsqBa zY`%K;Y&Kc0!#XhzP@0~$sF zww9)Uf`T{UqR@- zbC3=)cXl9sY4xJ4ewaDeMeR#2Cw&*GyA z^LabFlQWZHqOijG-d{?M9Z?0sbhb!UQ)*w0I6VYq!QJYjS`R+9<3vBDVZm+`$9x#; zf_LP`d^q`Nw`&=1VSF}(H_~8A)Z1|;z?CNwN2f#1EO5V5YFA8gMx5oST|4`5c_Sd~ zNTw(m22P#Y6YCVLFGboae$hEU#Bw_0^$MP)H%qEdv!E?0VM4ON=)9YuAcCgV9siIX z5+`N)ZHLs(kPF5_h;8>LA8Zdt46&I8dxa2Nr=zWUYx)S4JGRJzAw`mN zjTx#J77=^>LZ*Qjpv!6vjMOF-wJ@aZV^xyOeCj)8vJQP*Rni^Y3rTkz1k}T0;t&rF zxDFQ&1xX+aU(oI~+-zY24J0|jSf}T!47kQEzE)I!kT+|99<)}DaqCFPL5Pf_*3rEt z%vs2`uBwguDV$2USO3L&eIJ14944fF5Ca+8n<1qAYy#ak^iJSI!WEGicZlfrwS@!< z5;6fibT-)e#nEQ0+IqOIbRgQ4*l=uLAth?dxv6?o;zZ9@StTqBu>2oil6!Gu@)9j6sJ_EmgC7KhVSg!1Pn>ZIpv1(_Qj zCz^4#c!F~vbpI@MjaO?;71!FJoQr@^O+hN5)lW*4t<6D)(4)4S3o#@$GFK()@9j=A zk=vCfnh8VEJGnVWHhjIR%m;vF8$5|{=Yleq-0AuCVm-RHJ+*z*^0D$?Gn zl{j!bxXTc|p1fslcVmeN-+uS?e6^gu<>WYH{WJ(qlTN{7|MT+S6iG?^4>`Y*y(B(tyR|zim#ar!M-$r-HHNl;_A3`jK{&Y0i4f!=y`+_!PcL^RjEcG~G4jo+_yK7^)5H*QuR8tC zm`($Y4UlU=?@BwTpvmtkQMYgsig1%Y+Ds*)+)OXwGb|_Ql1g~~bJ23oHZ-pO7R%Nq zD2`{jU^}aRX>|YfRARFsZ{U!eV*6Vskv=AOL>Pd)wy+d;=+db`bN07HYsFZ@+ish1&rrjxR09EOO;*T;x!}2@Huk}*n$x9;~pGQEgA$cH5^I% zEqG_g@1~{?&ILKvb}@~QuzYsmfT5Ox^lXQYE+=gz`*CN06Pt$vpETDnB%d0RBV_n& zX%bw~ivDV0%P7k%q=5u1#kYMa2@#As_``3#y$3=^T(uQ7^XfqLEtU{>f z&$p`L`yeUDEHUsV? 
z*5jj0O9P9RQ$;@3fixn`zQOowXU|Hr2`&uk)=N&W(VPSv6NvA6g<#I?Wkr+@i6t1^ zC!g!;{0DIPnaek<(hI*5?RGJnzDZ+5qkC*W-1gCUjZ=Lw%oi|P=exb9YKrPXf{pMA zgEM9~)$LMjwP!3chIr?(qqw-jOp+<*&Di31CSdD7pdiCS7N&2pfi5 zK&0s?az^7L%rYJfn~Z4JW1CO#roD%}M>{b2*~s$udj$gI{Ax|;_6f_b)1O`gn9b@Q zgGcGX#beq4rCadKu!W#Z;GpCiBnn{#!Z`lPkWV;ea3Rcv;1#MlEKj@WWv!%PLE?~H z=@4H20sN7HiQfDUbm)l7)hi%>>7bEAFmJZ*A-_;LuFcYkYoTRppsU5_2NhKH+0)E_mAD0_MGR=g16H(28NNWiwaK zUi&OVHc|!|y%2DXrZ>|iCQ4$Vm`ujgK>p1rm2%p#Z4DzJOJnlH?XTd1B!$ZPMVKP6 z+JG&$lAP&3?U_{8a8KP&npq-i7s2?+c=761iD;a<;u9Tc1!Lm zw$~~bZ&K1InkqIyEkOuoTbv{u7vX~08%*!yxSh}zu_lfJOk)icUqV)3cOA`Ml$cZM zb7B6xI;q`fbWSR8wI&W3+)P4W4<+^wy@1o7B*z3YoGU+VQ7>{!(Qe6e(XBI*7?e{D zxJV5OrO`IOTj$I5lj4L~c}aDCG}WF$b7UJY$YkRv<$ z2Em8e*p6G4$$(drdM(FE8||Pb-c=K_*-p?cVojT~om+OJITS4d$5MZHLuyu!g*R$B zVFf4#u?UI{;G{qsm{Jl}A1R^8G!LUGSw;X+xkWD?G}`12RBOCet3s~vH*+IBypE_p z;BDQo#~{m`ZfMW{a=!q43c>)+6^1vqom@>X z;2Mq`;%wzaa~+hK(aU-Qi`cRxE9V1IQNhD7VT^gWRLg7yEhUVR7zr?$ z?lA`s#}{)n4sK>Bp#Kbwc8i-~*Jq~FFg(TRX7ns9Ci9mp!_s1Z`||ZyU%niTUX2F& zmBS%A7$$QEPh#2oa?Q_AZf^K3Z*lNVoz&Nsa=KYJ(%X-W<>WN*<@J27$=rTilE$hl z@{k;2Yl_T_xwPr%&1527tegjfax5O(h{7FPr;7GvB7}h=xfONxmXi?mx(`|C0imbv zQhMDgxWgP*g^2g8KZV}EF<;FQ;*x4K`{x4+I{dC=0561|M+gtiBLkBzNB4C>RLwJm zJSMBNIuLg<#SorcI~$cRm}F6P&k z-#5AG0S4~whMUc5^Bq{o2}3gIk-$sHm26*R+8ctNxZu-Cw9@st@}6^;8-IkV4fJy* zG2W`+-~d$KPj%Kau~9Kx&m7cz{nEDSh<6d9zbw(C57ymjN;c)GY{<#-a>a1^1n~>Y zEsh{VQ_x{{qo#9Ja?vc@c#7p2?M zVN6b5E)G40rD7Ef@RW(+tY;ZzRn|6>u!=T$4&JIzO>B3-0aOF9hOAf3 zSrvm~GkvBG_mEXNxl4oB>>pU5W5MH?a|@>@gT?$Z!eVM?o5__yyl#w&ks^dZy4Kg5 z>F8#**`a`(BDx?i>Yvz16>c#=tyfdzvS7TW4HgIGV*cia7%>|8Fh}=u z<9d_* za~1*OLr>+{ecf&Y;MkVn!@*rke4dL7*FKMAy5|ZhIMCVPD1Cp2Te#fgh%2^0 z3s=PvyW`al+3Bc`sx+B;`U~pjlVm_~Y(f%EP2z3d(?o)~D1*Qa40}#TcRgBJw@T7V z042ie3%n8*RwrzK9qSOu}n};)kQsIl521j@m7*aI-rnNwHi}c#;(cH({HECUs-K_at1Q| z@#5s-tB+^MNC1TJ?Pk2b!ZsQg(x1vbn%82K4aB|%U9hL&%pI-hAM+vxm^IZDO{sMjAmVy;DkUz^{uKMhb)o6g>!Rok2tt1=w0Mf9A92Dj`4#&j0 zU2r$9P(LOu+r=8RH7U=ft-olwRC(T{0QRYE?>Y1QNQ z8Qf>==sMBqx>Q-e&&A5lNB#tMlC)QO;~02Lcjo$rwc=23_n|Cmd99pw`2QuAG0X}f znnT>l!jWlREAuQG3xc0APZ*Gi{(IP75naE;EVv&Av`A3>;>%AvED{5fQ>!I-0j`R6w$v~g%s$htU7veDSRg5signtB3+nEi>I|ld!L>8dOBxc5SJsLs}kBRtuo)rYvb}i+L3ME|XCa=Qd!b3BDvr<)=t)DvM{7C-^U zPUyFN67}2Sc8VYxjA4M-Hi2j`hwA|Z++eKXftp05JLb7Z(4wDzkPP-NLap0jCyeF* zoQjWE;Fa*CNet{uoqJ#ZDaxSj3gCXfG`cCIRy}H=-Bt%Xos~JKToUxU#p1hoIIyF4 z?RYU$7o~ z?+b7T;{R-@#P3!YW+jwbwvE1f(F`M99R=A+KkbD}BCp|QC&+?#OjCa6(orBkKd+)aY{26?~ z@5O(=Q@=?6V1NEaeck^2UCH0Sz~BG(Z~yj6KlYdGAN1!hJ~tou1^c<*mwf&S{{9E? 
zpML(AUYa-P&nN%Q{GHe3cj)JT(Y$=`m+b5M`45BNA-~a||Ma(3{@q)%=>sP}Kb=HA~*KL4A4?DxyB^rv1&-=E(9-?g9D_5UCC^ZN6jaDjq<((C_2 z{Eqjppa0u`$6Q~3{>i@*d>DUBum9_KE&Tki|9kU!{rU4>;BWmu>Gl6PeieTH{(txb z@@D<{yR7``H>B79*Z2j$^B3$de(hiVA$jYs{VSeIzhCkf{4c%!zr$;P1pnzSwf($) z&ws>c|7h^}pWEyI++JVbhquT-`gi^LU+^lQU$6g2+1E{wF>gUjHxrTl4w9@NdoM|Lb417q&n2^ZN6DC7-`{ z&mQ=D=6e$HtK9$p!RPs1djB8n=Rerb|C(MXxxW3Y+&u69zI(y^K|lY;lh^rP{-ZyC zKmI_yrvLob?=$!R_w4n5Q-5K9>N@Jn{GMcsv!KZ{rYAVgaq=NIi4 t@U-Z&+n40WU;oeMJ_f%L|5oo?-^=HV-(i095B`~X@;{$^Ao=UP{|6>~FpB^H literal 888880 zcma&P4_r*o_dkAj+p_(^mLx>mAB50fD$$lC2}KAYy+TMrD2h!X3i)4z5JCtcBq4;5 z6d@!bL?QqG{m$KU=JR;lZ#^Er^YGlc``pu+GiT<`ow;}BTHN2$O(+nka3m_+DNX># z6-Pu0Mn-&QTvIyYI2mWhiQ%(7*G{3U`hUOtzXvA^#0rw7oZ>I=Qw@IczlOj2UsG4; zu{5tI+RAgpl>MC$zVd$;T8MuaT8MwgQaP^x!cx@YIKDKB64CFRQfjLBD~@BSLKnCu z^mmQ$D1- zyB`A+khi~B9IK-NDqNlprzK0(=8LW;j7#LYO9X>dEj%Py#!?I0JVAFI0jJJ6L$=E} zq1a3&jlSb7n60m&bz0!Ual;(*h77zYpqk{^!iAG_>j1jq zrjTK%shhWmlL=kI#i3#=d8*mKRTjHAxv9FTaGIGkP0KlRSSlwsa}jVaR5uBv5EhZD z78mHnanAA-2`7;XAmD;TnUIq%5w{ppcc;Zbp;(yOQzkVTyv{;yqa(EX#!0kALe53t zY9NxtyU4gyE)z=6iNpdS6jH@pES7Sr>s(di#CZ}8rvmuOUpbUA+=&cbq{EX5fNH209}n*})wgq&FBGF6%< zG1eAq&jOngk$bUBY#dV;Pyh%w1Q+1LQi#0(BI6tlRQq& ziEU(@wzs?~r^dG@EiF@RqXLyB@v$PYagz^bT%zk$v9|FpeNL>y4dk@LMJ?oU;`BS5 zcC^eT+Do^oHa9}bJ1mjewCLv}iC@aeB_1-N%$bXob0D!ATxh3`7Q2>kTAVb_*)^1p zh7n)$TtGA@N#(>66YUEoF51FuRa1|&M2V|V&Gmp-ct*%Yt9yxy&WgR#gt2*?P@bqS zlgI{2bU5SfoWw`ePnyU{phb5DOOR-Y+$Ef;wlr_`G3myO@@0^lZxb_#MA*We8@9=~ zgj1I$LD!?=oX44qIk6Dx?HD(0Iru3S>!wQ0SbdqVsYDp)1ucEGWiqLytSUfYw?z~uZo_$+8da+2OP2`5?vfa>yVwkxq(CZ$ z(_50K*ThU$bBGsYn$g!ejdtniq=&P*E1T ziJVm2U!(yg1kb{(G*>j0$u$JBF|yighn z0T=>hglLJ(BvPrZKq51ihy=4NI0^jYq$5==ERD@MnH%S7DoD)ZcoB$v#R5)eqB|T* z^i)kP-Hm*rHH5Ba{oK7K>UYo6O_h7dgiv>$%VIb|kWo^inF{n9GU$BF zT0?Df*NtS{F0pipSpak~oKy|km_=esxgIzxwS+dIuW6?1uHxKJN2;y+>pIsa&_g2Q zxCBnX@jVXbEF2DgSi1|w7db68nHF?xiBh>x)i_Ne@`0z~NGivL2ERfK7lcfUSTOz;?hcz&^l!z(GI;;4t6_AQNy5 zkOepi;N=u1iq9PQ%mrKkeYw6DdR&QawN`Kx;r-fFXeIRZIY;fQ|rOIzieQ&;`&H z&<$V-;KdeFI7v7MKp%h%08IggG92d$=nvq<4blOCK>%+6G&ar$FbcqnFQjAX*bmZi zfboEd#PK=-a6Abx8896%127xFOBki$kVepPB&GcCi{Uu>f4=iv418Y-Sn)snN;r-O ztOBeBtOsla@Uj`wWWZLycEAq6ZU8T7l~wuepJAk`@dw@rPCxB;wO27*M zFE1gj2GjuF13myg0zLu$SH8gauYjM#|AO=nf2;ys7eEEj1fUMk0B8cV0L=lsXhW(4 z&;{rLS^-)Ecrkz!E-9Qbz!cB{z>68AT>xDH-2v7BTR=~MJ)k$B55Nf^1MuPksVl$@ zFaY2V7z`K+@CNWQ0@9HHU%(i^Sim^I1b{zaA|L<|2$&3*0tf<32k;V1=?q9i05buh zfLVaqfG|KfAOf%ez{^5NqX3HlO8~KeIKWB(lT~oM4zK~Rk@(G!CIONGTL4=DI{>MG zoq*i{UeX{<2kZwN1Y`gX0S*I>0C+hDX*S?A;0)j_;2a5O-vB@9`yWVQbja}oGJX=t z-)~R{Xabr7bO0>@tpRNT?Et(OLTU==K;Jt-+8NLdUBO0fXrKU`U4nM$&iw4@SYUFJLUd4=@hE%Xmor0fF>=GNn@?oleKWkcI$40bzi7 zfcbz3zyiQRz#_mBKrCP>U^#%7I7n9lRsmK65&>%g>i`=8n*qsyt$=NS?SNFkF2G&@ zFX@o(r{fGr4*?DXG6BZ`rvTZ2(||L89Kbn1E`Z5-IKBY51jq+m1{45zxdv$=;5L0P zhO`7w26zN02RsHm1ylf@0jdBm0KB}U^cAH%_Zp7h0^R}M1L^@^0p9>W06zhL0Gukc zAAkw~rXdPZg;X8T6wnNy4QK)2MGw-}00a8o4$}5?YzV0lz?8o8`et<81=6knb3hM( z1;7em4d@Bz1>nVwQhP{y103mlUrL=Qb%C@Wzzr}EFbLoY;AIG<-jEIljG*uQZ=>MY zm%fjNbS%IRFaZz%2n6sF1nD$DFdzgl6EF)f7cdXN%X~<~={N$?g@DBXIUpLa1P}}O zUs(#@mk}2S=}JI6U^QS3U@c$+fR~MsCIhwrb^vw)b_4bRnCyk)bijVVVd9TMnguvP z-+A4WaC{nY25=s50gw;40=N#i3E<@pq{V;|KpAllDdm5E49Cv_F9Fqn*MPTxcK}}M zApHRNMBl$a`kjvX-+#jKZ-77znjSz6AO@%dGyzQk%>WWW3qVVNKA;VtEucNX5MTl@ z1#|#(1(*Z60W1KvfL;JQKyQE}zzN_2=nohO@Bj=3cmak3co{+INJvKm`~c&Kn*eD5 z;6K15KoDRWU^;-8U`Rs&a{yt0d4TzVg@7o491soQWeKFQbi5SO<$!p=D!^*M8o*k> zdcY`@x&nFttN^xv zo&a8YLFxc-1ULa)0B(SRfI)yEfMEb1zzD!dfG=Pyzz@L7I7r6>CIbEgOacS}m`tPN z5J*D-vjKAf;ebfMA^?-ca4ZMJ0G0u^OrB;J@pkH{q*)n{w)XSCQ*^vz@8hnLj(aEf z`xV%)Sa?i4@Jjl%FPlGAZHo^fWh+Y$4Oi@iU;jksH_Hn}dxYP4SUltnAQ&D*=~ 
zcpERBmB)9#voB8DDRTI-yzRt(`>yR8b+Y1S+|tYi{Z2Spo%_Cb|GdtcvVhj(hi|_o zQZ?CoH<_zFmy&qYYQSB`U%EQ++8){OCS+^(n^(JQl;)*FdA8rhy?06mjDI(%O<@O} z>zy(-gr`o69sWJHPyE!OkpnZ!CLeC5Y8BJ!%$ebvuc^3sXI8pC(}}6c+VAT7EUT$X zyzE5n(YS)At(*LgJ^NvJ!kQ+t;ug#65ARw%W%ohB0`LCT_ZO&j5c(DbEewm$IdAIM z^h)*G^W*DQw=?$-N)B^1J^fhkOPTML7J*#{Yk#QjYuh5=bxiALo4dBo-WHJHK4kWb z=!~nQ7aM53o;g11Z~UCjMelPas)#Jt4cYuWtV?x=<&gs;9_OgNd$K%#hwc~c<;8-? z+4loqcUZF^I!M^kZ^!y!zmEh?YV)vn+t1P8yVn?wFMVy2cG7w2?u;!zJ1pZ`;4EPd3n^P8V}_oqCWC{)oGL`I*Cm}6es zab0eg)>c)C19EFE)m2Yz4)^k{J5yzJ^~H*tYoi8sQ00c`7MHjE;N+bt{aEN{wbN$) z_ydVX1}Ar2I(A1``zhl?u($SyUhan~(%!|ip7i5vXDz$j9s{TMJ-O}l!680d#<@%> z)F>`$SE@hr$F`+qufn?cy7&bD7`RI->eTZU!KF2wF0If_s)-4h^}h<;#~v9)XKOPj96%@_2}do#O;KcAz@ByM&yRSnHnH^@Uc(Sf75rw z)%V*kb8tO7smE(?lNBEC4t?8w*2>lVbIVlM?4zNlUhQ6eWsQ-y-H^mCTb*9E+_s}= zd*a$h<{R(bI+wlM_|cU9u6H_25DgD_Rw?OmY{s^%aNpX_1AkAyZE0lNH|ySM)lmyZ z1q@i#TU&3|RF$$e2V%p0ZDvmIz1nu_1h>nU)8GHhyEbW^t*qi^TZ0Vg=krTXMhI+T z{uF<^z448t`>bmxm%U&Apx@pA<2AK!)Q(ofSx+tPX!v-{=p;=e$>sxf-Ttg8TGCwm zbiJiYV18)HhuM<4?-5swPoxi5t=GK&D%kk?odxSg&0eV{x80_4>W=Wk+^h1q&pkuT z9QE4NTJ(SK7j#7BK$q95TbI7+r0>{_7F)6KB1rC__A9$hFANktMpn{ z>o(M9z@Mha$3GeG=Y40)_g>Rq?cUI3jalo2-XjHri`07d>eXy;vF1?28+nos!ON$a zY&*Osp!Q~VUdyTKc{W2^?Qd_KH1Y13p*iLQ?j`Eyw4C-~SwBz7T9y9gg$}~nchUz@ z>ujfP(-p2XQoGRB<6-Llqk-F}?)?33Y;M~v?-JbIyeDKzKR*s^o_f}EX6$5n)$gOZ zqlMixk5=R~Tlaoqr)v|JM%^9fBD{S5(BE~V_H}*kUp#j9O7~dXZB1+Ds0H5anL5%t zb>XD$cM^T-N`qQ0Ty6iQRV#f9+y3t}?QiC|##y(`O=*7lkb&*-Q*G}?yy@h3;M}ke zeIzzsfh&f`ZXUYsPk3zKSDV^T*}Q60WK5^Qfe7k0SleU*O7|fZ`Yr&qGm1-KRYqH{Zh@ZaST)BT_<@kv6EhLVg z2mH5H`_A8~a@Wnfdb*6A?XdXtkWKmfS0!&R{TyO8dQ`_xf-%Nja|=7o*gAdb*|g`S z{vCss9n9+eK;yFH!=sP!6(b$xKIaEtnfvxmZOfDCA$^CwuNx*S77tXDX%s*A`23@6 zz1>!^QMA#l-TR-dH`o1n?^So%%7`qLoU4;d<6|ei>oIuzte6wsCAU4@yl;&?KgY-< zV#~M4r4Ll!`I+kFHw&@(RTS~3Y0}rXF*l!Xj&ktWup7=Zi~Hpt1K|x(AQ?mra!+b-#*awpS{wvZjPFIFQ>U_R(BG^JY9QP==_MC zATe(Fg{v|ey?C{Kr@H}W1FrN=9cdd|_2^-x*3YE=Yqwl<-lg4h-|e9*oa35rESOi#@ri74$CV2*E0#`} z?ch=1R@!f#dsrXmE-yE7w257dCePRHre6N_$Qk zo>RUwW88wrL6&ZJ_l{bAc875<4cDCC_d~qL&$>Ev*3mYrueQ2)y1T@5(W-FWFVZcY zlUHp>$edT(%-hXdXV<9S%m0(qKQWPor8RptMy5OEU~~WTa{@jyFGsx;@c&-c7Al$$cV_jhaA^LHfy71JJl!q+Z5;c ztv|LoYxEga>OAd{?t?k6ZUxjE+4nU0)z;fiwesoll6c#Y$Mf&5eb+OybM3tvVZOP1 z>LB;6wbvJAR$TsXwnMVl_|DI^PZhrUYO_tat0<9s+5SL((RAyKVV4fg>)7+d$@i=5 z)zt@2zLoC!e&@5?8M}HU9Mc&gSz&ZK$Z(!fewbS9=<{1Fb(Um^cBni~dVX-1O~J00 zB{t$a6Sl4YGwIUAH^tYQ9FKY#zde#RaRRqd+rCZ9i55IAJzGl#bce{dyH#yV2^R7-W zbCbHF5se^+|KuvUq-A>AL<2w%UDSUw6ZFHIEBUv0K$Pr=P53;$kUI@!X&XwfX)pOa(H z=q~KK@z}KEm1dnc{gj^4o8o2YR5HcLFYeB_7cGM}+WKf$ZtVQaW?MqvF{}IUElTX0 zeOu$}$5r+%j|>V3O}%ox_N(DxN#M5in&TgJ>vI2E6U$Xixjl=`GrMYiTHS0@#Jppx zw~w&6u{8a#%EVjh(dPF(j}AEh+NS80PE@DQZR68@GHp(eGwZ)YnzO;aY-(xzS9Qso z;la)k>8n~SX&2n?n7KE1=hWKGpJM&Ik6k(O7uWx<-vd8pc z<9~cuz21FbaDn`L$d25}aUW_HCGR%v-7-AptSD%A`jUm;1-=um4QVpHckRLn$G=Ap zwolvhyz9%{(v_0zXsgMqI{JHz8=5_4z02Ojd5*_ASX#`ze*F8w%a#I*8~3iZ2^;&@ z;>4`NE$#Df+U^?P{PMV264#n%+pm8#Fy1}koyncqj{|PJ*X%c>&Ngk>?X33;7dcxD z>Lyj+YE-UKx3GL$YwNOa@!!mAV#Ybd9MkF7H|k!pN8+L?Ev zceiDSOBa8z>Xz{vKh+{|Z{G2lO=}J( zL~Yo4sH^5^&Dm?4a+Z_l-LStlVSUQ7+xvpn-R|kNC1RdR^~%kA4yKyU3*IJ}I6o{00x&BMH82{))bM?2UpN=Tk9zCz=$O+B6 zjY#Nx;NFu?1E)9Z;^=Pn=F9BOllOHyBn`Qnvi_rU$%KklYYo?jwW^PEyVj&zxL0+b z82^;<&1ZcoI(x84-7suy-0V@k-F#2DwLVzV!FYV@ea&{X&wpk3Zt{!>-F&n8UZ+QE z7I&!{c{|HGclVDHow!-%uIBOGLAjS*IP<$NER} z>^CPSkN#kEc39So9%&cG{;1L|ys_zVP-4&7-DX>-eD2rjqH4XC+UaQ_```64`+41{ zzfbLjmhUVVHGMMr*p;DgzxanVo&WcWTR^>rplJEx{GwouoVR{HD_>2Pp6FF`b^nk9 zznt5n{r_bNv znsV=SM-SR|K47v(!O6c@-ZcAubdSb`4}GtBjFo@C`F&ZRL*b7toSSb9?Pe#lrA=sl>)_WHeJf34pnCq0#IHFB@&VQm-x 
z(fd?=!C~u(9bdYQ_`6wmQ^@<5aq=ZQUgYX-NG_jYG%(NOR>-J1f0}2{{A=sN`TwZ; zJgZ0A=;?3s7p!X^e)PD_6NSHuX5y_X+M95w$>t zmI>EFTib*FhfIkmCeEnu9n$u(cCVm~ zA@BPB?L2aF&$)l62vd4r>^7p!0>g)TAFDra^}RL1(|%8@7axK*u6QgtZQP>sYpaAv zPZRYv17|j{1~ zdvv6A!n7Sj0_JYo8eTQgXzQsFqmq)#_1Di4i|!38G%@coYp3m+n$N4I&!}Cu$GCV? z%Ib|?xBgUmH7(hE_0;w57YSoWr9lyjYP_y5io zMZ^_2o;g|g*I`(X!KEi z9vWywfNdB#R~^;;6k0UZulnz!6SbWVn^j(qIxtM*QN)h983te0sc*Yd)}zgf?|SE3 zXT6znc3Z`skOZRxmb06`-#E``ZFT9jYO|~+$>pOYhD$eGpV{e%r><$io#LAhp1FOg z?SIQhc;IZCgLO?JLcbfXwMs6O=5i;0?P#8IV)6YA3-8;`8?i%svCZ*N6F;Ak+k`Xo zBu+gC@4T3}DJRe|pNxs+742PmD>3I({$8lzx!;V+9cnOk@HZ}IRjUa`sv}~)6lX|0n>Zb}`;%Wkue8lBtwq!K z1^;NiquY%U?+;%&b~xpEtA`>JY(J-#xukRO?S4N8oCsTS*d}i1frW)W_dCD+ZWs{Q%1fuHzOcCX z_6@(AnPsKbf2QmT>^qKo+ z9hGCg8+2M&ubEsvJ1TO2mr+jpo2IRe((l~dsN~knPP?Oi2YC299O=7%bf3gVMVVj{MQ)X4INzvyM$N%a$%3lJa<7 z$k3n;q6u1^x_pnz&mDYkL*mTC`4Xq*u}d?;y?0)8U$#rVX?LTpCoed3oM2)r@=n%z zGw<~$v*Bg+KMIblI34vO*VLnx{d(8#8>5cCE*L#JWI)*%?~s%6cekc!MH)!LR%vf~ zHN!DU@9}qsNE64NJH~A3rW@wndEB5AwHJRBXRQDJ?7Bml&dQ8s)10qdQ0=|r;Lrfa zUd74A0cX34eA4?(_-(iM(DbSC0jrGiA3MyxH$(i@2>kkmF+jSvzakH zWMznGTw#ovr}cuqj;4X7ITt6c32I)J+;5)s(jT!~n(Vr|sPx(&&4V!>b^VPzqb?n^ zi^<>EW_9d^1)eIWgiS^J%eLLgEOXm)Q7n7bepc>~=&LbfOFPBbz7rm~Imq7Nn8T!d z=8=p2tm4nfc9nOivoLP4?Nmg2!P{X!T3nBh@|@c z=3a|luAE$3@hewfH_hRk$%oUs{ayQ5RIh5$X2AN#0*$OI>LUw>Z@JWC_LAL|W7e+f z>*)Au;m^dhKlYcmYOXYBI^J3D?#xb6*V`YJA6i`#)N|VJ!yOzCHGS*4?$kl?=!|84 zu^xE^T!$;eJhTq}x6Lu<_4kRDIZfX0P~CXoXXm2Y=U*4s*pJP-{k(qT2ZPoZKLTWK zy=*T#e3~kZP;t;pPMoP?Ql36wY)a|guid{lKWdY!waIQ)Zdu~>_1)K&$M3r4wajW` z)M>X1r-o>jg#LiPSBZXl?to3I?N2!Vpz*rTejkBH)zdbUHHu5VE}670dVZb z)FxQRjoRl}KJvuGb%B*x3D&S{Hb_neE z9~X#eT zGPwKdsEL2?_Z;b^FP&DLac^mh?8|dwTkoBeSFrG0Qb+TIadl(13?K2I&h)RLD&7-P zU*{>}Lg@1}p7Ik(I4 zJ+sR#japx{2GovJo&TnXbNG12Nt2RGobLafv&4JGz*kW@wx@#h$G?3wTVC01mErAm zcJ4NYujgFr@7wp|wu!vrDx-zQ`;yF*aCR zx9^-iRw^S~l}qaa$2pX#X!a<2Uf3&c)MC}pZfDM{-??$k+5=0)A9Eg>d?W*cy z^WNTM#i`$4TXlI?p;I{~U1iyM+g0w{}aVv@?v$8UNh_krq`{4J^c3o$u`45?T_jQ)T^Jz)uq6M+7`%W0O<-d^Pj7NiXR&Looe8i(RXKi}y zep)g2z_jA5ohm=6V_;LE?yGYUt?y{ukn_d9%Vsl`S+4ZXq+q8I-i@2wuF zwaIn)(P6Jvy9F-v$Q{^aV~KRciwej7nPm%yZoam~<>R;EIYS@iuY7R%SoD1t)%vjY zhbM;YPx790^rQ1fcT2s^LxVl_4$T<7_F?kns?}G`o7Oq>T|aVt%Ff54zmjPI|Fsyr zd7fH_{o|q^cC{|(+$+(qZ}!Dm^UNRI_+Zkia*S7#sPT(4^^^1t?H?24(M8W@VRNTQ z^D%*gJZ6okkZWFe`rij9_nDoG)Y`RwVlzHr=jphO)}s3_98`_p`)wJ0L1V0gm*u*! 
z$9XLMY^uqT^`(#_Ej|s0Cu0tW?OJ<102k(v{>ywV!qG>+$^cv@=^z8Vf2D zqmJvrjqH@_Abb65#DVPf*gwKH-MuWk_walddrSvuc`nBrLw}ok-^UlNKN6|m#koJh zM?Z&eBj#=O|1ZGVd$rytEZ~58GA+Y4>H3-;#vQ?=^6gQ)>sXd-^w~~AF71W0hGgSc z+~(#YxaN)-V|*tVV|)-y+8ARO#|ILhgdOplxN{kP9%I(V7*jn7qaXb^p3^!Nousx& z#}A73i>lD4a`EO1r?paLgWkQuf7S+ zG+uh8+QPHa!#c>bk#EcHzAX=_EtslBbb>W_X8q<@z(UvBbEh#$H}SCO`HP z`#4x11SZ2hU9N_wZR{mK1}c2sW*nk&mD}>0>Mg6=JJCmaEgSDFE{VUK&S=xgwAo_b z!QS^`+nN8R<$bhM zK1x4-J8-y-`mr3VF69>+h~NBtTs!=)b?)N*7Jjd5ipQgLzwpD@$D}g%3?TQu0e;_D zu?IitMvqoTH`l|uRc6?S#^+oal(RT$AJDM@cu{Vqa+H54Mk?MrAHbjP0$Y_~;FZ{S zF?G5<+PGKaCyqZj-_3bl{|Uc|y@yx($fJrsLI2upp3S3EyE;B57RtW+Ch^E}(&~6* z@ke?chb%SY3gztqM}(LUeb#(Lm=Z@hj^!D*&l|rw@BSD+ZQ@w*TKjahMWdd~$L!)Z z-j=+(l+68)DUA&0{xh63&U4amCl8#YF(&q(h2Pxq!ub7^XXSGoUjQE59%|h4UF_?y zHlx(>74H87zq0i)_`lW%+_Hl>nQyQxSAJCMt;8wd%;h34vR=Txfw2N~HS*v}kg>j<(;5YoH#w+4!eXE6f-l8qi zrQoIK z*AEaT?`)oAdos(nD)M#8)%y|y7d1^dUuLw#wK0i>+W8-3@i+t`^PJZwv2J&%*D)$1&FTv-c3~f5$J#bW@;rX+uZ&2`_wObQQIg#75C|a^mdE8oU zbJKh=eHT)E^8Uo;Sxb)TGwY+1mYb!0F)R4q--sXjb$aF<_oSVb6|?QE_;zkk9{(@- zG|yb4vAaIe>H{Bdp7fVJi7&*CK^Lvx(Rxt(?cjN}*1xpUzJX8bUMG9^8p%82KWO4z zc5r<%zq)=tW$9h{cCA(B_qXVK;GL{**7|3DFF*%+2frPDSiZ!%<`YVj${Y_ok9X*~ z2Yd_|_!pkn8fSjp{k)6c&b1Hmse!y>kF5I*e4P9F{Ws@E_aWv|_le-*ef~ddrTG8j zg%kc?z&;4Nel-8T#s4MVdDs6(@&D_(-~T^>|6k?*(f zS>XOZ^J@41vHY*+wvv9)S-#wM;hO71bRFGPUH81?sZs? z3z^@?R=fv4x7*9u{Z#hdfD~Qht3A~YaZ29Ty4Hy~W%Kg*e)2Uv|1mz{pO^gi5f^q>8{M^mlO9r897m@u zzi-rM-7h@^Vf2ILoZ)+TK|2cmDf+oyWE~Rje2s=U6@>}uIoIzmV4%G&TH;0v1@tVME{4O z|3fl1UoW|~e|WyKesX^A@O<|M$@#B^=UWFR=f{NS+gp?KJB8;fgOc+j!t>o5Cg-;b z&$n)roF5jRZ{IjMzj1iJvPp7&V0gZJ)8za*;rZ4f$@!M>eB03E{3_x3^04H5-|&3b z=E-?+aAJe*Et2yeg|>-?C+Ee%$^C6#O3sUGlk?@RlJoxz&v$(}IWMkF?vJ)f&OaZX zZ`(FG|5SLsyj^nsk??%ih~)f(;rVD}a{j*XeA`!&^LK~m+jmIL&kfI4c1+IS9G>sq zDLMbU@O(5XIsdEl{Nh23=ae>69!TFZJl{tC?ayEB&nw0oFZ|)0WmjCP^ZLe88~X-q z9W7~7?lK~x-?A(D0qc9n?UkEFexHCO7WIw!Se(|y9^I<@Y%8msve>7&tg=R_tZ}Za zjT* zIqdDDw(a4|S*vx+j;CEYYf;YHuAH~Xk$cjS-t|&BBYin*l(+17(v`CY<*ez-dA^Zy z$Xis-roNn(?kzhWcjdHD&Hz`=gN>8}&#Rm@R1P{NMlK%I-jv`)?_;TNG<9jbC)$tw z?-c)gmj6A||DNuDC-dvZ)A)Ag`xD2IYmOz?+=cIG#(NuN#`p7m7aP|n&mF4q26992 zC;+R_!2_a-^7$|>{A zng!3i$umyJ$ddRB$FFgIJKe9C9j%tjk5_gvd&3jU<2NL~Q~4`NUSv~jcJV1!-s62`hBEskLN0{vNF3yk2F2ZtyiLyy%5wQ#SE?V!`PBV=uOBBbaw~UA$|NUN|61*cXovG} z$s4reIO=fwh2_qFhx5WyDDNSv3p}<-U;H|J{uaDa=UUWb^`3|BitIh!m#KD+3))`m z{$07h#p~X+zuYIm$F=@^*p{zwzv(`{f4SuT4uAiUpx(`BM_%VcD8u$hR9`?k&^@o0 z*aQXgMqh|)3l z&EY;buH*mzI%A+mY95a9D)QymrC)q+j`zJe+TRWg&X3yT@253&l*f-D7yOX;>tw0Gi%SC8!;dbW+Z*c@m5@!1*jC9q|4CN@q=Tk4M2 zP4YgJj+14_^CWnz=tYi^+WOuo#`h)es?4(Bs% zeH|`TkFKAux)|fvvG6qr+uHd4i|Ec_X(Hdgz}}yn>C41cs@LtSG`Oi_b1>D3461tw z$Jc|0nOn1`-QLUr?@PI~qe44gY3?)oCG;g+s=nW2JNJFG8E(vY{A}b@yDOA=Nv=Jw zOD>+7g`ZTWt~hJv2z0}k^K|N%K^@N5(aBEoPnE&F2Z(2AQ@4DJH9PYCLE4*>YkGfJ zziIpQ?Z`;`;mvJb0Y2lJbndPrk8~ei7pBbCod~xvwSQF@$Ap{T zt_a)szs-l7AkGeaEi5~nV;KsUW$VcL)~$@WXRrHlRpwAlHzo9vj87xC^dy0IhTVL#hkK??=YKt?crsQt2Y(w1Pb$ak#<{%R zTzOpA?}o_Igv|$GbG~~zSLZaQtTvhanmfzeb|Y+SW39$h-F>+a-*htRIL3o(OFGvO z%!e~B>(-x)i{?)6R~k8kHDbc*66|VocWv11NO3pkTNu~Bh2Kg`sbe#I*3EDd?+)co!efd%M3 z><3|6_q|gvH>at*hT^o_LrA}o-~Z?AvF!gNTS>Q+@g6uc9vI}?9|e2VwGt2SWrKT$e1Bd192rMy{q%L>i@W1Fjs=RR)aCYIQk|_)$APR9 z!B#_cuXHvYep9Txma%-(e>?hbj_$uT|jSW8&(t z-qpc_>iv~;;_Gcyj%R)stl@WhEs%Y_m;LgfjDEh1=TA@DFq*v9>gemQ-$&fHHjcKu zEsKNsQCDBrXL@nB^7tpX-^%z8b4il(-k^>oHe$0k{dM;p8+5j%*O}}#7%XIT!~HM% z`{kFL%8zoo`V-GfSBC}de8N9-Nki>eA@2L_ULHRqX$RkvY}_a2M{O&*GA2;Q#X%Xv z1D`kyj!f6OmpX0{-;HVvhlhJxx^Z#gVf&bWYuhH4=j=VoPr3f3tkqLrT(Yvrn%;OY`>#X+5L_p@T0jd*EisqU;1aB zqpo?BB~Dv9x%PKV-M8Kp56HcgH^XE5SxgZorW1E#A8Vd{$7#r~dpTbMJ1$P}KDg^Y 
zaef(mVfX9W-k&1&y=iNdiA&1AhQc-OThsHRj`hH2l24C$#8<)o)A-=_idWJuTPwsX26i!$27?iH?h4EX1C#~Ic z&$PwoJ^^pb*}3+p;}!mw{&Y_&W13S{*`I!m!!Gu(lD&KTLfP*b$bL$a-9M9;y^FZL zy=3Bg3$iEof!_)8Z^lH__l_~!GW$qum5YZLHRbCr$9g-vwLE@xtDCZC9C-O)wM2TLgEz8y-wYF55B}b z@bsM0IpfraU|+Fuu1{q8e1zDS_$>Kw$dlZ^-^$41){9c+la#64&HYc=|3aVgnRA>U z4Vb+sUS*Tts*fz2%I}B=5%VJYJx~5eoG3HDaR7Q5 zxIX8&R#7{!_r=QL0v@n-Si6+lTbuqSZd5-Wz4i&))V8X#^|)M{Dzxb+-=-6Mo6hZ_ zO$R8?p&grf9IF0EIVZOL9)6{3F2?i!6&gDT{?}U8SG+ILX6KX7DxcvV${DHO-H}mq z;W}=@f29YNr@r?`#EACgqa|PV*9X{j`5^V(hD|#epVWR=i)erQ7osJbaI94eIxvzwv_y{SNms=@oPb zI78Rm_$R-ArEKXTjUCnHXzk{rsyw6kzHh?!VR@PSfV^OV%Mqen%&*?NJHsu0KFr>` z_&4ov91oTrP`*eXrgIE^OmJ`px-qWrR7~cchrN&8!M(2igX8zWRo*XsjwU~9hYOUa z%We_B?i{|Q{e3zqyFwiYC@1E5Jv(i3%;%l_&$El8@>(4l%Uj5JmB#ZlwkCZ16k9oY zDdQ^D*BW&sd_6qN*KNq~2zA6cij5Pex^}q$ephOX>gI(z*06(evNYGKIku7Ji@C84 zz5g5Xp>#Cba-XiX<+B%Qe`NiJV;imbzw6VMqmH|qph)wNhTu*v4DvI~>~> zL!5H z_bu7^)&IiFDvDM7Kb(79zp>8P#$Hu}#@*aPw<8!e^Ek3)$ z>&M1wr;vL&PC#Gszj}PO2;Sn}Dn9!%<9CK*Wd78AcBz*w<@}Lz2LKNd)Y%io9W9q@rS8?LO#33Uq1x9n*QX& zc^vTnMIoOZZ8(5`rui}s4(bT`Y>K~bd^Qh#n2v5o=8SGA?-YN(`0P&bpV!r)URNQX z9qpgltf6+Ke71ekj#9^sJ|_w>TvHbTpKThHaj*En^V!?zBjmHM;d71h**joKcqrnt zV%wyA*6igmKKqW>x3IWc4x@UXP3OlOMhp1tIxma(Y&Y%;_$>a`bPs&CgMVfcbsdDg zLq1zNxn?eQQ8MnB%f-gkvs2f4HM#39#YWFvhXi(->Tw&|y%u*};B{f#btnCUlKdIn zJWSi_ao1h(97Pp(HIvty@3nsh?)tWuE#$7}XtR}_=dK0d#PKfn|B0~5ulc3&UH$(M z_hkIDfV)n?zIpCCSMjA6+_jyTJ>;(M`!dFylC~k_uH*dmVL=-ce+_Vezht;;2g3ne zbC@q+uZ9aT z*W+Fu<1X>DVRV6H3NTvo7#(aFE#R&bye#t1b-1s$+_kcQW@qXeg1tlT>SZoglh0b} z+HO!yK0DuRSB^S132e8o$8v~;T6}hp*H6M{{O|OQzJ^HOb>_{_kH=N2`0Q_twHb~- z3!iP}WefT2*MX1Z`RsT2gJI=>99ER?3OiSFPsV2p_-qU|&-2-M#hhO7S(BGN&9p8UT5-){gFAN z8_FBt?-!pP4F2wP8R{6-5Zo~r1-(} z+0W2<$Y-xmexrPLAy}%;p!07KJQ^02rqc{Dp@g8S~0zTW(%OXB| znfrRnXHWZQR>dFwO<5tIeVSbCB|pa$a!rcajcRh&A!aAI>wWpAx6?-6H^TC2ahLc> zb}{a{AHOy3T2uC|!(E3krp&!n+*M($#C-3wa95L;E#$6KX|t7`=dLrbi*eVcIb0~; z6)wJOxTrHOHo*I1xaMcD;!7{M>oMw=UPJEc@5|^uIc-D8U0?9m{{lx?8$Ax@H+4B2 z%umJ>f^o5@#VO9`qmCu=dF1?aP)EpJZ~N=UU0Zpb$pQtd=L> zu1Q`V<1X>DVe|~i6mZusu)qHA;+|UEwThQT+;uDW8Fx+ReSNpz|ET(FerEY+UZAc& zQC7%Zy~)KYe%}f8+d6Z7%Q=2rJmjAQ$I)iuGs7|Kwc_vMp3NG@P0V)i**y8H=d;Bg z%VBx7`0U^4G~~0ljT6wlNcl|Tg~n%EZ!aB$7_O;{fX|K$%J@V!_dYR9cnHS%r)ZwC!Eyf0y)A3< zS=L9fP1?8I$;)GW*6H;vEbgAe=+VRho!59zEk1jOXM|1h+1cFJTR!`~f95yTbusM< z`7C))Qu}3Uon6RTHZRQltkzuojhv-2b<8Y%Cs=EoI+@Rp_|?8k?=mN=`SvQ=n19_F z9}2%GIVq9B=3j%ot!l`idFE0Z>mZQhcb}6Sd*|dZZpSy6SEfGX*8E3_wRW0w=eqme ziQN~@jTD_*I@z!5(Ru;e<@QYr^=oT-Xq)EaRtkKlRda!yx4ps2@^xMNGwAwBD%o4w}Gv zyD8l7>Z6RAzKj!tI@*FduJP9$2Qjz*^*Aqfc$qsXE$3o?pSb5K?6YOij%ohd*BX*D z=9merVL903^zU7HPAO=c_hFv-x_^fKhTIyR)?Tz?zy#63w|5c2gbWK91@WUFB11VoN*BXWOkuO{a(p#*Z5a_TqhqYbcd{+-` zTo-$WZT+fmYrC~IhqVp8jTU3m1WN%9*7et)rjF!#G7q#uO{~49IOP3gMPJ5KK^-C1 zO8&ZGZFS!sag$pQ=F53s@do*YwHz9BhNto@(Kna5gpzHKSiZuHL>)?P*y z)Bpd*4E5nt+zONC1`&hC&S}h(5=Ti$IE4$bc_H0N%OrNPOkL& zJu0xF%Zt2iF7ns+3hGb%Hd?hNPQGdShm+3qW$YQ$5#r<&f8B8M5pl)jZwtyf&fh1T z+=MG2&w`gBY zd{T&$-A(`S$uYi+kwG0HPA2;6hLb72J;KQ?!oBCVgZ+KN$uH1n$S2yLLG4(rAvsf= zw3?h%ILWsy#Yu~Q#&B|Ap#OVmV~CSBum1_A{~mDiL&|D^Pio?1x1jxboP5{vAaHxD z)?4#yyuGHEb9<9Bhm#M<v4WEzlm>&*TP9xVBZD4 zj2nVFLYzF~uNzLb_3aT(E>?c!aq>5RpKvlDhm*hfYv(p3XNr@{P0l<{s@j&~hW5l%KTxhZEC zf1hyj6Z|gZldt+~AA_GNJXgt?;$&r$Gmn$1XHuLjqYh!$aPoJ2(Dc7rPXC*G{qJS% z=z&kZRfUtZf297<+RR5}`$O65ny+)RaPKVD$?py1CD~{E{@gYeqPBnwwaLAU!SAiK zQESib`ykV?ovy2YuCn5>mFhju-MaEJ^XJE>?>{!u?_&>)zGs#Mf?c|-T z>AW{NgZCq6Y7B+3!G6-2`is1$-im(Q`w+-_Bkj(7ACGrR^uB*HH- zFgBF(HFdc6+|Z-#Q9oR{9DV!5->GX5`V0HM?+{mPzm;0=23`h+i|k@!DASR}_LY$C z*TFC1`-}Ci^$Y3y7X^D($bL2kj{F1pzDD;I>ArJqjAyT=e1vP-4`MXqFV+ULA=q+b 
zMGFewzwqz5`Y|BM@5cIa--I~O+1K_9&f-_=V5ecXd^_^riun+n+nAA?-$(!Km7KgM z7IST^CzD;{Sc-`yxWvZ0YK?8_KA!X6{q^sXfM6)CJZf+6c4DgS>wPUgrT2xyeYpD&$Gh2w`;5S^;%_Gu>!(b+^STM~BAnb{ajS-n zuK=5R@1&ct%;dXEi%xs&KAy|lJJe54-(6bK;?O*M2P+;Yy}{!=wn=fjo`pBlc4aX! z(fN|Smz=$}J-U{yYCXG%y2VMe{jbIWucNN4-(}0Z8GP2ezG)o}wlOSmaKyLd-=j}L z?)7#zzQucjbNO2`kiEm*UMK^iB?HJS>eV$zKCkEf`=Qp(5YtsS)i|2R=yZJ1zKc}? zt1dR=?6WUAlnnLka}RB{x?pr?a|ryl;xl?zf06bH z=YOs5)c>#P|7PCr!XNiWzZGH+@nTM`{^n7{*KB^6#pDBZbbJaJ;yv$2Uv~a$ev-~L zB;P$4Mm(YYGl-!x!H4EE98UZ4ezC`?zBe$D@?1RS`VZkO7f*?s+ajae8;7=R2M^ia zI8WhEahzR~_tn-&a)v)rrrtYWNqj?DD*H6z%N6KI<=@41>3Nju(>G(-<8tX_n=8r0 z<2gw`JB-KRO5cpp`#e4#$NEL*`FOl4vKMii;!TR7p6fgr9&tWIy<1_E#HIyxzU|_i z#p;514A!opjFQ^H^CjkA*ef%kTaVe*NJ`iUItn%-H|vEH|G?;^Um(K7pJWQ@Nr5bpz2mS1q+VenP6 z<}x^5QNBQ3|H2nKm3s|l{!g}gA%3RK_eXz4?5i*b*U7w}?imOkEQgoBO_v`nWKN9h zhv|8~-MWtI;NJ6)!^sFfPscW)|BmKbt#X8Sm7{o^>@P%ls$)~>g=?&%h~F<7Q9ZZ! zBWxS{Yy5rK3@^2Zn9BOGxKQig9EPy9+1&UuQx5x5r8cfE=iB&Zt#a}ly)HV=*TJ(( zy2<0xI%16GbFCwMJ|iD9{L62bpzkzp7|v5UT`Z)H;y=AJuJ?cQ@h)s{Do46^M^PMW zt`Wx`#-^z}#d*enO#YP0D!;oQ+?kH@`a2QcsujnqooZj}F|iD3Iq9AU`R{beN3KC0 zmp|ekE`KCvc-8Y`K7ZVWc$Ci{Pex|tk8fJr#iQ8C_9Dx-qYaHcwSR}1Izk%N>Q*UQ)5E#1_%|AKc6Lv{UI zefoT!N)Gisc*XkguYrYd9O4%I(6!y|D^l#^FVyKT++GKFf2{M|**C6{uTMHyU+Zhhg^mHq?6}1$6h_`D3q zr$ZY{YHS?jK9?UBV_5Ua=fhJeo>CqZF2eD$wOrl3!^JPrZ%?=woxw%6e#7N8q`tx+nw^e+1c!%9J$l_bJ*+q@ORi>E81hHo4rvZ_L2FI z@I@a)b{>VE9S-O5z6TiS@B={_=D%(u4U&pP)r z@T@ZQzPa|1nXa)|uvuas87F^kADI)8L9&$T4>&x6*CFJ0b#X`jyc+qdlbt#zQCGNE z%yHrW$Z!@iC~sR28MV)h-dFq!x{m#F%sKVZ^$kniAN66c5uSZpePi7}*|1;jFOAmO{Tb_gw_r?f8v1cL12Wr~-r49< zc**Bvzs862IYU3r3}ce!7kcCSmuajg6YJ6#rgByHL$u#v z2bo`ne_X7jt-AJO{6YB2#~%E}^O5B#^=<0&^|Y+?9?3JK2FI(PX#2Qn{7JSs0bVtmNiTgo z=fr*GYR-3UXFgAMYK2SmfAZ~5&a2-jKGa^+J5}|qvT|x(@OJD{)Q%9hA0;u!{486} z``D_M{w3c>Ro8ut5{83(7M@S>V{r;jc?`dF*ZyZ*yw;a$TKd{aEG zRO>Dt2X{G0=#Pvymc&`;Bf{418}4e!vL zbXnyLLvMqT3w_DQHq;oR<_I~StX!M1R2P>S=NUymg#BvWJOcfQxr}|zl^oYfpC%9A z=X}HSp_7kq2pt>zU*cOnjHl0SZS7iD?XL{uJB3kcCFGd9sc>!OM5SywYnQU+JXgD% zHNtYrxpMfPXhxpHN;T!tcyJVz`>(0;;HGFb@PcmcR33=j@{`{%&dVGD-`tG9o2c(N z7TSU}Yzx^NuW8Nl_-ly=dOnG}!>h;rR|8Wvrx5yzVui*2%z640cHX{QA9Bz(#8czk zHQ?Cjy)zzb=X*L?e+}~4Y2*_VqmD0dFC6!FoA`qNm)HNRMQ?Y>ru^Te|JUZawYzJT z90pf+?Vi3i`~Rxl;ksyr?Y3_T33FV>*PU;v zOz?S3{`;Vj=Q+L?F<19H8+xn@!$r?vzs&n_vU9iQ339T(O3YRct@B61jrQF%Ff-n9 zbWq;|V5XJxmd~1vEN4@HQ+@jixz8?&HL5F$md5ei@{i(N=`!?{y)T;=O!D~|?dTzP zWuN=I;x#gTL-ALB2~Y0jkL{Zn_4z#A)8{of{8i@dU%>}nBqv(X+=tv~x!J#lUw#D_ z{Sv>@`WW{Pj<_7YQEb1Jm|y9;{4DvOxc4Uaf8SX%(bM(lX!;K1d85$V)?6RK_ivPI z^j$7~|7ny!``y-baZyXx6u#Z zq169+|Bi8^@oUNF7skh(*!uJk=ZbhNT>CXN$4yoSm@0f{uQz3HP2V--JuAEC+`gDc&mS-7Izpll>Zj+?-3Mo)K|9ZTkLSrBCKtX1`E;OgED<-m^)TMBHTSC=x3-V67lE4rvN`9T z#|{hO(CL(6&s?Co^bDNm#u|0ZbN}EuR|e0yHLd(UY|nGAJ%5@#Z*>Ou|B?H{If>P= z*9@*p7KaCZwKhrj=$zsXx^rWsI;Sy<)r5PlsomDcS3DMe6<e>}nBTPPyTh%+)Bc|L^X{ zl-1rWKC>9NvNQH7Z&TQ&o!2Ulzp|iBWp6LFMc?7JGOaBSVk5Oh-#5u$yM^3O-=7F& z*Zic}Dl2>ST&3C4@glLwaRK^sya-lguTgcrZ}O0MSMg;b-*3Rz^xYR{&({8KoOrC} zONhhvEsHYWeVK?K>D+PfxSpFrjFTMt?u(Nr_uZF$;dyI!M$c{A73%ry;pOpPt)u7m z05eU>SJX~^EgosULF0CYohE$W-NUcl^DHvy9>p#Bl;j)4m~X_N`cJx+#0+9t7&E%4 zKm1mW_D2clMMt3z#kFVPJk5h%M_uP)=QzG)$GW>Sb40;7Km4QdNT<*Iw`(>n&#jOj zN#e~$3?NicyuD%k>_~huz1{}ZOIvyzhV2y97`@2)BG4#=y!>J`Z_sA z!ugXd_7%zD`Um1&bo>F>NM%W5c)gtFj&}JA_N9#9q966c-T1>8_6NZ)gkgQRP5arn zWA6JVyq(~+Ajf%y+&4d_73xGbYB^z?8`oM0Ze2eDJPF6wp-*9|mQGR~qMW&%dT zcc}~qj;ls_u0qz^kj-%r`OtH4rLEaH7`rGRXC9Ps3g(@_&u=xh;B)^Dt~)t}eXgyd zeuMTbVq8Hy(~6(kIKzysf_pK;?)gKZMMC1={?2B{ueW&wjukc`fuUWFAo*}0wDF(Q5q9yKIbsRqdcRJj` z0Y6HfjlaV+-pdu+F3yKS9!N3&>E)e89ihC>9}&n)Jj=_wC$ZS%y()P&miI?s&H1Rq 
zS}uS46uP_|Y=^SlgYD|+^6WsDQ0h#o@O?oc1Qj-3w;WP3b$HkPfA zk5QqWZG1&*cU*tU_k$Y2;6_RL1Ke2b?aGzv$hJ)&+ZxHUv1~i}SpIp*wiR`SdizEX z{;^6R+lI-rv20&}>poAs?FvRh+0O03rmxTkFkcvvJR8e496$OzWt$8Khq6tf?e%PW zcOcub$+NL+ulas()}|?!ixW@c`7B3g`>Bc%if`SDZyb*ccz!JTyv`|}IqY(*GN1MS zTcJ;G>rK~+_-pv+`!O2_0+^cxtP8heR!r8?IE&-xt*AJi*v7~GY{ zHQ!d{zxbWz+obEg(3^`LaCOmp#UG+G?>klj7YQ~Jw zBlWIb=akVSCExD&zxqmAw`lS84VUMvG5b5@VY08u(n-D~-LwYd3cDhYFdx4=Qk@G4 zC+fpJ?fKaB9gguhn}O$7sW*$K0?dGgk>1zL{$`^n`uO71MnRtoeI|2?U@n>WqmTQF z`Uvz1@w-XR@6fUK^Kcx*^)CjtRQyxF_-(~=?o+!|)*D)n!2ilW{vo__ENnl<@zMB- z%F=kH^xdEO!aiE42kEK_%$km@UES|SH&teLf2MaM&cpXD|NdRVU2!a1K)*zKkGadn zC9~WxfHs?4d6~Zrr`NjnBspfSYlD3pwChQG`=ft@N>x;DA>d&|9LN{hsywJmczC&r;hjQ~3+H zE85fI5ZdFgOP}jda(dIv$KdcKaJY~@7k&BJoxX4N&5Vw0EqB)T>);Qm3~9_VUU2=s zU|hAtSc}Ff2ZDKx&(PnR@7A~C6XAFZab>^loc=AZP3x5&Y>atZII1UJ2L z&=ZFKh@DMOAA-p+C-^-++>>uSTnGPW<7=tz()P*M-#sk67V9Z(26x%r4;t&1s2NyFL))N*%jaH)8N0tmu7;iz=VX8J9{#hR@~o|h^P1y&pZ4flq_x>v)5JBck<&Bp205Ah z$>n5}HvwHP2Ny}t!#p4U4jtCZ$>!FG^%m2uKVW?U>)ZT>=Nt#X-9>$ydE|QH^@u%m z-gEk>Hy)L>XD2vTe{ht>%%m6of1Mcc722X>rw962|3ME~%ogX=-|`%g#)E8qD)Tqk z*mC7EvD?+d7@X!vw7x6#i-J0^*D~~7PakKZkEE~1`dHQ_Pm^wb7w9I;f!zKifnRBF zFZtFGxWaKX*!VI$p#4T}A>UD681bI`27#XmQ!h5J>3O?l_KR>(-q$q$EMFTO`kLEw z#JD%rm2^_7!#(OVTn7Gh-zEHJZIhN`KKmkdXYVP{Ef{)__#EF?olB{%_h{E3Z1krL z_Z7zs`IPJ=T|Fup@jdBvA!WFD#Pu$8Ve#aA{40zn*Yb?|LRZV4+^c7IJapL=o1zyz zzbE<^$NvpHgfYjh?*p?7hz~Cgjz%s3-`&VgfAc}EW!}-kZgZk*;`d;>=`WO6L!e*U zIKISwNy+ib?41N>o39$@^>i+b8L~x~lWY9FF4nO}g*G~@gL}#5ayenc=K`spRogSn zWzS9QUz;1OP1NV|3~Ot?4VF*;vxhh)?pU1Z`&7#)OpU>wvb*J+Yvp9MyjyF1oV{$m zlh|Ln#7B|tv}89@jhTZ^pP(iw=m<eO*8f$PrKz|!dC*?6pR z5dJ2aW8W$Eca76H{L$uB@Y}GBh8OvW?o06!+u8aclJ5-jd5)*y!^Q6{WOeq0=gCxW z+3R1UoQynEqDRy>RX)xye(ZdCR(Ic$TR$uwP<-j8Eqb?FxwdS#fOT@l3ri>8Hfw2f z)FIn^+O_WQV(ZNMUiW|BS&GrhH(q!DuP|%o7}h+GYpYuOev-zA8eZ>v|F656k9ac4 z9q^Ivg+IEfqB#lLxrlsyDtdA`g>cff>|(|G^AvaW|4To5OmSDwq+Al`QC0p%j5L2c z0PIwzHqBBUEy|y!HqYu@NX~@64JWUnuO9o_rq#Z-hwHCa$Gkj0j?Bcr;+$VG;ZWLV z@#G+mweCFj%a+;mX@8uPnQy0a(4W@6mxybE_1hP^|7)+`o_CP$@7?z-oL#v82Ys5@SB+;1VgdI(p1UvPm@0nx6!UMDT#oUOY$HFY zF$cV#VGG$hl4oQ7oXfR5e-_JI1H06FGCFqsf6JBFzjLa1mRw2WS}lG}V~Am^Lvgyh znV1CkTJ9owQau&F4?_%1-wPh@eHuUNz%QL2alAY+^vE9Ghk04ChkHLF#@llraO}nu z=fMSoOC8(Omr!hI58j7)i+fy585Z|v?0uL?)E}O=_hF9Vyf~`D{qRc7_hGh`t>Acj z-!A0DOq=}}qwD)ji8?M+zJPs`vaw4(KlmbLr~H_WZ?Civ3InOw(^31=H zbvn8iC#avN@#281AJMVP$N9DM@x4v6e~<5Fe7YbH8Is`Ma*>=3$fbn0P4O9(tMa;d zMz~FFk&e%WW0B^2w}>Ori~7RLsXmp1yhD-C^?NDvSL92|Ta!5zWEHQTs=R{ZI6u$I zXt}j;CL7q=ygAs^^`)poI_m@WC2y8{s(mIbL+=}fW$c0P_qL2*V7sskt;gtX85{d} ztDISF`7XMwRYqpa44bWLZ9u=$_qlMqo=xpm^L?b$mqx^~k#U2?`S+0~DE}RXUB#~l zaBSGd$K3mR#8qAEBxYTK|5di;|E-Y)EF|wEF(%c?IeQ;z0WyYtyAr;Te;?^-aAWTy zy@9ROhY8)Lz~$J)y^~%R{ zG<=f0kL1ha{^Wh6Ux5vc%}I`H%wKp`_BoF>>AbyxHWcJ7ktNl zY40NybI0x2-uXcb<%7cyJcd6n96qhSqQ~L=$Qj~rTiG(e;UDyW5e{?j#ZB`vo1O38 zzZ!>|pclj8TIfEH!#^30&}+n4rQvXGp3CE^uRYJbR_CquJhsT+|B>ewyLS(s@9leW zyDLu>_G;nK>!i0hJOSAXap=nnad;UVVmSPh@ukP%n!4s(3CG-u}_)RE`IOToP1 zaF*c+n;JIu6HhDt3Wot7p38H2T>adh4{$ivo-e@RPrXj9u2YEvjo~om!-IOl;iu%o zzp7tLI~w7`iQvj`c#z@5<8Z8ETY$sOk$0TO;c>(i@!?zG@kw~I5gdLDUxhfl7n^>b zI6O`HmdD|w{WuQd^2@?qfWsS=Up0Wkzj>L>&c6cBc^tljP7H?$p9MI)N&ZoQ!cv@uE~cra7gaHSnCUG#pTU=fkUmIF&y5S z2@bbu5Qq2p7-xN~uQV8!t-Ts^=}hy(s`u5OWz52G z__uvI4sbXohr@mi;_xyrv*B{FVxb3IF;eh`V4u7J&j&?M{hXVr~4#?p!%Hi-eY|`v;IK<=dH}K-c@Ma@?xF7e0 ze0UD!H^PUF;&A%_hg;-u*eTove0W@~{4f`nH}x_b4#&~nJPv=4PK*yV{+-Vcj~7oD z@L`4L@_cx+Js;pu>kZxW1vvZ__e-ZH-zCI>-tysz*!91`hp#D~Hi*O1@de|<6MdW1 zS5iOan~H4#AMT30M{>XTa4NnmK3oyIECx@F;P6fB2erigl&iqP=ZVA1Dc|^T+IWrw zK3pc;1vq?AIadR5`H#8&XbE1;<8b8whl3190Utgj|0uxWC+ISd!`1Ei0Ec(l^949u 
zE{DUn@xR{UaK2&%Wq;mrdDmXx@b6&Da5x_xb$sP=s&j&bM*PbuH;Y4pUe;wkNDBrcB(aJXd-hwJ8W zsPW|uJPyZr9Nq^n{ubU0`OxMagfWf__j0)#oWvNc-kZdpv2yb7SR`v#G+sU>8mTpX zGsZ+CGx6KT?5TfY^KwI-e=&xs@uD9hyXv<4i}o!&g*K&Q^y%D9=StM6c>v0?@%G4n zOU&j^61n?=J963*V>OzC(b$}RtJ>%9v-38`7LG?(@ZERNnK(dgSio<@yyiE+vh3W> z9GBkLUVshNws34ZlqapDcz(-`qf-7#!~xYIu6Y`pi;r3~=ERu$bL4M!P2T0wN>VKG=rzlniZMY+7-z{)yPi}=;Pw%ZA57y;}n<03ozC{@RLKiuP2QCA2F|}4Qk{2fiL9o_jCq-zFlylxC)!k zPu^98FP+UPPwRA~mrnUM|BKVy`->cFZ23Xzlf3r4=8M8Pf;RYOOYCI$GyfF7)q2lL zF*x;huYsz^{(1^X^jLKsJjPj!%gT_P(KU-IoF`_@n9yY&e>H z(>UZR{IDk*E}*PhHvGRjHayA28^dObIopfh?BIOY$2RQ`q*!nobqarpK9D~@=RA-8 zK^zl|qtiOVIaak@wmuN-+WO#)@LlcM>tYYrSAfq|-=DBSw#;gu%716*$9t!k>B%n7 zQoiYagm4V%IH4j4*A8;(Oc1T@-6e56}+$1ybsLY09VO& zdPaGGGBGFHo^L20rw(!kCrh}uT0NQWaIwbl8LU|d z{7G0;zt8QPvk-qpPFs)At@Q}Rnpu?NaK!a@(3{zC_&&(Rxj*>a)$;Z5{oOaQMJHvb zo~MZIKcX$JEb>y}al-_UHm~p3Tq;j&iQIeb89zc+@u7`Z5`)~k^zMBX@*L$=4{MKX z*`!TtmC+e7!W~ndj-NqZwL!9;}wv_%K#0O>9qP@lLwQIx=um5CEu_xfE z+RYca2WDqA79@Y zNqt#ehJAoAmWxxucNtCuPpO|*bE`14I`PWIMRX|rZGfDyEi8X2c#iw_plpXzo-4{Z zr+~jC=d|~I$T@FB-$gOMDy|1UC!1}}b6L4k426BF<9wc!wmZE~?Y+;%^&AhGUx<4c zd(i$@vQ-my$-Ku$?z&tJ-*}btdEdAgK9O&%T;&_)m$7a${WNqtmgn3!7wsO+as732 zY5PJwyzl!}ria9*lwT1?>gpRkt%uAZF7`%_to}VN;Is@U203C0UsB#x*7y!>N%)F3 z=K1P34y%T_;&niodPcxvgXH<{?6E)jV0|vd-G#Sntx2jH2b7$FCON({Fr8&JrA#2otl5F zHMYJD_lNRbiR~I0)6_Fs|8pnpad{irsaxxRoGyk{ul+GwbdHGegdJS}tBbve z*M~b>q-V-d3>@Uw{+M35H(B>H3%;>&Osz+BnB%z$vK&P_B#W*4xfnTrihnCM-HdOF zleK@q`kW8Pk7~t<7S@xC<6c9TvA)QE!R6Og59h^KYO`=2@_fd3D8qcm&g=fV=RTKr zU{asx?K={8pII82eA9urjWox}_eUz0=h60SIktBjHJ=jt&2)4T zz~VACV-c0j7r-OVgl}(lZ%X?d^>Xp4 zj=wDL%;Zif7A*E7N=Tx_6yiYMy}pZw}u zeZ=GV9bdl9m-X%1<4R5ca39xaHuLXosjqM-?^q5e zH-a0&ecgm@wJGXbe)J>or`$&zFhF&3Ue|}vRu_N3@hjBf_A($AuLPE~mw|A39?!<_ z9<0fJPHQB2=MTV;-o3vQn`;fb%H9Hd?&h(3JpM22E(XJ{AMW-s$bN??6NAz5`S=w5 zfu+LPmXwv%aWU^M2J<{KlC}=jb6Vfk$a81VUiCesBRzL~gY~Td&xg;=N1xgH()T^w z_(rZDCwbRE-ejK$#yBoP$GI_%q0-%Pa7Df!7x(|yn0=M+Q#sutzwO0cYaBp1WRH!u8t)R;Ef0Gcwq1F&rQMg;?&t6p?H=nVi}M=(f{dr3pM05n4xm~WzXT6U#TIQSZc3#dwxia_1->c=UkB4Q}5h?+dk;xK+0nLd9L^gj(5M}@Z-plBty!lF=yn*G`{EcDZf9Kct)xMx>%iHr@8k^iZVe$K2j52JQ}2c9U>Z7B0Mk*6n_hoHaS z$b4|dZmABWr!%OpNG2Cck@<4ss$Dnyv|W78qpe@N>7DXM_rKG2aX8lMF+om}TO*O? zD}ElSGNyWs#1X#jNlpiD#NqV)lX==L@#hpbXt)<5o5DGt+x|&71Wn4l8CP`5A4@=V$*`E&?`dwNu!ZEJx6W5G!UMI6m{)257x*j&JhilU7 zDL0vY?L9`#@5z?2%r*|0mDy~yejQ8-yLR8YU`uU)%bkrInJWvQKOXzIT!}JdW0jR~ zh2G7x|G|*@nS=Cxp3YT$H#_iUKabVQUP_hRzHJv1k89yn<*Hq$Cpnw>OsXT@BY*r7 z_(*l0z5X`3t94D!DxTQ&BCN~5qzm~9*E9P@W&0i3dOr%r{jJ=3_df{cLVpZ-C$!NP z#C*+BMm??JYWsZFa6MuEi{D!Q*DYCteBlIqSh8oaT#(CC##!9!Fh*Q1n#Wlk{z~R? zJ~%Vkw{4Wur};_Bn;E{L{8I~Zwp()Yo<^Hed2@4JuQuP*^-vm@ifm`+@?2Rjk6sAh zlApT#M(xKR9{pKXuA$&;WG%T=m-@c)N8u=s$)%Hv_xT_mUX+YI(TCJHm^kwoc+7bD zn%#Qd=i_xhhS9;j_T0}n7I$~fV}5~sKE8?{D(1q^$#-vm%stND!(w~eJ|BBfe|X;Z z`PhZ?nhULPKXI<+J|8>K=ZNu^>u<*MLus4+7>C9;G&i&a94RME$_6v}7`hQZZczktaa6V7|DgM&>6_=j{@$k3fs^d!-&*k^W(6;)%UK~FU=Q+ol zHR9)J+9H0_9M*|0SB+z);hkq){3y=j8^izOK|jjM&GJ+_b{WR!3_gNb^c#? 
zn=gBk<8W@^muj0EYlADslsfJqruGykZiHiEzpsuH;^T}=HXe?Bj?~_t`sNn6>x8qv zC+uF8w9#H9M2K6_-z!yy`E3FU=2{E(X#M z7tbX7!!U-cKF?BY;c^D|-K6Ti58rZG%DUO5l6O3K(7lGc5R;upL?b(oAV1;0b3A?v z?+f}HN7d;|Nw?CQ%A4==Eh{g?jAAJNXY%S0qn|eazK}YL>icihH3uBjt1HyA^wC~t zOl5SYzvA(NE;D$U$5`2nXynb_p${2TSkuWVp;^;69V#>n4NBkPQ5q4Bz)Myr4V?*)$(>5ccepoXGzO zwtk6zyEx+u;L6z_48?g~0Uvw7V?cU8I8 zj_Q0XKbO6s<88aYI#*I|ApNOb(Z^#hQSKuj6$f>zzsIriGUYVl2>HrL$?4ClkIJ*t ztZ%V&@~7_?WOOqka{YempY2AhP~S?qlVsb%=i*x`$5G#moO>{SS7Mw=eX?_ucX94N zWB5hC7YHXjYkjmglRlc?-*&j{OCN3P>OR`e*u(YFMpXCFcEuiXt`@KRGCXJdXw`NK z_c-6v#ZR+QfX#_f$4~He;V#8ewGH#%mWbEyT7Nk$PZ*W`l;f-h@5nFGa?|^CPirlI z2y!38bvTb@J~|Vh8V0ADpKkBxz_Q~EX}_)3`*ayh_&&RMvK#+WY+0UiT}+x+>X?Ea z?R~m3dJEsD>-L;k>wUTkF)Kf3c?{)g&hlk=R=P4-I!7g3kj&HEi#+L^w|xiB+a14G zPHWF+d>>se2=aF2y|d7rvlG7Q@&fJ|i#;+j;ESG*5*cPV&M_RNbt_MG{VD4HW3KMa;K_X5uY%v6biQj) zxA}fi%nFlJ56K^XQ9S69Fl1_*w*?a*KXQcH0HhydY2xKg8wZZTKPQpna2w@ zbDV4(8gs7WIrMMuHcHR`2*y_a&T+Vos-JQ`aBa4*{4s26#y80OO2nXSOdfkln9sX+ zrO?l%aD>wjx#LcXar_!iG~S~B&lHb{CzMmFe*lKuvG_=R4UJdQkGFTFlDq?cRUdYA zRC-{PAB()e<=B>+2Qi2>5Vki!rY-16Z7G`fUXu4K^qA+e6I^|{d5$0#HQa=;$K`>@ zEnM6Q-%B6L5g$(M5Obs9gm@pkZ&g1=x9yzbEX?~V#ORID_u`;GZN8lUW~1>cfEp>yO`# zJ;a6CK3WiCd~WeeJtwSz`$Mrom^Ti_?y`~jYoaGToAaxueY_S9EX@ZMe~%IIs+c?bLLs@YOFu8v2Y;{pr29QpQ!dr@tEFMblAezg{d8B ztMF;q3T^-2#Kb%3D8)o9Yo3#&lVkD8|7Y)P;37Ngdq2z0CL|$oR8%ymCrT2Mkj?C7 zOePzWu)DJxVn}w(k^~JASZ3J43^0KqnN5hIQl*wEnn^@uS9--Ox6(>0z0#Ietk}|u zN?U5NqQ#b0+HxyxX-ivn=YIdsd6{ztW?1}qz1EML&y(-re}2#Zd4D-CGb8!j?z5Ly zQhw{RjcC3`S)`5cl{QxW+}zWp@5OHS4(Ge56URGxd=~Ew^g(Ic%+IM#knha>?&p{T zWWRd@>8i1AeLuuD^ZqvNZNGM$GOiu%a@?!vWR;xvy}+s5jNW z7VT_lHUDmIeX1ea9I9)Mhua(ChBG|I&&2HRU2UE5y0)ge_J-EeVNSc+;*oH>k^VG! zUm}Rz{3pe6xjex=ggnAtf}609a0%g3!s`gHCuoFg2q8k4u*X9j!d`-#u#a#F;Zj0A z;WEPIgewRx4Lu=`;PP@z$Rq3}xC#3Rmk?YPoFn8B_7dELeS}L0mlE;`mk}-}TtO%x zTuHc!;32$}@G`>735A4L5MD`m6`_dmYQk#>R}+c}uO+;W@OpwqxQ6fs!W#)Cglh@c z5w0hc5^f;eNVthmM!1=pKu4^5aCWjIpHqC-2_*V zauf0h_waXx*y^zn@(6ngZo)poC4@@}`Gm^|mlLia6cF}wqbKYoxC#3Rmk=%`|gx3=^!Zn09 z5Z*{AAzVwij&MDplyC##M#4>mGQ!P-{e)Wxd-^^7d4#=$Jwud(u$SN_>?25@-M{~q z#`{%ULsh4;(2M5mu`Zf$C-i#B(}UB}wnk8)c0K)9oWSo#P! 
z+$DwOOk276p9BH_3Yk#oqkalgT z^xBRaw4e{D#P} z*3uAD9zC8)OQV*kP%@x)!~G%xpKcb+#I_vdeYsz#aTMq&?jFL~GmA zMhva3v9YtAa_!fG(QrdYSZj=g8=uga`*43+=Hwmn@pL;AYr2Ip^+%|=RxRG9sp;F+ zo;&${DBKX*()wCMYe?ly^@vzQJR(KaYix&nR#PIgdfJ+d`m%btqsF8@%~r@P&zAPQ zIKK_;?G0yk>$g3uH4vmNIvQHSdY;o=)5xT=+*^jt$%l^~J$CZsj^$7hcT?fcbgg!7 zceLBi{pZaxz*S48M}=Cj^--{6z9s%~nIB}RG74jHeY>zz|B`k~PcG@S)rc%J>RNh% zvz%Mn|3oa@>U3mGRL*!A-MOQ)v5~PVWpwJc_&?d!5{^fjTTg3Gx3}?st8Y5v9j-q5 zP{vHBM=-Zl8R=xzmuXLh8`&mlPlV5?;YMS0>b1+v8h)~~!I07JE z9@n03W=u9{9r1R_GLv0rOSmpfs_EbX?biLb?LXjhNr5D)UPPBm z<(a?rEVx{!+S-`$TV0{%j>d-ekSuNLA5Cy7%oy6HYSekdPWRCD#@`)bn#R7r*8OO0 zPlel?qHRyB1wi`5^?Edy$(`FZ6ppj@)84Vp_${PDrKFqLC7rM%-VhIW><_oZ;%7>4 z&=V+s_@-<3`)cksl5=&OX^l4|jDd~u>4?l4;!bkq;<88oce8PDrxtDKko`kL=#XT( zvo)NE(S2C0mGDl@`S&i%Aziqxqak6Hq^Tj=AzLe3KVX%MTAg5*{-fc9qnccFC>ab^ zKR2$6v?C`TxKnHCq^PIDnm2LmIR86xME-YNYi`m`H$N4YZJ{gP-l=weX1u7npo+TW zO0J^=m1g#Y=4ZlOoJqU1hnwnXo5m+>O(XUv!r@rviL@Vmbam)%T;-juvY%__BJP=R z=x&t{F8JhXPW5k9Z#IJz(DtX%-_sVOUs@ine@yR**FS6Uaf8npJY=v;XSwQudOwoR z6=qAqhC=mDsVDrEX*esBd8Q$qZl|&Dinl$XD)dxCM{^^UAQL5{{$w4ybN=dNf>K40 zdSY&9Hpivfr!ke@b(iV0E*?(AAM-vgsj~l;eVl&&`r0>K?=suTNI`39rj_XFjnOv6 zCp+8X(x92T>uWo&xB5d<8xzsfbooE^gBT$%AYNWbkg<4}9}rod6J zcmIBP{P^*0eK{qP{ayOgGbTy-jiinoKd$_E6JD=YewW%O#oC&w5!p1#hF<@RQ{Kb@ zFFDiondN9|Yd_T-3bCp;MwrnU@HAtS{Qndkxr0d|vTc58zIc0E)E4XEo4owfN*{c< z>ISP8!pycYDwHj8ZKs@9&bBFk7Jmn{(vv4Dv-@M_*Z*afD~rE__^YnT?(g6({oSgS z9?RwL)?NDZ;qOFs_WbcxGWx%ZEbp7}_uyNy`+L(a{oSUO9zJ@1c7M0+(%5*eO z`pfOR^ykOl$?Vzl%PduU3a1xYe|Ko5l{uQ@4&_hwEEnnTuvS`iJbV5QD}S;8U8KJw zTInM>@^|E7{T$UwkLSqOQI)Tzi_cd;D?R#9_HqW4Ke=?d$Z{T&c_6#LV;Ae^IDYQS z?&tVL`Z>Tn7pOXx)zN`n&T|Kt=K@C$WcRnrdF}x7T;S-z?EZE+&mCZ%3mm;QyT4t| za|f8`0!Mw>{q1s|JHR~m;Nk4^(t%yha|f8`PQLZQ?D^Z}Ja>S3?pXDa?EZE+&mCZ% zJ9;=r{`?Fr_3z@w*8%3aV-IA{-W?Y^E|}j=KAfYRhj*DT=C@-fk7m!yG!0%=-&+Ny(kBj$7p7vaY!$7PT(U@!L{oT;JL+qLsZlZrn$*o-xXWwyD4QZs*OW zoA2g|k&?Ec-mmIqhiFP;#^O3pzwdTBtS%4rYjP>Kx+0bvccHeh{?wwWIj)&kh2~u* zn~n#ba^Y$|@U-2RqaHeFZu?z3{duW%6{BBNmT9N(!Hri*U)n|QN%~3KCBK|@T%UH* z`{g?ABCgb|>ok?mj{SO1lx}-a^(J{-Yq$EgwX!-H<>N6G7x3KBG+SHhAk-{8AZOH50X=^^ATyLhEQ#k9$l9+vu7)TN~xU zR7SZ;fqNgylTBf-ug=zv&X_!}3x`bit`mAUwd(lZWA|CNgET#Hb&vbrW5=7RCMGJo z)#W-Vcc*y9K+Ed4R~RC;Xx8~qc-zy~`NN^mz3mOLh$AxfWctrIFX?gXGcLDWV(o45 zHs)7NIS9&M`>l8n(RrQq%R>8Rxf6w>sR~(bjGxERD;*dymP;ZHUSZ zPI)wC-C|Ry;>K!PJ%ZI* z^5BNkGMdz#rZB5;rWc}E{*8old$;#rD?>a-(>sIYz^q@h1{iqUXgpnJ%c|TRCR^dC z#z~`?F1<1C3#d%1r$zc1%WgU}&gpgvX(V9!Ioi;wTu_{lnJ<0D?cg?Za}!TNjNEF? zAtIUiH6Ei|54%r=xmA@eqDa-h^teo))J{te))nf|YX6-}uj{jwyG2cg#zT6QfL`w7 z4;?#Zx6-flL6o+ascJAB7`qTkpzEXsTBN&i6Rk@3OK+4(yfbbL8g5Ohygew-?3>i= zEW(F6TXj3`IU&7_JHz`8zuf(Ak&cI3ta7#=4@fp&gy4bbss`C97UDpVorr-Zgs#}eU2>e8sIbdCd^ zv1l_d81UR#C9U@w-XoBQEad)_5ksD#%Q?y&)lP=vN84IMdI_7F+dJZBG*^%;%T!>d zaN>b`k+%j@gq%Zx-sh2s;6dNrmz-XN!(0VuU~S@ z+8*H+tl>fWiFt~;GrN{qxOB(UMJLa+HX1dSkum+e<;}_;>l=?q%`T?;B1=P4^Xc?U z6YANUlvQhydeCQq7IQwes#OoUSS=#jli>ZmWGCHd6+tr|+Sj%$8S@onXtr17IwOebVZ+5Cl+^G>=1IzJg zJm~$|a4z;xNQ*@q8pDycXo$P*@?6DMHrZFnqh^}OW~FS>Psr@fb!^mw*{`%m!_B87 z@ebL^Wi+K^)9mN^gKQagMtr^hKh;3bJrxb#99K)L7MDMi;t}JFHMJV+fMvJ#=eEUz z^o~ZGWdmx?`EBa$Nc9Giu`@H$f3Qs|LAokCmPV#9efYT^RL^Z?H)}gBXVvo$Bbz(~ zYH5f!$`WAoXX$lD@p&HlAmwDhG&LtOMv_|V-w{#jljDD})>WQ!);X2f1=-buy$n3YWWHOm|gd2EI2@(HHzjPmcK=6jd z7ExN)VGSG>-sX-7GpQ6ty@F*%Yqy-I3r9fVybV)JqDLM;?&+ZVk&q)6jlKdpg3RZVVAMozd7&H8;xhI(czLWt5hdSA+CI zlRS@anLN1i(6<2F>ss670TtIwy2w#JTmDbPI_~GHBOJYt4s?>MO0)M{_3bRLYV;Fy z*%tQSdb>DWs7~YT?~d~-38%~QW|jRJ(?COuRDAmUy? 
zqS42FI_*;HeRJWZS)k&x2e-R7JE$F zh?jiHKrzngUAD~pQzig))gUsLv2im8vl`{fu9hQJTWmQOLajQ;uq#8)}v@&{4xx=3{d7SPo&159&IRAtith}+p zm3PEcsN|gGaOilNZ!5xf{*2u%uW+=-O>GXHEZLSvhfc2uX|`PFPjg{x3AT&R+hv)> zbC!=@s4gTMd7V~XaL$$=(KWSmYh;Ve=}&2_EN^W;UMk`;&CwGb`OenK@#NG=zLhg8 zF~@neYHE=--qOh&Xbzpq@3t~$db&Sjcso6A^|!6e>8~l;$x_-VtFqaEovmunw8zcr zX>V)M+T&Y>j@)HF}XFG z_F~k(L(leuTW!p8x%Hhf?vaOihe_5)c~jB6G}OnNneWWW%cxiT^c3dRDr-d8&+8}J z!;Nk2Ru8tHH>&jM2JY}WoS6M8(%2Z)B1|dTsdgsaMtPIXD7^BMJ}0*e-A>F9j;v&)W5ng{m$+1N+6VWc^b0EewvsF^88@8tl?vVA`+`Az z#U(9yb6(D14;a_)_VdQwCfVI{!;}6M=bELhoy~bvUn13VY>aZ=>8n9AA){To;TH)*2D@|clbkcp z_vg@wKT~Jthqkd7V9O`C3G(kgYEGE>`mxyL-wAcGV*3DwZq~2o%(u6($vd3fgQ>nz zGhgUEbVz?kPF|HPKcU{;t>%^9GVQRuO|KuGRM%(v@e%!vR{anCa!78T^D!5@>ETwn z{tSogw&Se8aouoO-n^B^K>D9=K6&Cn>n=q*54`l1z4S1{{<`b5{rmUdpgpEudal;) zVh#%(I>ZM#%GH7W4Zm`Ene1+Ebr~-!JIaP#MToNd&lXiPMQZAn4C=~QLvwq%y0@+S zxcEf1yXC(-sZD0EyR>JYyHiV7vaK+pFzVa1>dyf0)Y7$Yp4poGotmv(kjaC?JGCv% z!oH(0oz;4XLT#OFxyZ7;8L_MSwW{t;&DpL9(hbuPit<3k zxG~jkyj}TI)IW^}xak7hM%mU*w!Q3UJx9N9$@BA_dd^SHytduYHs=m~0+KmSE4}W9 z#|}LHv&n=VLF=JITPKsJ+PH?>c2uQj-~Yc$9Cx;OaLES`)XlEeIBx>z?W-?z>1>+w zDl6X47KxvlasJFDx0othpG~jis4RQQjI^h z%#*uMQ}#$@)_%x1vTuF37pFElhHGnMW6!R0>M>XL9DB^&=JSW%lI}Jkd#P-jmH&o~ zpzpb_``PN4ZFT(FKHGlLn{kC^o8fY8;{O|ti_Cef{HJBsl>z^=;jTWPz1?Q`r|G`W zmUZ8DZ^brpx5#$88vd+1Gp_1^XD+;cu=_TWPat1x-lX4IpH$v`QuGG>dmS!1LB7)a$X{dq_VHX;3M?CIWd(YaUc!W)x(U&Zb#ORllDNlCp* z*1ly%;F}ucyGQs&I%-0_k6(K7aLtWom2m8s49w9k(}8@;LtUt?rB0rR$!B78NA271 zanvpOi=Sm=zsxfS^%OwAYIMZC@$PMkm@`w3=uCzBV@&<)Qc53@?}EARI^J2O^31X; z^C2=~vR% zcF_tyG5IzhXYC^IGJpTXe(AD{Z#r{5FXWpRFT%ClwLu)C6ESub`qneu{^^-;*( zZuNdt>9?r|>APPqx31&X$0WC`me!fl-NaHm?Qpxfu3X%rapyJ*oR$2Rj2TH}U4m|W zJlnoKyHDO~t2-rMbGZ9DmGPeCIw5y@bfh0HwXFNY?CF``?snH*#x=mjx9N4xCr208 zqQ=JY;!|P!Bj5AGTSdExV;1Y?yQxY2;$}BP@<-n9E82m9$NVVf9ewAl zzN1vXzA!Hx&HEy9X~niu|8r|iSl<>+huU2usYNXNm7{rSsej7Dcui#MnkQW{OFjHQ zyk0O|r>jHHn7&z-H+Ob5o*ZKPOIF)nt7IwGc2@&tll+LU1Ac7z{UP`HKeqg-F8ssq z<9}@VlmW7fE!B@LUq9qN{KuBRAJIPh$Clq8!#+C8?4!*3SMvY2T}E8neDkhs16|kb z;kzRVHxq6p+(Ed9aEx$2;Vpzm36B#R3C)ByLMP!_!n+CYC;TGedBVpCpCo*m@L9sI z5xzk99m1Che@OTS;m-))CVbBbmyj3p_@|lj4)ae^`A+ji`acUU*Z1j%|3AWilJ1qX z!EVCqZTbToAGL*tIhKDX;|Fi8I~n7?KHuyT<{PZJ8^>p{<&)0$J?J+#cem-gu!COp zF>U!;SU%iV*AQxmab0ulWW#+Y57r&4dhm|Ay3?(lb&ZL{frE8bZE`QUjZf*HId<^a zF+DlbJ8*}j*V6DrxQ{AKT>T4(lpI#it%` zv$xx+hK}%k7u5o04eG?tfw~9ykYl3mqFn5`dheZqn_aHY>~pzh{&_Q1%kc(`LI1yO zrg~sE9ECk_g-@ys!%a933vc1G8S9&=GFSlvFa~SkBxuKJ z1sI2$un!hWzF-+#fB{$#J)f$DKG+RoZ~*qfNjL)M;TRkypJgrQQ?qacuERxGz&A;H zTF<9SU=J*Zytup36( zi4XhWEF6bxa2Xczjgg5C(uW1{^QjOlhrMElqi`M0!NN}R4=Z62-wYXn<**ml!x7j6 zXW$5QJwK5#zeh4Zi)uEPW@{Dt$WLFj{1uo^DIILv1pCSe(zhC#Rh zBe1BC^1^XA2J1gazh+#H!%4UbgN#q@L)0s*fCI1|PQq@u1c#to#xJaf!LO+F5un+b;Pyc|ka1pwb^lvx=wZpW>3$!;3!WisI;^^?vGPHa12J_9PEc{a2$F*!FYlta1GYLB36qi^uqi> z@(p`o7ufL*X3_QMgl2=|R6 z!zDNjUB7ldH3K!c3I|}}1L&b27XLc&pclsB1RQ{spJ)8RvT?>AoP-4rGVXtacyJho z;0El41^=6V0=;k=uKpJN3wkE$A1C-5R=~>NAs&pvUYLYqa2(FV6}SpLze_y@DL*WR ze&~Z$unNY$NdJWWa0m{=2{-}gV9^x)40>U~TWDA4g%iI=JHs{D11r8l|A51A0p|Zc z?F*}5$wTyyKOjBW17mO)CSm>z^#hCHJS>MBupgG3WL(2axB~0pCQQJEKP11<_eazd ztcEKv0$tV2Ltkb7hXGgx%f3cAVGt%^ADo0Ua2amE{D*1ZuhXAk4XlB^-=H5r?;Pz3 z>)|Bqhf8n@x*uVFgvBubkEvf+2CHBYhF}-$f&*{}&cJE7^e410EdNvb)1$N}^h0f) z{sb#w9L~ZaxC|$t=g$}iunM|rSbt#&oQLJG=9{b=a0vFpSvU?0|D1R*09|ioo?D>2 zFbM;21lGd*Z&7Zj!2y_n)55=?U%>_Fc^mByOJV-E>6fq^hF~1_!a+C+C*U+(hbz#x zNcxXaK3D{6p%;$88d&fh>I(+pFigNn*bnF7CftA}-=*Gb(Lo>d|0VqdMqv-^gClSO z&cRi<0*jXD&yU0JQQt5EL$DY2!nJ>)zF^5Z^Ck4dWtjL^+Uf1o`+rbha1O>`@qf~< zU=5srNw_HIp{tJa{1@#5yUsH|!z7Ht71%H5FOW}I4`<*)CK@H}=?n0_s`1%W}E?5PJVGo>v^KcDTYv{w&EA+#D$Pa6}MqmUcuDOuv 
zhx2e8y54XhwE#=t8mxeYO_T$c!vR&I3I?I)4*CNugGpEcN8uuzhwej+8(0KAt;_?^4~Jka+=M+aa3}Qx zYv2@IEGNIve;4^}qyOJc|Aj$V1qWdaj>BF!3x}ZV9?AvUt{CH}f_%dbSOJR< zUr5!!iX)^CtKlFVf>UrBF2J&*^eY&HrB8Am2H`l2z)jc(O9HeLtb{Xg8LmP1G5S?I z^$q=S7S_UbD8Gl7f1LQR8;-#NI0N1HGJatREbJhk_Yog!yY&cN8v0Se}Hnrc#v{-($8T99D+4)<1N$^bX7ABp!X5_ z1+0V{unQJHg+EvUJ&!VeU@7c^HE;;_z-c%L7vU^)Jx%-8Tu7C`5m*UlVHA4b%J_jv zI1U%!B6Pitc@-AHf&}9LdSMl;grko!?&a^ti4S9N9FD^USY5~bc82)~mcbDig!3>C zef1Yo1F#-W!d|!p$D#Wf#xE>}i_ix*VGYz8C@-vpL$DT3z!;o^Nw^LtVbQa!N6-t` zVKpp1#k>Lia1aLJIE=#u=xe0BUCay63rAojoPzZ*KSVphVmJtg!qhjMfQxVix}Kxn zny7E+gOxA=<8TBH!bvy|7vTbQpQeAnTByB~{s9AU1V-Q#?1rmw1iB)OBUl6%M3G5R$ue3E$w7PXTetb+3} z23O$_EPOBh0{UT52jdS0VK_WWqi_n&!ezJ# z^M9Ut=^~%79tL1PjKGrT=r^zuPQZ1z0)y{lynKNAfnGQWtKkythMsrPUtuYnf<15< zj>Ap347FdNUb-nCOuU9py4CkTyJ@ivp3yb?GH}t{z_mWSz3cF!`5A6s` z;S5Z|Rk-wXwBs+*U*AVN!r+G)7qI%H=wT0>frD@fPD1ww>DRCn77wsK!s?HcPnd*# za2$@oH8>B8K0!XA9~OUzb#9RH2y0*sT!y`H0}jFQUt<1(zW>4e1&3f!KlKdDVdzuL zKX3&0!lGf)gAq6b7vLJq|1{BG3@#k z+8s{8D4d5qu>aQ?zi=4N!ul`Jez16w{_;HI00!Xd6!ii}zCwM%NjM1;e?x0{Qt9$_FRrsTa8PP3i^ae~Ws7 z-fuI{z#!a&J+S0O{KFtzhjEy{Nc%xA9D@UJLC(WXSo|IO=SLYAFaRSk0w-V}T!h2W z^Ih5jdf_UJ!@>dj5iEn_FaW)ONxHBK4!{vO2`Aw^%wHm1SOT?=QEnK3^)LdHun&&I zF*pzBq4quc8?1!IAE!N`501cEI1Rhu5*&uEzaqb|6t2Pw=>7!#6BfhLzb0K6fDssh zePUl`9K#;C3znxCTdH;W_46=!a{t7RLULcyIuY!BIE^H{cp9{CnbklJtHg)ZZ~?~PChUXSFOhE;fHN=( zS71NX{tx2}R>B211UKOv)cyza2J}P!Khp1E6-F2j15|0((_EQ3oh z2t8}`L+FJAaOj^XCoKGD`UC8Ro?*)IFU(u842EDg?1M=-49DRNT!gEzV4ZlMCSTA8 zL$C_=zz`gOT`>P&i4T2n2G+n;*bP1ZlX(XE;54j(?ti0xVHF&KQ8){ea1D;b!p|@s zpdU{DJL$ni*ag>M5|(bTenH=VF#o|IbdQivSPYY}0+#(J?E+&k3A;9lC+FcD9EB@z z8G8N~elCz-*aK_eDC~mEa0qHC<}v7li*OO<|1xx?QeIfNCzYy!?z~j02UfsAxB_Ry zzBiRxf<I=X(!S6KJ!|L619?nuaxS1@^*%QTi41!ev+qi}$5c zQJ8@JaOTofY6@25r&7zX25!PQ)P9A24+F6LvQ#PpyJ0^Zf#Yx*&cao=2For_r3!zQ zcDy2$@I<9$_Kr$2QI)dIS=Py!D~~gRj5JtZ%{s12K!(T zj=(sah68XJPQnd14=Z0s{lE~^{x|Io18@*V;5h7tGjISd!ZGN6J@H`_Zo*!ueF6V4 z01GtY!*bXM``|bnfeUaNZo(y~O;A7A(C^_GjKf(t09W87biaXd1GP8Oe}0p4LO<+- zwQvM>!)Z7SwG#R(EQf2b1{VGn{T=#Y64t^o*bTkcQZ85x=V1b_!cpk?ZORROFnS&R z9QMKnR_sgqtu1wMqQI0L(9?Uf>`cgcEQIF2iLxe?uyj|2yOdmceQm zgv+oC=HHk~C1E8Tg|%=R#^53xfUe)ApTH8Be-r%(7Q=ezg9%s*2VobSg5z)*mXwkH z7a3>J3wvNS)NUqySOte*2u{F3xCAGmdy0C3rBK_SN>#uG7=t6U9csTv{X@U-R>}zjKH3NN!cjN{=ioeC zfhBLEUB8SD`d}1R!EP9W1F#E@!z7%86R_|$(wFnlJx%(s6qeskd|2$K|H2>~fKzY^ zy6+%eSP4B}q2EG3oP+f+{}B0wHRaR~?1u|*>2BKN_nC+8p?+ZGF#Q$|!EU&Alzt0K z1N2*%I8MI)fO>txf&o|#YhX2u!6;mU{m^rQ z{6RmQg+aIshhV`UQeUtHdVkzuegjKk2=>5U z7$UM@bJ>z@o3ye_=W7g7t6^_P}{K z0@vUy9H^mQVEtREmv2x`SOwQ$2zuT|dax7@!3j7ke?LY%SPngN)Eo4{L0AJfU>7W? 
zrGB6fPQY5Y2ouot$LOFJmOoBB7=m4}3nt+>oPeIU6AuRKsn0*5KA|5@z#yE35x4}q zVMzny0*=8cIS-fRJlurdQ;g?7r5{2+9Dwz35hh@=k@+2JA<7GD;2IohBHldf6b!&M z7=e|iS$|*xj=?0Hha+$mE<*R8Q4bNu3+#eHI1J-3)J(g;0XPfi;2O+-2kXfIZsZ5>~@W zn1G9L5N^OJSQIBcSOGU-H7xuN;~x594-CR#7>Dz40B*uDSlG#a9ERW;oPvel#r_oI z6PCehSOKH32KK;FI11qGH z#JmQJVezxnH`KbA$6y=|$ay#khv7V2ha0fyInwf7kTl_&UOu|Yy38S#^ee`!2fa7okE(qVxJo$Zeun4Ze za+u%CxQ2e%1MA@k?1Hl}376qKELdS4{dwX+?+1tnM_~^v{RQH|VK@sX;4++rn{Wvh z{vGM`QEnK7K^TD%*af>`KOBG~a12hvdAI;qVaYGjfBv5KfTgetR>5f)hkYL;KHP+p z(DNbU!z#E5>tW$Ppo4xm3TvUOpK`!rI0F4}7AE03oQFlLoc}QW1^QqOtb{RG3wvQd z9EI9PsCQTaH(?T%{2%56SP8w)(_XL=_P|;=2xD*@4#NfLPSRihk$wSva1qu(&kM|_ z&)r?-)9%bECa9 zvmIx!OMM!4t9_TSFJq7E>3beC+GqoN#ofGDrQ6Lmy3fqJq*c6^IS5T=T0ZO%>@|kZ zYNIZzu#aG`HS7-?_7L_}>|;h+?=b9L*n95b-DE@mkYP_^pTu5m*iT~DI3C5mf!!D{ zW}dA+DVjMnn`n%3dCy*QZXfk+l}j}1X!0wpIx*YC8egI*VxZQbkz-r?A2aJm+N>N+ zHJW~1Q-8ML+!d;AFHc@pPwUPzTs6*x&`sv>C7NC|6KHakZPf5%l+7%^RkjlR%%PdV zPm*Ku?Z{Juktfk@pz|Hht<%tH`xs~Fa@A)5%_tgkY#DQ4m04HfCxT`gO~yE=wQKs& z6d%c6=i+A!O%WQiPa0{x+3sf^P3aDPHqeyppeeqD^6#LjK(kYxLue$=e)4bYOZVIJ z+>53RO}4fzBy*&t&tDWKWOL|vG-uNuaO!q*A2TIqc_KqHMV68 zZW6kQ8;9$^X@KN$nVXhzXU*~y#QCyIR%`(%b)!#@pgVB-(_ev@YADf zf@d#3cbOVO`Hx%W(>Pa+u9|#T>N+z|C(J%9ngp5z8lz23jkTAN^ajy%q4DW{Z0%!B zDQXWpjjk7+eeRb2v5377yQast){AxQN$iq_^e1yZF1V7l9(!g#E5SatjlCTE6n05V z{K!15aafIg61yC4SC?W>pqW5p^a0P=ymNb~%Tx9`8bnk2AfIhOj~}zX!=^^^Im}u% zjAo5v_uIc<*ByR7U*@%RKVHROjS zB0fK)e471)_Ro97>;O{kIka=XxtS_v@8|AT+POW4&la6~)!~BIPVG7T{40~M*n4Ep zcfM=nLHxT}KPG;6Gu6c!;T}|Ry7!#*oO_AN^Hq7DFcJ{07wt??UpI}sn03V*$Yr6e zFDBTCt)b1fC-mX3ThgA|O!cu}ai36WuaV#4b61n!r9DTUe@*h$FBI+l4mtiaBg-y{ zH$l9Dnaxy*l=E{c-q60oXCFHE7UK2qJMw%mdE$i!_a3#!(}-8_QtFaCi4R-fcZ?4& znqf4ew`m;nZ8e&q9W)6vgF9#j(G2XMnL;CZmi}eSPo>%Kq+MoM54Bm^kTuVJsj3hA z{wMF-M$3qgb+j@3#iTzKD(zg}*&EN@pxV1Md3|0D>U1Bico}o!TXNS&2+e2?jf}Tm zG$Ux_m~yDOROaU%)~;df(kAYKaeEo9b`jkuIx`>cxI<^Pjnu_FI!Pxe{(?$p>!ZoN z+~8296RnGNVis+mXiJsWwpJ(i$*L|jA?a7Xob~Y`KF1~T9BX=VUyDr_LpOu2QrFqi zPTwypNuHAADgUINj(hm?In!Fi#}L}Umb93A;vuxUY%;TfJdTSn5vDaeHl?M&~g|A=?pflRYET?1t;zyH2BlTw+ch+@>_^CBC za%|IB{ZZ`QX!`M!Z5|ZOFq&aBGCpm7tT8T{88oA4jIx<|w(dbl-L9gUMkB{&+h`oR zU&%fOdsMfZaT)08y+bLQ{D*Uwx605IJN-EBd&bbX@MEmQ>b|9hsTX?@c4=!{8Lj=G zXhzW#=g>&G=Fn*Ae5$>jw9yLoGVB`j8vgCqH{?(6JQCmYD%RXbw&t%CdpULujV*tU zwyZ+q-$4^Y(NG54 z7)&*VZCO_RD{Ak>dvQFq4b6(^EUe`$4wP=E9jDD-`Z)n)# z*hAQJjhz8QldCUHqN&ACW?!1e9>*?eZP!+kpGDTq7@A^P=ic?JqBHkRN1o45Ui!i% zc}Kqc9erCNeo9|WS~b=^U$gE`n&lNu6`FoDwH({(x7yT5d1Gj1&;*8 z-7-33JXqJfW7yZRQ$$HaJ@?Q!oH6XV>U7o6E8YnUhSpIN7+*xhfCL>y=d|?;!0V1Si>Y;$&dR7qxSZ+;!AsuqANh>l6~q|mCkHW z_QAE2e7-b`_WK7TYR!*+q+GnD?}I6b+)>doosq8UO`u$ zk&epq>sYh4u@_+@M8C3uh-Kv^F?P&r&Wz6iY9Z8iW+wDldaAA(L{)wO(S(X zz&aK}BV~5K`G47cSW8nLuYRr@w<+_7rTr{TirVLQ$<{a_oM08HRhbf4LZG? 
z8uoHDn`mmC8f(56O+6Z4D0_KC(}TvFLnCD#K~sh%!7+KZwR3VG5ALkBdk$R}x?Fi$ zM>CGb=*wo=ti6`hSCPgTLz7u|UhERL7mcm%xD-m)OEsFJuvK?vA92)O0!Ol|XNzlHBS>CkZ4}Lo(7HD!O^nU5oM^8yw@$whne!b*6D@ z6Wwr&)mKbkj{d8?fpLT;TN%V2Ff>vg(lh}^kHAZUeB?u9vyS*7@Fd?t#j*) zVb54EtY@rJ#<^nV8R7=>DC2JrOB%7WSDm|3b+v-z6?y$u5nUSkVj4I2=ilEkp4fw`c;mmnq&9T zPf31l={WDRF5xeP){E9wE^F^B>ASDvx&%$;*eb@}joqF{DWea27xta%v=*IwH)F0k z?M4$pqv4nQnd^XKP8dcLLX(+~N$mC52lcqNdB=G@yNqrgU4=6Z>;AnO|E!5?XjWwW z|JN@`UTpIw*AJ{r#_(6qoc!0b-XyGf(YntN#6FI_d`q6v_qn7F6X<4k&}ry~&@G}f z+Rv;XtNlbXjb;Un(SD}JTF=Eki*OZ<%nxp#Dxa;soO{YbB5X*S)CKimyFSTdkEBoN z(iY|7_!+LD06I^Edz2c7)!6&7=gM;e&EO82K{O*dG?IrYG~;NbP9+cKv%CfDQ`^|r zu+MH|&%c2+V;j4MeHpv4SG4Y7__1$bFV@pF>)g86rD3l{vxX*HJ&+YwNY*1X^1h*a z=#v@!PVPbI^)3EJ@wf79?(sB-M(fI+rubP$Q-)@oV_QFehq?EXdM&z_ayhplVG`zBtB~KQS#i@wJL7djXE{!&awX=|W>~KPkr|<><$*%5ncWdp&T!ApJaB`c=tI)H~Wr zj!DN>-t;|tjZ;~rcGn*fkY$UL%obf%5KwG^RMI~f@TGcF<+?n%QWoW z*jKR|{oKrh)z2mFFdFx})9aV{e%B=SGVG!uUF$x~JoeHIyVUh6_7dz>y56iq?j@z~ z!)Tn7@1(9o7jo*XJ(~FPqv=Hx&^2aX-*O+b9^EiHqi(Eydjk77c6(cC`06+8QQfbZ zN5`{>aWtE0pJtso>U#l=>pj+3RP(mvVV+p?oWPP>dlsSS>)5#vBkI0_GokU zj|wzPXf*uU+TPl~NL^Ku@FE(iclRACO*7Bd`_$5gefV;{*V&#LCi#x!Fo)wAF>xhN20g=|+=nJc}ldW?%=+0GhEKG?Qp%a%iMJrdj`%(8T1L?<1N0BDpVb zz^V?lW_u1Y=bhU+E=vu2H9l=+ciac5LK7se^m#ODFQ#!Q-}zpP-H2=UH7l;PLzMNY z4~^&yt_(=zGje|~G3HVwPv(N-pF?228xm3kwt9B0!6 z(74b@IcyqhKGHBp3_o;p(U^7M*vI#w@$8V+7@C3|H1lYryt(qTVfe|FpJE^TfF07R zKvTFwS|P(vwzLYVl3p|=__41C(vOA=`53K*FMKSs~3$2jj=y6HP-c% z)XgZGd^GkxBKGNR?9$h!ST{w#z_I(*7p1&r9*;cFXH;IufAP{MdEoU;V*!yed2S~i z;<&byDZPe@&X2C=x3;vUWB;R}t4BA4&d)LFo8_|hkK(Hb%{ZE%uCdK!EG@>mE%ToI zR>vZ`ZjRkgz99b0zGghLe$0G)r{?lS{FeQ;H4j*OuXXI@*zNsTjtj`7AG^JNC4VJ` zeU4*Wnp`J2$6yt@CE{lrgBsCeXo@DY?`0$(Ni-T7Nt^Vnbz~HK8TM>_MD%lLO3+CA z6W1JLw2O7?Ld_Q&hOcaMq~xRc4#qpa?DeE!_hFZORB%ihW}8~~lQhgVXe!avb8ORC z*DD(4E;K!8e7eSLcWYcox+u?fre_fw}aL1r9aDWy7Zw@_l`gJ5z*P^qpkO&NAWrOMXPPidbie4 zY0GhZPM}fuQ{Jn5nqx%XhjrXf*~H)QRBnH68fQ#2%m?n{%AYxh9De@t^!+f6w5#qU z{V!SdW#-R(G^6@m%+O@3JE`|1no;~1eOSFCC2cT@eHy#bXKj7k`J83m&`F+beb3t0 zNE#bxrqaID+#q^SIb#TWuKfAX%%I8alU0UY{FAP=K8CPMzAJRQE#I#{yd zTxsuLP-$gt@AA9w@#Wm<)T3EOlbKE}Yu1v)m2_^)ozAfMn9iP#QR8 zt?0TPI%`j+aju&*edvlgw)ItO-51R;nld!m@+F!XG+s2uTxI6nG4@ssjpUuQw&Yvt z#&ZvOPp4(Anew|{@_QZjc9gU#uvcOaIDIdF7_bye(ag`6fyLJXqG`<~O$l-B^M?2gY-883 z*BW-Cp3O07)w5{2(fIJ0tNw=3)MVsY$~=j^8hft#ll(2C8=Kpj?@hxlz9_TWXUaGz zJc@7hvff~~wF`Ta^irexdj;C@Ki*oOHQ2|n=c>;xGz)0#>1!mJ#J-F@!ZGQa{rFL{ zEhLYF<=l7U_i+LoyX!t+Z!^c-Bfe+QR-jdL_U%e*wuLokOFb3ftP-6ZOMRH1XDh)T zz;3jW8Q0OrE73&INZur_`CT6M*b~@Gbh{bXnwQ1iV`z-I!8X>Cm)hrsQFN>L(wx4m zeT~@X(5#>d=^C@{tb5m@Sw~a)r`a`9e)+w!2pW5N#4f*ImcX7_N0r#)*b``MZN>#z zdi=<5fX<+?j~@+rk71AMam_li?s=DB?_=%q%zRXSe2m;_u1>ns!K^DYUuK5H}_|i&Q^Zj3x21y#Z}K? 
zCrEp2f!|5g+j(fu*}`)#S1^fkzGIc-<-5#9dcGZFJ+IfwwwhZ9(N-=w^R1Dl z{MMe>eH`1`#<8v}ps7J4bDph_So58Rc@s?tjWJi4ZEVdIQZMrReZ$-2!Ee}e`L9JY zMqHy0nPbT6L*l<1&BQkT2Ml{I|C4B@i5tTYWi;#3`V5AKc?pf@d)8-rOpP_Sh{pY9 z>I#jFRa;$IbF^qm4UJJ(+s><1Xa?|;O(XteXco`}IJUK&W6hF!89=v=u2k1q`|vUB z?!VgFCuR(Lv934k+PaPqpQ~s#@oArb#V)^sbQ7GM$B#<)B|E1(jnBZ}=1zCT&=~#6 z?CXy4P*BDG9Y5LntCZ1;rUp$t$F}x!+}El`lia~i0?i;AqkYYKu;xS!_k(DLa`=(9 zn?f^!Mvlpk^$dIg`*4O`L$YT0lw(_8x7H4^%Wqdo-b$Pr$9Rz6wY25mtWPWdk{|ia zOUZv`-sCqhC9d5r>GokC*dg69Lu1t2w&~7i@xOWz{yh)sdC2Bp^65h(`Oh`hYS2g@ zu#Yu~8#CQ?$^f^|vaPI2tMrCZv`TnnxH_D|UE+iU1%@T+}4`!$l(^(vYgH1<4+ z-F;%KU3?ZB_DYUzeciF|tw7U-PkUQR*=n#Suea!F2~u%UlNU^Ve~=Mzcscb?u6l|M)zZm1IN9QMKnIr@;WtEKE;pgE$nsBWUC)3 zO9>i38hgKyx{%-I6?+-S?$17FFOOr54-vl{oxP99ao09}rOXM#?}+$)RQWa6ZPuM8 z79D;6s&Re_ALID0<=ECot$PEK&N7=&LybOnE(z3s)9{ARHSjkK%#WS70%j<#LIR}s2wb)ymA zb&@p?O}4U%rUXr8jy#D*euuXTjpWbPuN`Yf)X-$>j}kY|`c#7-RkmlJ&04kzeAT1N zRklSm188!UO@0r!e}{aR7@A!9u0%74AEpv}o|!+=&(Jka#n4UW$af(c`3>TAG;&N` zTK6GFv9Dp*bh|k(SZlY`-yE7%G%;Oc8#n1^@uJ&6=lh49pMz@;)2{!Ry`IHa08I}X zx4wTg*D9V5IiG_^(bfOJx^6M^Z(X-YdHd1C(1>3tyS&dV$9=3_UD#E>IDN)m$BsTf zCwW1at6!|6nM9MTUlcvUyn@EB=fj*|t>Op zb3I!p+g#s;ZU$Yh@(iJw+CejoW&(|-r)Ty)?#EWPP*j9=0kH0%N0 zZjM36o=oaqetX&;&SkA60oX`-x-RN%=I^ks2dS_ti?fC0!Ylp~qSG(b>nhhP}(MlP$@QnT})c zK7?k#NCS=aeK`}@$I|vK=}P_<(M+N#clxIsOnXL~NtxYmXWxWQ?)Q?0WB;P=_R51( z{z+N9XgB|LYd@;QzJXo*+3MDD%@#G{8f|2j*U?7(X!8Fp_qZ8H<3_`jZcod)_LF=r zpz))TW2rZJmL=)V%Q}laB=_{5h{-wYyU>n2&v(JTaMg=f=FtSkTY(b4q>eWH_pR+$ zZrHQ+CyluEXljWo$F_QLtW7;=YS2WT8tWddwB-nzAv9Hb8=3O}*H3xYqN<^rLpQx~ zKDER#>6yAt^O;8l=5;jP|H0?#FIO7t*}<{rd3NxE=f#(N^s0d?KUVPZD?V}g;AMLU z^S=9?Z!do9FaCVNd{I*JjEo!?o?LEBQ@LU9ZNP z_^Dvc31XjgGuDb)Bp=TAw)Af|P_v7Mz7Ic(FXgv8IkvSE??R;S4~(Jlzl`78)HP=R zw#JHveI89WnrdBR&W+aR4MeknW(*B8fW3aKXD8ApTujWHXaXF%TSzh8cdYyMq6;+A z9xvbGi?K4C4LX~fgR#&=tj{kp)H@{vcmKK6v@%r@4aYxI+CW6l?SKI=%|EAhD({X4hO|4mx2p|1|{ zzt`{^-N2Ry>%OUmIf14GO|E`Eh^91$M(T75O&OXJj-^h`>*WRPe(c&FWw*^wd93$( zqez@(%I3Y=Xyc`K*z;?(v7}QP<~{&AIhJ%}J(0R8VV$YOuKM_}il4QQOWYWK=3l?n zkF1Sz*p;7XZa#_beHk7j9sN$nA?w}TgciGyV=mMH%ndQ zEN^`iW9Y!vbZhamfnBBhrQCj`oksA}a~r=8%`s`(+B12XyeuW%RdjRcmeBQXqhsDu zI;oEhbQQN-{m**7=sC@th27rHa$JhND#I@6Rba0aKa3^vWgAC%?=$%tk1Re%(3GPI=o+&uj%%|2%ijBd#a5Pk|7+mc zd$X0TsHjt7E|n6KloX8$YgAH7QZiIBGBPYoN=lAVVopU#Mm81|D%n(2Q&EwUQc;I$ z>QGTyj`S4ly^LIUK z&6@Q`_elm`H$3Wx;&z<}?*}hE#?9n50m6}886iLMZZVkQreR5qD|QS z&A~6%UR`iz;7I+a%Q*~Z4NmDe()CP$uYz}Axuh&OmM9+Pdp`a&4^LfEz8ouXwst71 zH-Iq$N5^!vz2^0B3UH_o`M4w0hwX4Ylt=qgoB=qxJms7OoJ}|#;W)+XQ1!ZVaD3z? 
zZELzsb8zb6*zH@ObtyiN$3xmzAFj6T(7vs3T6b`I;WWe1<(ArI6iySIa`j13oYL{D z>$d3NdK~45XH32&`=#);bl$5lPwY^KdN^4)I&MiFisy9n{L+Z2RXdAVR>$MudV3!& z{$@XdxV`P=x058CNjRnT_L0XtacLXV&I+6i)@{c!%ENO;@i;UX8#iJ6!?DLn6ubbw z)9?2&cpc9=KJyv`@#&Z3J z>$z0j(OcR!q4`wzx57ST2r+b7)QbbxS(o-f)CZp3j=kW+;5%(^WALWnN%irO{|uao z9h?lDaX8v98LP#8iRK${#^6ZXTO*t;)LX}iV_DsZgVpV0yF>1^^x?F>8tq*srf!!3 zIID2%HX5aMcWR>_cPJgzd zY*L#B^@kIKb4dMZ-5u)>uXm^V!|C3^*?`jt$4A{<_18RlGq!6uT*oL>4(!Dj#E6ptDSi`Q*fHXPH28z zfs=w$YnXvV@K8VKt5I}6=ymGwvt7no-v@67Uk0Bv>+`-9wGG*3t zM+^2l2{?lhjHyfUquch}Y0fX{(_(afNyi*T%*=Bi5$|EsP702Hne6C%XOZt9oDnRm zKjFvE?QP6?EWd@A3-G13)nmnfEsn9^96DCEuUpb%r5)ZnJiUy3i{A@-!Smq0;k@q* z?T<#`td%K;UN=Q?%Jt1MoDIZ@nY?)WE1u`pWh=m`x%|*Mt7_PA->h4FSC$VRg=!rety*gGeydHSEe@e!9=r`9PnuVR1bGSt4#lqY5!&I`vz+rQffV*~q<5#-knPu+QSX}sbwrjNh% zA>I@`z3j?U^D#J+aHO%QotZ}wCqr?x@5)QZ&!OBC$SaR!^`on_$8#9A&+TVm=|1;g zk2$KdSoY$$XaVmAFC7;?{7&#L@D?nW9J?Gj@EAC4FTC9q_Yb-qR`vcE&Lo!A&#%;FDH+Gx&y3b(Znzcu9Jp;* z4wo65S4o*W;bq}foAZHup0XlNI|p6PAvg=qKXe-$cW|BeG;wOvlJ>&xq$KZUc*}@8 zVA_@IqTkpQG^&qS1vs@=7WYdWE3{v##ROUphsGj1p>3@NP6HgNpLJe6a9ZKCV!322 z6o2p1Z4rml1*f$CbiR||o!}|6ZulHCe$C+hhi;!V+-10)sW-PduD{#9mPC8&@TRXS zmYM5??RvW%b>6kN;T!|Jg2_7>y0>b0J8xa)7~J8jx9h0itF2>ngHM6)RDK;}6mGP; zIL?D-18P16UI$*~X`#CuEST~Agw0-fpAqC=g%&IqFTZdmRW+R+Nt2{bR z8+e1oeefP|T?T8LDgLgaoj9B}IPF+2*{(wK=QNytIMTMP<1E1$DicSSa~;kk9J}u7 zcFc$1*7jALvvm9hI4L+iSS~4N@tmyQzcmeGPJt8ruJ-QdX)k>5aLMm#KKv2H^j`Q# z*?UHlaJt}>_M={Ro_HOWkq^&rq4TyYa5}JVpK*9TDBj=ZYaYk<+%k^dZtCGI!?F4? zbgsaM*8wjNPmlkSG8N|y-R^^M*5OdOi_gIv122?W*T=dua5mt?uv`+qczojnXW}@H z>B0Ugyl>IBZ3lCpuFnRX>K8q7FpFixDVYaeU7Sy~SNBG=GrWaEyc>(2?)L`0pSt#u zgUiTMeeH4`vt)mYXGh-@o->9JH~-@BIMn%#gRg;O7+Ep&8y?JlQMch7yyo6R*Ix#2 z0^e!<4R~>Qv^^ECDXM-GwhwUML+P7`rc%w*l=m!|8(4i)D-(Zu`(Zklk<+W#ZIh)n4q6#^H2gS$*wN9gD|R@wYV} zf15_EIe2wgE^$J0{SurRI6cPUG8OkvKKMGEH8|4zteu*-Vs0vvmmX8ia8}`z?vpg{ zAYQty>3a5oufUICxum^9->39EI*9$%2E0`)tB0SX-QqTR;k%ERIPjk8oB78$AI<{e zPQSc3$8g()=91M%;ix-I9BLPCyU>2C0Zs~emDWxB^>MRq3d`!4pv)z6zdlgi{ysB+ z7=FLpk2-b&P85#awkhJJ`Rle_1h0j!#}L|!=gQ!FclyT>d6_&G5>VzY9*|4$d%~`ZA85596cQ7PO;} zQGWd_U49-9^c&RnJ1TV@mJze+6+72q16~una&?G~V+`-$w8HVrIJ!)|aH4Q%43^AK zIGtZIH;%#UC=<&^tQk0MJ2)9QtvfgyaGK%hm?hg`aemS~`i}B#)CkANzD2iD9Lws3 zZI1rgejck2X8=ciiQ{bamx7n=A?HD?5vkYef4sECG6t`2o zO=MB#B%B16)q@w6mKo>%w%@7dmkIc)Kee z*JytPPCeq=>rR0C;O!; z4vkNaQ#@|cZCN#jITFqUmeu<%EN!#mHPL7}zC7OZ;dLYKCe|spjg7!5z|mu=WITk< zjVCFNG-h;OX>!W7MIO!;@{;1{bqlDcc7n0-#S3(qcnk%{6MA&;Z(}WxdIz@S@q2eV z!3V%&h^NN`zpJ7jd;+|*uk~^qJYjJkd=h*doa)4V7wX>yS~mv2WDJDP2du)GL7b+r zQ`{zW8Pwg_PgrHpybgR0ymY_sgEtc|SGI17sr{0^4b3ql6sN^FJoZB8osw{tP=-O{ zaDRlZQ%_-ESo=!zU09tEE6p!-u5BH$n&3%eQjd+AcVSM0GZiiq_Ltk=D}DT}1>O`q zY3%Cu>wz;3r?mZa9z(>fdLxhR^RPOPDR^^OUoT5-xCm#qjHAo324@CN8^yW&2-FeF zTi`kHdBcnM0bQW)19}AwcKpr9e+}=(_n21|uVdl9!Xftd@qGuJaX14e9`CF9^m`Cz z5Kb13G5;&-JB^mirw)q{Rx)ovc{&`po7 zuC)_bVX#<6u4`!9Yk{N6I6?ozse+@I^<1FOy#)PF>y{g1dYvhF!;bamz=yy)P27_9 z(cgHs&y%gdo7yg8a4o3Le*=65+=~R|a{gZ6<4<+(!9MrZ@^uJ0Rt(NOoHi_%%t08- z;opV&;bq~`nCJPdxUJV>^>H|PIC{CXy{5s}z{}Ow$12P4s$U~-FFI}kP8A$ICUhS9 zIgOfEy%)zk;H7!%Wgom2ymTAZyoET;CB^f&ofN0s95V!`1~GSPvnhCu@J5keN!#Kw z{_SmU5zaUqnumC-hW4Rra3ajPiqB@${aE!rv^|_vjIagjcQ|bi8SJwL?35%kY{Wg|`8(V+T*ywQdsM zG2yjfxupGzV^+6$J@zRhaDv}FKKxYemHo|Q5HY82DIaqfF=ycfF<-lL%rs(VhRerX zM$9!hLCi~cjv2iN^V_ZEWBS;CMBxN6Pue-=0AjY>Rz9ZQH+H}YV*V8^ze9U2BIdyD zbrWwam9wZ&sEz3wI)&staWn)`5E@Rsm==Yuy8myW}<(*dU)F`KYl zl5gl(a}Z7!9DDv91MdTmnstlMfJhRjxu547>|etBx+QqyR(L?cj96nJMGwHXedA4M#7dor=c=c}5&cVx;$-~F` zIXKHZI9qViJ2?LPO}kK;xc!UY5A___fc?t`;;dp>{qkI0r{b}g>%8?4Vou*t+-LFl z4DGWf;mp9%eONM_t{#cKTNjUykcze)%*1>)7CE|4K%`uC|-nuQ;;q)WsPHj>9 
z0qmpTm1~O@I1@WKJ#fa$IJ!)6IHPd-u#A2#=^yK> zv+tq*;nc#B=2-1y;pp<{<&tfuIR9(DnZ)tU4&|x)pvg=7Qk)nZUFMdsQ#?k}>-NE^ zFO!$9?*R5kx;!Z?tB0Sg^D8-S4F8Tahj^24m*AFoxXQqc7dG+HZpshZ8kUu>Go@SqGl z7P0@@KGzu3AMP65LG5m!cSG9(Hsg}-b#utC;q8Z(bs4-KT*t%uT-MO|DbTvqUtFG0 zf7N~%ZHG9tZLm|E^L3qD;0!s+&u^Xr2K9%t3a7NseT3~KUaozI;A~)B zseN_bMzG&%9LIUY3e>GKs9X4Z#W;pU$@z^L_^o&0*b(chf1GL6FMO=Df&6>nmX=-D zsrn;^>t)^s^m09T4}6=qf_Hc_Zeet}R&Z`%09_~(UF$S+AQJhD4Y!ZDzNZ1sB9_%Z zpP}o@=g-9ZJo^xL4(m|axo?Z-Onk%|gOi3+Zr+%IliR_`z*#Tj=y8-o9lQzp-R}6H z4!o~*&YSfg$GirgwzZP&WqV%H?c4!x7~Y}V)%Nk3_J-h1KMHROUglAFX?R;Zc)H#h zz3+Pu)%%Pm>b@&m@48Q*KJd%crxtPB;7!BR%ZOP#cIW|5f=lDwhaQJBxq~weCjqC! z#OL-$-AeK>I?#truS=yO^Q~%!beKZfJ0gm>ic3Yu1<=Wr-6pp*g#L;7` z9!?ZapUI2MjQ!>IcQhY=>wwo)rrf$+2H|wWv5!5*i0k}NpW<_jlHh&tCk*H973ZbH z&s|)CmnoCCkM-B#WZ~##sm*FWZQ_(`vt~F;W#Z^Mbi+x*q48LoP69n0%-#NjRP zQ0{3eQw)AdxkJD6F2PwxoZhe#x+ZfSPQ&|(`yuYf(0-`qGdQ+~Q*M4}hSLhi59bxy z_PZ%gxpo|Z(^Mvo9xq8ajoalZ{$4XrWzdcuFZ#DNAB&kC^40zZh?ZZmvIwb#wv^EHJ>$HFLS%<?7R=$fY%6*jwU+Fq_!|Q{$g=Ms3@qB3T+w`s5$LTu$DBR|I!+H7OQ{YYD%~(bZ&a1e+ z>U9_4#Nf0Ur}!N#2R;BkZg}W>0)Gb!`r<+KH#~ZFd$^qZ5_81Rb#H{5y084$^5J#C z%fgekeO>NhILmPKGLJJKiwW>0aB5%P7DC_a=HP6=(Ph%>>T7TGy363+RQdKQz^Q># zny-#iHHYJW@F^@KCLhxW&#*6j2VWDsIJ^<#artmD<@T{y7n~fNe&dwvv%=S~j=+n) zzqn2CI1HUDNy4ey!AZmMcX0A>>dQE_C~NiSu${vxou4$XCtkYkYTgRo0$(qe_F)%z zGkDqlhtp`StK%euyi!75i;p6&HAh}P^4g-jq;hJMq7Q%|(YUs1f^#)sSZFt*$K<$l#pC!E1DjxOU6oPIcVJB;rTU#~k$@k{HcR>+5CY!*faT*hS~D4yPH8-R|o17{l9~ z*A2}>4RG4wbes5Gj?g)v4mcxltT9?VHuACVAe=EceI^d?(~9?G>N-!rS%TwXOrkEK zeV4wYna|HHz)L2>ec@x3Ecg_7BbH0rJoNkGCY)(F+IO{==5=2%@u@ys)=+(7aApvP z+SJ*0eQ;*sNOMTg|8P=pqNY6Czkd^t8P4q<`d+W=IfMP$BGzBUvU=ZRbe)R(a@TLL zn}|F3L3uysBTn5H(GNQ~F*yE*%E#%0GhD_A>JKLaM=zu7!STFaPJ%Bx_&oTM#eMi0 z@H9A$Q|`0kIMp#X;B3MvZEMY|7u>v_xLvkZ@C~aB+UWu>Sn+-E0pfL7E*W#h-mvtCU%XGNEbvY8?&ETzOU9Km_ zI}XoFegm7=<*?TGu}%iO4ZPg8zX7KMj`mCXp*UCSazwvu;&&N`+br}Ofsen%;0+;G zo$+}4FZQW+`rr(dadduTaQfi%m^j?7IL{HD=V#$f!iyP?+oyOgK*!3$nS)cVFLfVm z!OOz4w}aY$He5TU+hQYl27W2`p|pW7gEt$W+p0Le^xW7nkMnA95?EIIj@5D53)+;g zt@rWw3B-$iqcUvzv1XutMK~a^@Y9S{zf}$5!)A>7UOW6hQ1Frz*&G} zw^fXIX`ATfZt!{drQ8P}1fK(M#&SuyLv1yTeN!4vFBJ9HW3qj zT%0(ZsU6}>lT$9v5}cVG;;fTXE>6u?(a$@?X(p#!oNhSFW#Z^Cs?mKMvm7B*=Py29Fh?hjX7?w-gySUx^z{@+7UFVrap1OW*SXSqsV2!QtHQRMx zgAaEcF7o8<6`QL0uImHG;Iw|MIM?tv2;HmH2d5WKxjAwSPB$D~#*+FKk3nmJXW;a} z(aUg(=ls&NZn^%*!x?+s7MG#s{T25|@VfBgJgq|>JQ&gcRduK?uTKS^9~VQ~tr8Vp zzlv9QBdQSb5)ri;@sbtBD7DXSSM!x#k4jZ~Lu#_p8;z*3N^dfvHY>c@h{{)Z>4?fz z=yjJXy`@S_CdlukRz&L3c?{@0;*mo$d2E~7h@c#65jfe1H-*3z-e`ph@PF>FL34Dg z0aQFF>XFSzQNWpqHyuHDqV&giH=tgqDe5&5nTjk`c$>OuOnW}28;kvFwgNtu)*~ui z;Vn|RJ38kgI8#!g;`_a^O8j}UQf=(_wklJRh5g><9yPJwTivU$Jikw+_IoS)^m3v~ zt?l=wt5kNsH&><7Ap3`dqMXpH(#jiDbIMzbEFpBEQstDFRi>G$YmtSon!Prp2k)DR zMDB>RbYQcJw5%TC%~Yt&;ofpZ&D4R)M5T%!sLWTY)x#?nlwO|Lqc#q&%Y1L2Nov2XRecp<0%jrs$-{*}gEEbg7+UF(q zU~zI!>*jthwO1voy!2j`tg2kz+YPd|4?BjnDz(1fTiCBw_Iu0w_43$ZoYC}U!8kv+ z6O~U#YUlUrei_?~95WI9p;Arm^~RL`!>m%Nypv{*MFeztOeTp z$x3e_q7s!}3N2HKQKZt9UbaHbReIxo*Y%W{WdDSmCCL_ zI;IIyufhNsRPzxps7y)etJ=25`%(2!+to0(={|3)q`X(_jQUl2AGU_UWR*8np~kAb z`3kkUPd5pAYokKt_IV4H=oc@e2Dhrb^*w5>${X9OvQ@g*(m}7yRe9t46#kmsw}4L9 z<-iy?F;d%y3W|2uf!<0Fra;all+WH?h9NmsPsSgx{pR(Tk# zMeBdV!<^U1+r`1iu|t?h7b?_3rI)EtGnHPvvfGplr8}}kcTb<5iAJgGs~eGNi;8)= zpAO#lm`IIowKmlm>8vI$lkk>P<$~_^IA}RBfH& zWut2S6mRM=LFsdkRcohu8;@1lQ@x23RQgnJ_5?L|s+T`uO;>L7L^V_IO*~Fb)q8V~ zQ{!lj$EmGTy{zsR-G{ova*^82h&M)kcw?uU+~bW$)c8Ixsc34i zo*~Eg>dkIzkKXLo_jv0SYGseNg}vDxY+Y)3ueVXD7WV3yYGyA+z1rIA<&|3B>uoBv zve!%PL6*7%ReF2UBLmAVnTU!j(~Bq$HrilJAfJ@-R(1KXDK99!bHHhoN|jOQ+zAtB zx4R!jcmF?W;KK%0GU6@L78V@q^rIWlaXbg*B5gbeO{(ajbC7e&II>lZ>!YbH9Lne( 
[base85-encoded GIT binary patch data omitted]

diff --git a/third_party/prebuild/aarch64/libmmpa.a b/third_party/prebuild/aarch64/libmmpa.a
index d7c29e2b85f165c17eca9700d17cb7ec4e4235a5..7d042c4c1e13d2d63065d39ea6fe44d8b5ea2371 100755
GIT binary patch
delta 13805
[base85-encoded binary delta omitted]
zX=MujNXbxdj*U(^$q3D(MuXmC^=!pTUQ#j>DZOmQSIv0mNj_X-_V(Vt-|_z1dOd4- z_Sv8B@88~MpFj6<{orp=AD@hBD;yJZ^XwTpvtZ$vOw0d8d;OS!yUhf+Z`s87-R4=- zsQ)lHDGt^wdwFg!Dz+*0T<&kNy^Y#A)Xt^Wpw^_;qBaU9FP|818zoN~Mv@G9&+-X@ zxTfgX`g=c*vR?h~DD%~qXZqj#{QP%YQVrv$LkUJn(Dt9T4715Fj7HNinuOOy_D9LS z++~#PoD_3e+g`FY#z@;yO~&NQ1b&B%gK^+qk#vbl-ynmuSQ*W_Z01+VwtJMcjk>Ju z4zewek+#uj8;CIcW->OSrSavb>KU>PTGDp(Ws7){Z2hLRjcuB7xpVL{VJy$h&Q$+d zGp+t!VcRUu;XmFvwdwwcZVO_n=Fg&x>*X0WdyO}PBBZpsz`osO6 zB^jcd=8@1aDx{c~*f%O~+auc_&P%u|4=SUQ%ukOUaft$jo%mNowxyQnwbX~=A_Eij;MjmO*xWuo&t z?7RgVMdw`uhJrS9iNh}xw?+&IHF(U?uy2iRUInRQKUE$3VWBu)sE!|mfH?#X7&~z=_8Hqsj+U{l$*S>97sacojmIi$#F>y=9?FFz4~;>rR*^Vd6b-?r zZ7-?Aq~@TJ@gwEz{Rzn#F66l+{}R3vbu>hSf1?eV&)U`&Wp4(dOmtqwPCINAoo?*( zLf5ml?+C>^lz@S{Dc&(< zcw%W-$KcfSw(m<)n@PhrBueH6T4v<<-YSbt>USIqu3_XkPL5}k{C3}-et|Qr5 z!4n~{AvNw0(InWt;kqjZspl+#A{e{Tw#ro?>rrfer5bY`RByC>o1NTVtjQ^@OBwls zB)4etUX|_GrIhU7Mnm_DwwKghr0ya0 zh;sJ+kK=qb@;;K!!p6pAuUHClWu&NwiMXYc=#7<$z2eOPFKv?3NS*?7H`&(oSg7A- zo66MkW3vF8B5amoQ;yA#VSZR09G+9TSHWuY9<_Swtcyz21(7JHLxg7PJ@@!LsQVViqzF{YUbCGoHG^W6WADA zp!x;dyNA?O(-6DFgFt@8O zNc{zz+H6~^v5IFPb&Kt7Cewx)XtRqEjC?I5 zkC5y0%xO>)N-1QL3I9%%!a_>Vvi}KAOzG@lr6mlyd-bg%&cnR@c#GQNXa+Stg zPIB^cw67pHap&2&t^L@}FB1#P2~7|c&|8`!9gHW_v23ELQfcpFyx4^Lc29OIlj*HQ z8J%Q2jk(E>V(p%lmGqRGDa_KDTt!^Y59iIqWfk=7S6-cr(^FsZKIqzS`{ai%xk)d2 za;tu96m;+RWV>nU=|vLa*y5ZBK{H{*p^iQ0e7xS#P6Or%QR6sCvaD+%M!>yz$jeRgd+<>03mE72&5V~Y`ys}cFg{H@3B`uv zaNf8jsYO90rW>%t#sS7xD1y84#7&(ZZE@imP1F}{}Z3|n;q z7cJsxC>Z)8t|KlB^0PpV#IF>59plZ!$7{TU#d(%b)DRPRg1yNQe9L1Vg2Qjw*#q<} z*Dt+6;(FD?$@0>(++OH=%M+-f=bIeEMpkPd<4ugG(;I@c2gYj|e}VB3<69Wdn4mhB zm~0d-n(SQSGVzNn&>qJ9j30==dl}y<{xox83&FwzwiTNKeaAdz3Is$aM|FG!^v)(z zZ@*w~809E8GMSc^GHO+Fb|=aD`8hybKR^8`^89!W`hrL%4YH(3P<_x2WJFL~82>f% zY9p=}(Lr1EWD@;hIS3^BP25~)s|7!IBSP{XkJc3&XaXBlzu71XM zv$BRG#2-O^WyoOZhe%mv-lgWFTJji$%{H$vMEbDT?yIr}m zwWg&(TbIXMnuZ?w7;0c_w+&0WbD^-?W%kIPubH)m0MLtLtHL{47pEy6WI6e~Sr~C|vY}s3NW> zXh>J9sh?yy$N*!Rnl>9(%K{MMWC%_kd2u`FlWH2HNX`?I5n#5QSJAreZSi0PJ^LOZF8EOV9R}jik*AIrDLzVLB`ml z_PuI&Ym z`kT}}WEaVDDFwsPjB|`1WjrSbJJP<8@e1O4`by##rAH1QbzG6XD}wKy2)^Ap@>*DV zbGVXESS584_}7ddWA-a%gyV2NFasYFGgVWEO^0d%CuW9yK4-CFXI+|CGX584-^=(( z#zTyM$+&B_>P)@p4(mqYqUUwRYzV#K$?hjvKRW{v+(N=le2d`xoj|(%jg?{Cf*pBM zIZq<4+b2h`&tmo=7C#4hKn;fr1rZ#ISORYE{CPqabebioWcHlbvG}LOcSsku`Unm! 
z%;C%hYr|+|_MEpxuaVvHt|0m;>jDO4cUdF#;Jaj9xed@{1 zr8|iX7bX5VTNUFmjJM{>SEs-LlVh3ez6}G&aPf>+Fz$RUE-hObPh|F;jJp|k%~A33 z_A5U}I$J3U>S4|KF2+5~{t)BIjHk`Th;jfv#xwk3o?@0bX$_-+30E_R0mjo9A7Q*K z9L89Y2lXF#0zG-iWd2FaZJ6=NjHllow!e;XKXLuS&nK?mmK%^SR})Ajs>sbO!4#Gt z6d}QIgaod6>dsO!FI?|b7H)v?8yL^X58G!lzAggqCa#aW2RYtZ(lMsmNB+H6)%bO9D7`6Et^qV+S*UkOW*U#TvgmCT{7Kz^>x?qhyj9%kIn+;Z=L zp!f{C;tpi84DMEOhkP%YT+d`)O$X=Deq@21Qr|9mWz&Q0EtJ80t~)QCEB#I>XZuOk z7eGF7{e-NzQ=SkjsJy2SvEclU=lsl2)|4y$Ia@9q@3$YQEee;!)$Ry9>8?xLmoUz= zSKTG=x0BmQ)`!+XTn!Cg`$$bAoc}1-oCCLhyx>}X3u(O@Q|r^vakrd?l>;Q}DF!3B zog*&ig|ExLg_uS@-eSa|OeeQ~NIhzM2Q<}B>L4sUYFk4KA?ssL_Bk?=q4{LEiZPTE zx|p2Z#n5)t&dqT<_fh(QyWP=1l2+fX=;$v zVc0QX`%>=te$jhKO}hvB20Y#ZP3|PQ2)+}cS83`YQfndePqr_psRN|$x(9~-?0KM9 zlbt{L_7Rs~WXm(;@Dbv2+MV+vYmdDbM<96~rH~})UMT;Q_n5(RSXYU>0T%k{bx92$BddJ@ z<1&@AtzeuFs;)#1%4#l=Hzn9G=$ezyyowj2zg~0+alPoO#p)*2wpiT(jV0(UYh1vh z`j?1r8}3-aRD-FN5qJ&bE1hA9wg&>A+JQc%2AMj*_#G_E5OL?~93kxlac8+X{3MH0 z$l`>M!{f)U$WB>`iS^k^Bd*U@=~5^@;IVp^Lj3{H18Jq`t)J0c#<#PPl@Qmnl@iyp z)s)I}<7;N&oZqR)sG(9w8}bBF?h9A(9?MotT+dcQT+i0V;`mvdPR6$~K8PG2_;;}h z9==a~1DL)Hjpgikm|xa1DQ6dvte00zTraP3nVgKtAr^HJOIor#oNaOYS3|#c2Z|}H z{NQE@)%j`ADvdk84QkQ2^V^^U8h3vBGoPdRO8N%ew@E7I`NZXXt*z=^62rui!)s+yHH-XIKZ8!#^JO!1>b|&f%Ql zjJR%^EgPzbXIcaI!=B+emj4Kx9G+v&g6qDSYZd-b`91kfMqKd^F{1d78sE%}EAPi% sC7k+Z=J=D3W6zz2ZGvdV!>wng#*Msyqz0Rn^AOA2c+yDRo delta 14890 zcmaKz4RjP$mdC3q350x*24h#cLDNdahzyA_LPimqA!>lEM2rwHLaRZ;nYf9HYi4D( zqehHlkl>9NF+ihPH#(f9m8e0pq|b^ou8U)I0%m7}#1U5=Q5uOOI56yc@7}U+9<+51 z_3Hip_uYHnM_0Y-H2c3u_`}x;;rXda)AI|8@?l|Wp4I*N&W6;1>&;YHy6nP~JIxcO z(ePnt(r9>O**3W2<_l8JG0LkABU7RcH%|zT-jJBwu;|kS>!m*@m@jRc;oo)a@28*6 zHjKM{sYZFo_Mfl}bAw?REv8{?5UtLz|D5bYV~q0WCMEr(?I79ulcnvb4aP-3;rIhG z9vTH@m6>PB^ldT-BuQ`9Pey)&Y>h-|oA8siJIFRj%s%M0) z9Kz?a{U=Oc!y+-PtzxK;KKL!P-Ezmd+r)^3#0Vyc8IiMLit=l?YI%iuC$uj2U75Ne zlCWa`sRuem^Plh;#=EjZe#0o2 zwx=E#?35#R`mK}?PCWpT)3dunfAdGEco_xK8CO}zb1CvkKnh|`?0eMxgR zFWa(oPWpLspu#$I;Nw50Wg9*rm5^Pz`1A<7NW$i#Ei;6fwdmsm!bI8BkWE1JOW1^k zuf4n)2n(U;9BudeDsYfxk|-RZbIKf zcX-;w<#29G8x{}mQKYi5cXilEYSy{39hD0|rL%_Dh_CSQ&6B9dKlh{fUfzE#QN?kBmDJ5rb_dxrbzDDZGgCD{IQW=tMKQ~-A>%i;^<5%FH`^w9wZ#0%d92D988kgfjZX^6VUqL7?KvoWNgg7( zhU5!T&Lz3jh1`+zP%b37o8%H|eKpF(Bpc(9*OR;er2J&p|HlIP!QNg3a0Ej}vtoy^dM-P^0Ed zG~SMd@gDlzPjb<@$ekr|2o5&eo~YB9R9eHvNyu1dTS;Pb^03=V8wLJ#ZtGlCGbci= zkaAQ7{;)0?URdXX|6AvS{p(!TwWFZradAUgXM2~6PoA827u}enwD9jir$H>%slu~~ zj13QFSwx-ag!NC@)(6VSd*4yV0*6;qbf(DMLZMLL3iT0%E(%P#Ud=0CPPG|iIr7Y z+>;FcM;-Q{V%|p`rx&2lCi1-=<)=vAbKyw21LZKurIS!Dp~=~$WKV?T)*O^YRUL1` z!6(JID7-*Ep&{UjRzsG)wTdDn_xw&?Rk;fWlhC)mE`pe zSTEGyliDy1vD({+%MYPfs2`I$mF`lack1{KjsSR6LtvG`_S}X;NVlX_c1cmiOJy`JO7d!wtKj0N zFCITYHCjeSYU-1l?5tEArjyQxoYr18taw^{a#GdA6~OwZZSO2H&A$?((QVwAuZ%o5 zknAVf33Ce^5uR41LR8#~vfFI$17z5y`FRxmJVA1&=4TTG+idIkRJC>4AueCXQ)GM=%m1BgF|*`o3dR*pffsGPvcDcCA1qtay8tn@VKd&cc`r{OSk&cwb^ zVVtk5yg4KvB3@48ou4K*3?5R8u14&rrD^if@4z5II_vM!pg{e4YHrLy}e}GFbgBl$HMqMdU}uKFv-sR?1LT8W{>YDno0Bj8D{r2 zHTq&U`mCnp|BCV{lFR(asW3*T%fa=!#O_4hYl#n|mEln`tZmOSFM-;2+bT$hzy=%M zZyyaWw7aaiN;ek>U|WmRp{`Lp#l&Sj@&?G;Y+Ef@c^+1X%1*5O4%Umx0jzuqy_;?C z50pc=7=!mv4#{J|-)?7#7ZaIb%{7R-NOeQ@7TdaTtXlrlV4(CnC7wy{?jY_bzMA-5Dvb9YlDkN*BKd&~`BnrAw>>hh zwD$H}Ypz?ly7;bD#p*43=O4FyHI_a*0kWSU(xd&3BF^MWPFowr6HTal#a&QFy^->H z~BOyYWgsl;WXQ?RKzmn1+m>@KKcvNM5FZ(!V~ z8sMp2ypO%?4)!tGx#LKKiHT??qZQz#I#)AZ$aojy^ms%y2@C1Rxi?BuiacmYfgTmg zvW+$1{1D@GGgbD*^inGQ&|OaP2IBhgHWJ6-6*oZV*fR=-B5tdH6hwErgAuWhm4|dY z7cbR$n7HotIB|W}Ty)2me)z13FE-U7`8CXcGxD2N;Om$$z#7bBJWXt)P7^ge8Qm4|l0MZZLnia2S{APn1OQk0r^8eFa5DgR@U}bH#N^wV+|T$*#&cX)k?!tfyoK>qj1MsWOUBFaZSdCjQ25qzY&ZnRrExY4Sp5FhKt_dC4YeNO2+xf?Pa`y*}L3W 
zk$xU>xQt)NghodYWr*>$jQ26##CR?}AN8d_mAJn2*9Z=Oj${_Jkij`3v&WdjR^sPs zyvv5jF1Iyg%g<$Sdloj4{c2{d-(h?m;{(L?GzW?6X$~_#Qb&yv?{OrsPh*YrSVviR(R^3A5j{tN-U(1Q z=(eJwc+(wBriZsoZF4M;F_G~tj2AM_?MoQn%IpJ-bNd$LIJUG3RI~Ot4Z4ZzUGE{T zcfFta;gjlY)?|mb@lR*wd6>2oo?|`8WqcdsZH#v?9wx3Q+C_XsqH>oJJx{Ij#PiXK zjQ;}jmqT3lmrHzvKhe(5{P2!f#_HRD+`judNa;<3*LKg8r8yPydTp!1r`CF%@5v9`aBc5Q zc(*sr+9PXLzYjvY-QL5#SRSvic*8#M?{-_p1;W;J!NQ1b<;rWTP!T@_BX%%w!Py&E zGyW=b8fE--#(fuJMW56h;`-nfBZu#Hj=QZrPTNqNw%x?#!r}w(niT8HE|y#FBnZDD zcK9UgDOMBLQ`|F2Etis*;kM51z5~UZLWPX)WxRoLKJME`@L;UPo)JQfN8|9S zOVLIq!~?X%;hksWGJgNr1i@!cjRoKLH3_czulJynHQ+tl z8;2i?!(G#232{HgF%C;#Pq)gaLD}nWs}hSERos@CclO5lad=4_zB&%?%u_+Tu}I8U zwMg2d2x1t80cluTkeNc@7?$5wkzU z{3I8homVsdIkVr(_?L`F8UKp$w4bX=@QfFG7Yb+GK&yz$5h`Vl`ibikHxS29l=+#$ zccdqoqiGkeb02(E6FG9xZD#A)FCQApZ<&jPeFd(Okeb^ETE{gM%m4wE3~_y-msmtM*w zKgoDC<9}y-`&Cf(zwY@_CW|l5e>`T+#7pTek?|VFM={<$6UxMk@c@&@Fxho=%v~zu zRg9-I-p+Ui&Gj} z&NFEzC{ZI5vRQ)xKZM?M2ahv3Ak3Ykat>Zee;2Z5dl=7Qe3|va{bb_Rs|l8<1TG(EHC~{DremJU7NHVK#mhTjRnpv zjycL_yeSTk5Z4pkLtO87;x#e{!S9t^UAz7)@7bo730%czuS?Yxt2oIKo4-}TA6S9F!NUgZ}nX# zUx|Zt^ku(3imQq1&YFqKPYTa=dmKOEIDVqY=c-%8m8^S-C9&c7C0?qtpK-oLtSC_% z$#4ne_qwey-eZJhT zq%BaDoCTsX-@gEZ=_6T8Tp!6w;`&I2q_=`0rUuY4o@s|6TV2h%G_w?Z``p3$IC3No z&-@4Us=F&^oQJProZl$J$j?9H2G>OmRPW*0el?2lP5(@DHe9;zy5L2uZ|&D(MIWgy z;`&Gp5Z6b1Fpi&-%nvQO^Hs!_g&0_VEuADTWdibOg^c~Sr%hA+q_)HC{r3D2sWLf7 zk!^bz4@f}AUd@~H_v5~W5ZdqdoYdq11vB7%kxFhEdT|nU{sKQ0P7AeMQyZvl1>}8X zdm1&hh14ckA=I#@hDhy#^&i>ZK~3!>b*Ky?AGtlrH;hcHo8&Y&B3u`0>N}(sLH2-c z1#W=wfZM9O0fq+L!8K$nM{_RON#8ATct;!_iNg~YVW!f5058>fB657-@r7MT4fHiu zL|k91Wh?-{Ki0+ZvwDOd+Q-S1TA3q$f9xjiECFY&_ACO|ez!NG$pbWqdlo^N80L2t zslUY?V$K6<0PqymT8m%Ut7DkB{QHdjfH-{Oji@`xI6s&35qInzUVvdwYE1L#DZvu?tQ^vD-s{!QYMa9cf7uc2v`_J@h<0}v&y4?yzbSQOs>(#7(@ z)6P`qC#BMUkn!c{K%I{-hUkC0t;8j&NLwPGhy{TqXdyc?kEL3}_%`Nb^%8K2??%^< z>?|iAEx!ih&T?{iBXhTag=uEo`FWF!)=gX=sUGBKZuN$j$UlyG0~N9EY+-5lRLE_{ ztpiNn%H$|tjk1%dCzL#-s{{?Y9nZv~OWD;*yPN$#oA#FxMap#A<(lpBW z;+>Cw{Jbkz>c+&9#9+yV!^Mkm6#+@I;)oa}OL0tO?o?sYVU`G#HS(R`+Iz6TS1ssmtJSaLk z-KwaB%T~Ez?a^H4@^?pb$5d3J(QNqs=nSiEIplvk%L?s-x^HKh$6?2}v#pdBs@+L^mRN2);iy!}5O)`NNa{ EAJC3FN&o-= diff --git a/third_party/prebuild/x86_64/libalog.so b/third_party/prebuild/x86_64/libalog.so index 051f85d9b9b5482169ebe578cce911ae560a3541..4c8a45a4fce43e3262fc1fc2aa162588cbbac055 100755 GIT binary patch literal 173984 zcmeFad3Y4n)-GNNL?TmKB#MaIU{D6pgh>X`1Ojw0XhWD0l8}J}LeeH3CZ|9Wpo!7I z5k;Kh4Cv8_6G4S$Mw}vuA|4?iYBeB6P()O6-?jFt&Q42z&+og>bMN>2Wwbvf18Xp*%I4m?I#ITMg#;pb+*CUFFl?RoTp>k&!u|_|m8UA)Ku4LIT|C4{- zbhu8Xv#y3=XCcbtBAoV~>bbpVC+7XvwhWDxx2mg^j-)A_Q_?vlopnu7>I~}|F3FqX zO6M-AYTz|Wn(&7Bxolf`mqbgzGwP|w}GyGQtjZsKP<^2D8*hFx&m0u9M)V5NB z)>W#*bt98C!8!bnu|}7sePciU_|E8u64zb3rBl;BGf$*0pp?y!z>TXNXA7K7+blN* zJ7QCC7ZTc}By^W;aEl+iZd-0Q7H1W8iT&8VbwPN|TOmf%rm;(G%k3?UO?~S^PQNs% zsnaNJGBmC-JhY<7@EFrPO9r=Du->@h?T(?vO=4Tyv${+e?;LPNzc-uqGVCuGEo^#i zb6d16#Tno6g-0t_Muk6QREC@^|E^U#r|nszDx=B%CgHK+_PDsj&^W_b64%a%USDL4 zFpSovHe+zgWg*8$+!fkBU@?Yhw7XtD(z zh3aC=!*#yg--GM@I3K{d5N9#YB{;3aBRA!^K8$l2&VS)tf%7q(*0EAv#z_FAG?U>#I0l$N2`%w{Ysmy8!Ejcn{YPaBdX530E)9oq|7<>uy{>!C8ZI z56-dLd*SSjvky)>`r>-C-1n2~ z09*&+yhZRJTyMh}hjTE_c$`CV4#$~*la3K`y^Wq9eHwJfNP=L&(@w>a&y4V6@H#v=i_=0&Us%ar?) 
z>wh0N;JNSx>oyF1arcT}hHrT3#pv&zynSEC`cH2P>9G8@^iOtOpZm(}vKx-PWAhZY z9Y1_u`NXMhuA3hH;rKP*zPxWmm$u!qfA+L=cba(UtY_xICp+{w7I($ed3QYj+lih7 zp7?yinzae1uV3}os%yTwbwbFhCYAAfU%F@Olg=UTE`q&9=K@Y_#=_)A0N1Pap7ark5(OhqUXH&H{begOSi4pP49Ww_w$4SqX)Hqq}Pkx z501Djz4@WVKV7}zaORX{#gG|D@uVyY^h({;i$;w!Jwz z^y7WE=Ke6|@gKi;>U!-4s;Nrn6PQ7v6q#G+z&gT8N zq@ra=?!ugB(ynQ@W%BKBYjYgX;nM1;-mJDps=iaSePMOq*Ip}i&s@~DV{*r`oU(`7)PG)g-?-y@e{S6|CG=$5 zveBDct;Te@7cD0@tGY0O*ee*ERg zln)u0+i$02bdS}hM8!M+4+}Ed1Ug*x}?<{}( z*d^B&89O)Ug0T;s+jroJMYKLo;7k% zc=e$3za6;x%G76iK6vc&kek*=HLoAK{e{o>FKFi7dhw4RJXHPWO@9vD`9NY#Qtcn3 zjvgNAOKSRFpMf8Qbp7Opu02P#d$fD)UiX?E*UTSv*Ui7*-|VZp-%DQ~+>@pS7d$F3 zVQq7PCskMsT!=p(ME)!$=ogY-+x)`(twHpRy%KYQ3zUb#y8QxteUN;g3X<>nhzpnJ z>LBu)QJ4$W>loH27vghK{tNMaz<0b*`Ge?xBZ&R;p!`Dmzie`0zBGt^rUj|*yddTA z1Zl5nSR7rbJeLRILt0)~epKrV^Unpz_xT{@|0{^yUJ26fr-PK|vLOCeiw75~@7T*P z%-6wjFC^ari_r`5?*?hdoIEepO1<}(yNI72* zQl77Z@EJk$n?d|)TadV#9mM`8g7}p?h#y`Rq}|s9@v9|4%GoQO^iE)`>i|xb=+=h>9c>d7RaLPuJ6w7!@ z%3txEk~<{jc}MbfJ)!sw!sm0lv@~LWQ~Vq1Vk{OtDYBtw7rwpNEmrJyo$zzTJ~r8G znl1d>ZIyj&w6TW6+L!DscB@EMyybf-qNh?eu%?Ngh3r>oucL~X2z${ni`%!QQAL}B zI8N|i>=TKeT9@K)R2SoEp6{X3Qm<=8egm%5zA29@Lev}ovFvbG@x6fQ7--q;Ql%K- zi;v^n?^+slvI$u$d= zRo~r8-tzM{;{Ub)|KD?|DraqHrQj;jGgbWAo1_SzRtEvabMAM&qb=f zmC{dd6#j_V-_=FQhe~-iihP|{2}(FHj!HRglCQX7LON*d$eJX zgZK7vjk4?$;18#z|JK-)yj9K<(l4TEBOS*e{%aU5t^OjL_9z}7|3dt?8J8RDU7dtc zm)+8i&Yu*%U!GngdaU=I)xuwG*+E{kSoVBT`n&gx5=3@2Bz}@|gHl{5 z{CmRNCBuh>zpT9~PhFvsm+ohb6}wgKRf0oA{(#u6?s6q>`A@3&c}jo99}@Xd(k>}G z6(QZr7!E(V1pX5~LHQ558y{DT9qeKU6pxQ@r2KW_|E~!@U-Z|C{)NInD0(Vne3W6p zxJl&ewktt;heO9x(q6W~is-Ir;|H;W>pzO5jQfbsfviA%PRl!DWuC#tz>$5@(J;`ma^(+dS4f2)j8 zgbe4#6_JXs6+f}!#u)MEx|fw6)jEg+XgJbS;Zywc>SCObcCpP>`ahL?AC_^mGGN?% zS?uhk7jrnKh@L}M`Hv}Lg767qpBk}`6_a}axIz5f6)^AmM&ekl%&+E4IbUk6$`dX2x8if6#Id?MC3vUkua1fd(C;2*Cs)AejMoK%@1>}39WgqFMlCANv*uj2QDefiui+CKl%!nOkd763< zDe@IEehm}(&!zuXoKb`|Pk3GYuqIlSbDb1)tCZ7v`I{j83)VO#0q=LwUuoI%Hl@e% zx0#}+R^qv3w~Hj-Xo>$&%g1jLA7Te6J!35%SMu8;iQATahDm=(37F4hNgSw?I3VHG zSb%zvp4d;7V#|8*(vGnK+7A`c zS@?d|JX7powHGC4`+uSYy&@lak>Z_Fo{3V^$;txgL6kjZSA{>CqQ?*VJ-Gy%{@yC$((^vRRi5sqalpbWqM{g--)vb!R z{PuZ~w>}_PBl7da-|Q0jmk9q1uH3#dKbau>8)DDO*A+qYY&!0hd~Ir$-IAFyS`sfBnFL~Ts#;MqVaq1qCk4{yBnWD=j?NxJJ z5tuIWu~^DewMpR^;ct`r+G`at%_Y3VP~^5K}(UuH`?+5*P4{?dPI zMyYl=qArG4>RTc8wd(bo*t0I6AN@<((ey)6-Mia4HRe+z^q+XRTDS6AD z*%<$kc8m^F#1P5%h{V0&7U4r8H!$v}0XBd<&8I z=5;H1S`ULt-dffmip$K&F+P6_hejO+BJtYp*N*$TA6Q575N9PIB{64Xp zUDm~&g|84lcYUgej>11Ij%z5(%MGM-bvu3V`E$BKL}(O(s= z^p}diwG_Vcm=d(c-@l~2Dy6+TOZkV0pLhet>v7U9wE^pkfmT1=tMpsr$mQbCHi?t8 zPM~8y@}crqol)3oua_j>I+;iPrXCwBVSjqR78|G9w^ke?Tk%!?+vi$MDY{b$4cJX-*ZYFcFDftO;Vn}q`hjVs0^+4-DmmR9wqp-$j=bF*~Opx z3O^AI!FG^|*cjobK_T&VS1SE~N<03`>W{mWpiCo-9b*5wfc{l2ep@Sc#_++%=i(1F z5>O`z|A*-J?pB1=U$VrW6^9j{A@Wrchik>~(=0tGHkH3Z4Dhy;{~hTUu^Fmf4@o;- zi{~Wolycfd&tPekoDx9;8G|D-7dWPGbJuAZ)gFo@vnVDA3&g^u)P0Gp0 zaKR9;0i@HiW+rE*X6KN)n-dC>pdrIOBri8NBh8(WPPz2#os{7Y;FH{`?!p3CZcx1vqcB1M$OnD8r_dnH|AMsj6cg^=|uGGHyZUGnU`LelL5tv z8M8BThUMqYLP0aLr_VNESoJU=H#4umn3XZBAj6$pKsHWu&BF}@NERh5%osc`5$$Xw z6x@MIcb8^P#!)yiF&f90t%FgB{V+PM} zMnY;&7sC0Z4~6X^0RZ@nW#JT_D(2Z9k4+`UQWhrV|s== zBbR-sKQ+hD*%`T3wTI@;<_9;^1FOn|=P_*=XG~sPPL8qbyOT3aK#!8?tUlr0GJUY-*aEj9hx&iXK?HEw3JbGx#E4%{R>D%LN1~Ky0_|! 
z%yJ${+0&ibt_(+NZhB5eK09o$Az2w|Gc8qUHZlO49Nd^Z)qSWgcvO%*J!6*3Jr6EQ z_o->_>^!bq-}t=1;zLb8cvbX46L(U5NBy3?7467^@3JCQAQ5#aQk zyy?l=+_LWZWNg>LS5!?NLydFbYua}h-q zr<&M69tDV@c`i5APuiW@hsFa~h~g?`)Vq=TxJ-B&Wu1|(e5ZFj3eICi0+Qq-7^KZd zvY1}Pk5A3dy#P(IuQAP9;xncdqRzS&rJeeGBfdW=SByWyKAa(1{q}LzlRjv?+s9cF zoJf2H^@>p*+|a8fa-i(}8#{NygBZ#Kp(W1k&c8V^Z#rGkUDaqjBsC{z8b&IveL4hb zfI|q&$jNqQ`+cZUw2tY;a%c)A2a{3`Xb;Bd1VflWmJLLs_RvMBG=9t#^&_^3A5q^B zD={}cnFe(lcCx1c$`cCwJZ2^|vci!`Xb6Q3k;yH{$mdLX>{Z(NmIJy{Ft{+&k&)xd;ITL@ zi!Zrq{I+uEB>t&Yd&?&7DK-l0#|OA-G*Eh6j@&nbXN8WX91Mt{jBl_>9cd!W{SDdGXo#MrIxc9Aiv= zHq9xrbEg}P>&LdCVl=J@S!9AZx8Wq2@X62PlPMF*Nw4oe4sV$5wRgp<)w3_*kE z(O~G0b0i>C(!AWX;rV%mt}%Icx-lsrnX%|)%wak?J;PmyK@Atz9J)zxrKw>bWAmMJ zFdP`O3Z|38E0)Zq=$y2yyg9k#b8*z->aiGuVpaV|f z%*daWT~I)C9(t~Vp5OM6K|LIKw07a85s>&jHz-0%Iy&}D>csgHBj9Y974S5tfE3Xj zK45@Po`cC!l7F%^Jvm=<62CiBmH@m4vU&`eH9liD76}RQypC|>6%dh~mXQ!|xZJ!V zOP)n5Jr%JVQ|wBOsX|^M1|&_FP(~&}@zr#Vo1UE>R8m@2M!Mp8fV7mRVthh>bZ6(n z39{VJ2PJI?oMhQiKI4 zxd{yc{+VMimDv>jELCZo#1FX!x^>{bN-OyQS*5M7^~38MnTprOC1I+OzS{cle?^0@r=`DvX?b$2zm|#r~{D>f9@D8kT*Mh zsv6HxpBxu@7t3LLfV>wzK$kQQC|rg#tR~=o1#;Cm3dx){kq3wz{!7n?S(9QT(|^b5 zV1B9)%M>6?U})jLYTBy&o1YHOhJ~ilACjdp34>RTx)4L16Ej(=2rg+deWfRCJvF`M zwCXk~1F8z>56LKQbOvmmPk)$;<9=G`5;P!KwNA*@#fd$=D*v z%}YivO664&8i>}nd70^0@e<3+-Q;}MHX<)Omli30-uYv$Q zYD6fNJe{!g(C~`&49~)OHjLQ}Don|<)s7a5ISh*~vU>q%0Rm=o=A^na^YUl$SOscG zPBx7as@IM#%%%1TNS84ijqc9y7ae85^j;OJ@e{v3%QBih*>T8@Vmwu*274!=akZKZ zvv%eJ%QQAOn+yg;{$dK%U{daLD5e?#$w$Y!8d;V7cC1Sbo0ylH9-p6$aXmgK$FJ3( zy}%1Tfadi4*^czIM#c+Zt=TQtnC1*(W51Z1NeJtgvXTXeU4Uj6RAJDcT6(;en5F(n zW5OdYoQa*&+%$G#RVjIbX(_LEm97hT4vd$O4u0@FNkKlVRDczZ$2y%Kl8RwG+dZ#A zIF33PV!XzrrvszEp==E-M<>6k(HVsW8I4j=qc1|1QQVXDklqw(Q{)v4dN+k0N^+YY@i1rJ|BP^l27UrX2IT!yE zo=@{+4J)4P#MF5lq>*hpVz`wT3t=LagKmy#7SG$PXVhs&rq1P36wV5$6FsEqRZszL zp}^E?f~tCALV7}O5|)Bic48&W-U6v4n!adacNR#Q{z(RkV#ECcvcd)mlcY>#t#+do z-7p%Fk^Pvw_-u?=QqDUwIXR^vt0lQGVQW-M&X*PhcjOJpo8`(!AM_6otez1``K$3y zqc(cfU~4~3R&Lx3jUHVfg+xQY7^*`GGEoj*=xTQU2S?@eC*G9YvF2gJg4oOvsKZ9em>g&;4q}b%JQd(+d?5 zv|dK45sgqV^5mg5=LY8X1zE|<|zZ#)EU^FPsM|21qJFQ z94O2dkuo(WZ#t40!xIw*4@vHMW6vA=`0soB?|L=dp;kEP1!1WFe~s^(Ft2kBk_~t2 zx%IcHUsou8HEL+4L_cG&5co?o!5KMz!GlD=7aa##X_qJfVJl*1tPyeJ4^O-@Wvce_zz%ox(c; zcnjO*_t}_D?+W0T311YzuN1x_fPY%}$^iaZ z;j04p7lro*@T-Nd3E*E5zBYjW#Pa_D{xy*|;v3t4o$$5*{vF}%0erRa(E0RB7SD+Bl+g|7w!2c?IO#uJ5@U;Pa z$jd4Y)CKTOg*S#awtoxZ?E(CcTV&h`;Liyk8^E{Ts^pyk{I$ZT1n_-@cLneX!WRYb zQ-rSw;O`c`GJr1#usVwOEf-C(_f+SF`E2JjUS@PS8BZc1cjfj*7#>MJyjZCrt#}F zex#<~tMT%wEEA!Lj#`aR(0Ehh<2Alcd$_SU#I0;tMT#^J5FF~ye4kcX?&=r|E$J)G~VdbP|i;@ zK3wCU*La)8%O}SCw4KKPuF2aq9zV_SA6+#5Vyyzv8vlo1+A#WQ{C653tMT#^Wq#_= z_^FzHr^e@K`~;2vm!>~O3C zrkI?ji^k(81O6jghp<1;k*B^r;PTKJC&jmO4^|5&N<`00xOsML7;1j&D_ z)_DAc#D7$2JpEgl*0EmW-?d0wy&8X5<9BI1esbbJYBU}{mGU3^H6B0B@E^4rkDo00 z4^!juQy2eHr}6Sn8}PHU8jqjYG#ocK`2W%d)(F@5QyOp6c>LtVf3(wh`RNxwvupgL zerdz#qVbPwe6+?-((K<)SMEP2QpL@>5$*<<$6kP0s|4KdA938h?exXK8$< zmaj|Wdu#k$jmP$%|0vRUub)Ny*LZB*`Hu>X@8D-K$I$pMHNH~gKiBxx8h@3>S82TU z+dX=`UgOtl_3~=`HJYAX8vl{T*J!-_6CV6@zsB1&`C5%%q~&XBd`C^bPUGLy*fsu9jqjrI^3!zI7OnBiHU0fGK1$RH)c68T&uWdApX_p~Dvj@^=~=JwKWn^Kn!Ks;zi51&#=orbXEpvtjW_x?`2WpXzTq0bP?NW5{7o9)PUDYi zyj|nvpMKz{T{J#glaJPT`N=Ln?WggZHThVLKdtc&jgQfIr^ffx_z4<+oo0s=jsH-S z&(iqEHF=lD%TFsg)m)A5rRgcs_$?a0MB{sFe1*o#Pl);HN{#QM$yaK8C#{^THQxHS zN+?;C#`o3qtk?LPHQuZ7t2KU?#`n|s8jbI-@%uIYCylSw_yHPkYP|eZo}bof{6J0q ztj6D>@y37#|Nl$l!!_ReHy`M+P2+FXXh#jX$sPOEmsI zO;3f!%TKa7)k=+jMU$`8_`#a~)f!)^@l_hXRpZxd{2WccSL1tV{4R};*YwnAe1*pE z*Z5T$U#sy$HQvLBqO@4yL$7=E^8oylQvo!vsrpKl6 zV>J1>8lRxa7iqj*lV76orJ8(&#*fhWl^QSq1Oq><)cAdx{A!KAUE`}X{wl3J>oq=2 
zHRpZZUd=pKNaZ7{$Z`9<& zHU1fmw`u$wjc=#%cWS&{SL-O^-w4zt`lQ8lSDn zPtf>LntY1JmuvD_8vm@uyEJ~jrf06k->dOO8vn7zFVXnX8egIDi!^?v#wTffrN)oZ z_|+OeR^zKQevnqL^%_4;llN-;1WkUI#=oKQH5%{M`28AhXnd{4kJs`wHGYD|*J=E% znx3;7KT(r61~vHqB#jT(_$3-|(|GwO6!~d8jekhX*RJswX?z!rpRDPR)_D0RnfPfx zjekzl6RYv#G~S`{cWL^a8h@Y0Ptf?e8lR%^?`eFN#!u0Bm&PyF$}?BvlQq6b4&ht?}|tq_LhVjZe|?U9a(}8t>Kkdo}&LG+zEm zBTiML@$YMT_G|n!jjz@CGL1JizQ2}loyNbZ$)DBuG>tcIZSempjStuObd9%Z{Hq$@ zPUG{m^4K*#LzC~K@tGPQt?|<}zMsZFq3Ms+_?tA|q48Oo9;e1XsPPjtezC@wYw|T3KT6AYzsA3&$=7Q9E{!)e-lg$%8sAjYe^%q~ z*5nO&f87k6O_LAT_#K+OP2(dp`F0v#pvgOmPlP+l!nSYfXgEr}?l5z2Tf=Y^ZwuEK zSCFso)9LMwMt$cf{EfIK7I%c%bA7e-I6Ai{OgZ{$7`GuDO4!S|IpHRRs~Cq8Zc4b4 z@wvBv!w6R}K1KK5pBEaTmTTM&+ByoGQ} z!gj_R2)80^W4w-VYr+QOwS?&vm9OqRh|bRuwh^vnyo&H8gliZtC)|dxm+?}=0emk};vJcBU38uGaqPb1upa0=r|gy|KC&&hZ+;r4`M8IK@L zuPS`ej0Y3$K-kWB0O6|$+ZgvGd^KT%aW}&B3c*+RH`TurVLRbk#_b7rBwWL|4Pgr2 zJ}=|ugs&xB#W<936yZw7=iUVFOt^yaDZPu8DA0O z8H8zx;d3#bMtB0@6vmSX)6&7`WIUQMEg5{Vj7Jcrsk|?m@nFI<_4e5r4gLa1r5J#_b6&AY8+^4dI1^y^Na^E+$;X zIFxV+;Y!Bms(?!gS1>+BcoE?u#>WURChTH-gzyr=DU1&g_7HY5-a~jP;aJAI36~L$ zX1s;)gM{sjHxMo-Y-7BR@I!bOt|h0w?E+u!nKT75ne{PhVgR3%L#iKFD3jh z!c~kH5`KhmCFA*oR}ijXoKN^s!bOZ{5PpoXi}5tVD+#ACo<#U@!cNAc39lj?%XkFg zCkRI~9!&U2!gj_32tP&G#<(ZprwJR3yAghdaNTd*{)8(D*D`KT_*ud=jN1@?jFecs1b^#s>(mA?#$lhwxg$ zv5a>UeuZ!}<1K_=C2VKBf$+Zx+ZeAS{2F0{@mj*K6R!J}+n;b1;abM42){wNhVgR3 z>j--pFD3jY;VQ-p3BN_SlJR`PZxgOyoKN^2!bOZ{5Pp}ii}5tV>j|eYo}9-^@Ls}Ij29CAf^a3{`Gofou3(%`_)Ee?jAs!3im;3EG{XA{r!byG z_-n#W#-j-zARNnh1mSN8M>8Hw_&&{zk$CeT*3Gh;U5SWF+N84N5U?~M+lpQ zQy3p0>?7=Cyoc~H!m*5Z6FyEjn(-FGCkWdaZy@{=VH@LhgijJS7_TM#GvT^AZhyjc zglieEBK!;C8pg{BpCasKyp-^1!c~kH68@ENCFA*oe(sk#<(Zp^Mnn?-3ZqcuKStW zpD=BO`)V1tCrqCt`)U}sAskBB%eXmVdbQ)LVjN1Cwm^K9jL*Fa97ec;@hQTzCG9I> ze2g%C%H(q~K0=tbo_r~c4-lqL)qPIJdkE7fqP|$hy9v{$M80UoTL`x#Y-hZIa4W(# z#_I^TCTuWXOE`jX-AQhL!ZyOSj8_r9gm4Yx<%HW1_A*{d_)@}Ej29AaOSqEpe8Q20 zD;Vb!zKn1Y;~9kMQ#YTB@ifBpNxd(H@g&0Z3AWG4cr;=9RLmF4cm!eksfI6_@nFIo z2-_JCAWW|meKy8D3Dc+2K7(;L!t^T4SN9XQKVf<$>Z@hko^VIPHH_O3rcYXYUdGJ{ z((~Crmyk z<2{7C5sqcNn=rk~_C+(^Lih&4cE%eB_aJOzypHgVgbl`P3ExDx?gY0#;b_9Oj8_qk zAzZ_FIpLm!y^NOaynrdL!xgK;;)w-K&8 z&h1Y)mT)cO_JrdI*D!8FcralvVV0?=3Fv3NQj}abD z*v0q=VF%$9#s>%|5Oy-&LwE$?SigK`MSW)~2VKct4cDX{VXJp`G(yZ?n4-s=iK#c+ z!|P+}H$s77w#L1q?5aDFq%0}CbWh=(j)WjM+Ql|qMEgZTpP z=h1y{dR!j)Ejp5;ywCRQLW~%1HO+c9T||n%$JRoOck)(8Sudm*PAPg~w<*5t_Hd+n zS*7ZNRB;|}HBH7iS&Y|w6$yQF&<+vT6oEF9%(WP$O1*^{W=Bj{adVxctk3 zFeUMX9mn0|t&>r@j?jiWEyI{l?F{o5&36fkW+uW0o}K3RxWsw(L;5s^R!ApeM4Fu7 z*<3x2QjpnVyuL%&V92~N;+hhw3AUHx4P$H)>~ZrBlp7n==6RH?)EiOS3S~e$>|8_q zKqI2$Fyzq`F?*`@Si{Q<0BPg~_4bMh3l;GlNMQTfY3cpR0NmYb+Y z-gZ2QC0%`x94)XtqT~^N6uAu%7-jl5%0x{(39MDD53%jwOGh4?xLO-f5p2;rJWV1> zF9z$om$EO9T&Ohcg|Co?1Wm(YXn?s^uI^~~u2j-vlyn%`V4Nm;7iIQ4ya2^DK859A z(dGs$ia+KicIe1% zcejJW7*W`g6k1LcsIO>{5AzM9@Fqv`pk_z_l_*b3WOLwgxH|Pi(*YCh2-^&~5K3Nz z+8L*}LM8bl#HaW&A)zq2=X#XPEJkSXY+WA$+0t(c!zYWpc{^Gf9pEVWOL^pPn24e- zeKG%p^P@)~3ea8#x3zf)8w?Z$DjL;)BGsR~zs^@*UwpRS-GX!BmopXSaq6c#%@U=5 zh0^~TdRGFRfVK%@yqlWhVlIU8hJ==VV!WuKW^bxbIZ`7-d%B~nY3D9T>+r-!wL=4M z`cb(vt-c&Pn?~V+YkX8V-*~=rcn+$(u7f9-qmGeup~K-hWM)v`8AR#szYmsQfid53 z*O>QnmkL`9NUjiCUt<;!W$8Ick?3O9<6du8;s!Q+1Cx8$a2l#gj{Y-(2kJ;6#C#fo zEXM078xz$o-O;Np7aj#D-e$MjV^dF(y9}YhHyMpsVcxEk-ldf8!-bUYhdQnbYMlV3 zzE%)6^AN<@3mV#|EbJr-PmNtdMlg!&LuPkJAzLba-?P56)noBU^!ffk=6-q4pw}9u zFAKZu#g4}BHJi7 zxVW)#V-R;2oN&N#G$$yDqW?NZX_}Lrh*|5LqF`CxI4$oF}Ab1?b6)4+Ce~`{hhZ}=~TNoSZkHhnG9Nc;& zXh+$l8|@ag!(0l5@t$)I&&l|UPhGqvqV!ojE4$25yrXIP!us>)&;P!sV@>BG9B!Xy z3-oM0W?NHKgz+SPi=%v8sAsbyBL0-IIni^(QGBpTbv>He(e!SQ-y zbvBH>zw4t+JIAEKynha6sy-#dGzA 
z$B?Iy7dPLB*p!myD+@DGB+OJxUO<7$!WKcO_-yF?9chvQ1HXf(Twkk_aml{H@S4nu zVsA)%*&rITP}fWO(!<7=&aH6qgbjo$^M@n#^<{&iaqHP`W*_o*tsf3^*V;r?!oY(m z3;9=^$Cp%njaIRpwCpP07VVpb&Y|KQa(fm77cBcUUpha5OIeS{@pos~<;WSyaaa9R zNBpB|Xwh_(fSslQS+X5BG-^@WFtJ*6g38YBJJ0$0pE7k;ufH0w5`6T+A=L9rSgfLv@uaOD46j)+O}y$NLg4N?lxRgdVEy5 zBgC`I)2bHu_amE+h0WZP-v4uVdKs!v>Wz;WdKSj-67M-9~fku7K&#P0QR-w;)3F zISDhzjZLC)s2Iy8tY4v&4pb=C*8yc28dLAP8dt17??r1@a3B9Q$>I4E(ch^Jq47}; z%q#;Zy=F@kj%GpBk`K!nDG4I@a%H@evJNZK6{&E^SNe@u3H@0TTHmO zqwD5Th~}-XflH&&><*e99dhFK2Kv*F{=_@up%<2j2N+`>{Z@4;tS7J!kZ-<#I0FY7 zguo}m-U#gZ!-Rjrgk$|{m9_|v+)d107`-M;I^Iv>jG3ks{Dlibb1RkjY9yvk?eSHg zvlr=_zbRCqLjxH4O-- zUs7W<3K{;-_0ndDnMsrwYnTX!XD4R4=B4oa5^v$nv_n#l*DEk(7xG44f>pOJ1$FLRuXEvcV- zX>7#Cde1L=>x+MERz93`%?>XMJM$RUDHW%ARn)NlqRqGN)LkPs-ex8rfc-WO^n~q( ztho#>KG_qth0^)gTNg^-%TtHtR!H6FXEB=1?TiqR;Q1@bk+$FSo0)^C=IFmQ|MJOY zVQy$552g537M987DK89(i`aNehsjFE4m>aZA*Ab$%|C?tZ2QUFXCg{yEJHyfN-o2V zHG(8~e#IiI_(VSkqF)#D&9Ca~Y3cDNbxior5L^*cU^;umL%Yx_@#Qn?5n7>K6g-};m z84IO-2(ld<2;*JMA8BwcdjKmG^A@;$yayR!Q0NfiFO4HDC#q#cDUILopMKcnfT8@W zBlr(@i>;XzY!Zxf_eM2EWt&1C+iY+es%WEVjQt3DV50}-JKRNJ;Lb1%iqzCm?uMh$ z;&KE16QX7*!ZZ!LpFrArZmY(BZxzoWWM=SlM8Z6(k;<_t+XJ$qVM&=()$A!aBh$Q|X^n>Hl5J>8szRh=7Vwy!0SFC-r4R zfy5Vh*c)h7M9vZ9;TV_(D@934QE#Ov0g?L+P1hjk@^yk5^HL=qrNmdTI5xD*>YWYZ zoU3^;r8$Yg)w~JQ6!tQ$|5D)Hc?d;7X!SkxB?;*Jf7s7PFG6XH_p#SCn~d%Ue<>Su z@?YrVt)Lsec*Q0Zsos3x3-ovl=yPBlYkc?!ZG&2-lM9ze7UIP@%zPP&%j3h>)h(Jd zSSlIu@%US?YD4*a_h7Kl;_2A(VSiGu4M#+t-M9F>%{{dEd=t-U&);*#m=D7y=wA_w z7QymmVeu$o%uoIIKX6l7*pgLh-uED$d_cXI>dPC(YCaqDQ}Lk7As-SO@yPauXBFlv zhzl|zum6$@@4*}gYaQA>tnP}^TN`}Oq7zJ-81s|Ir1WqyvZN#JZ^z~zo0i4*Yl%1- zu_y<2@}=UB*aySI*BE}xZK$SWP%W>??!x1NS1nwQFTBg0xW}Zi4`du^r;D3bh4`j| z^7}7GGtXyg+WrN4ujg+}e;1sXfI4dZwIqt((7+l0gBEs_{uJlg5m7=f%}c+DC^5+# zwuveTYWr z>r1c|#+@)c(Q|TBD**Eu_(j>Ew;}CF+l#%Ic+X*MBVhx}94cw(z57t=!3#JsqCm_Z zl{O7&OTF`Q%=rj^aHvaxrCsdU@PJcoMz5z00@~QYIy=*HEHfO+F;$A;_MlZ*d7m(z zA4UFvLiFF5gOF$v-{9f)UE~Dtznup4LcTOS}lAk|KTC_5d05Cn~M_CEDsq) zmLLb@_Y>wt^tNTHRVXyY-N@tklU6_Z7re(8f~GY+Fsi?OiwFJjD7@0$j0R5hJ74$D z8}$cjfB4WZM^O=L+b|1(r%~Ha0D%mPkQB3z>&>swPf<2=FPH5w%0{!8N3^o-lW8@D zew2v@zLs>Qtw&qn{JJaV-R5^NGJM5+j~oo~VlQxFdDu)83|ZYx7tiL6x1m{iqq2Bw zE7J=D^xx{)?w(-|MTwB;X^J&F&6_E+;gEh1ikd_Qda}6(o_nqMaIxtD-i=zOLB5}<&i|s8S>)QC0HL)~kh4WAahczGDgYwZUpsn|hcZ3{w^gllPP=e>@C5UDkXY7Pie%G z-(eW?y@^qxar_UQuTkuc)|@LostZ{^tZ6wqHEfF&E)C1@mv*qA{&oU$1@jIyh_t8q zPnU*X6_0=!O#@4;VGcxL;R%nxjZp0#6n)DhZ&gLlL(yf}2tl)2Mel;5AAi*xg>F|t zHZp&QqxTQ{@R5#&JJK8hvzja5CuS{ZPuM!p6rfdqvSx!a6)fc`^4xgL9b7bDbH|sx z6}5yaLLEFF>)|dDCB2Y$`PBLaf7Zix5sRY$u@MnbawXlAg{3}-3?PYZ+rJ&yo@nR! zCBgGC-Pse%`XQLt)Y2A0J+=_eVw!hR#G_tZY!Q!axws~-_}ndyvdHrk`xiySgR$jX zQbV3h8PL#fwnr`EJwKy7cC$6LEPF+(iHF%X^=N{)@mQ{+2AJ9Y9OE5L>rT7`I$*j! zBWbK?JzL|7|HN#q5al0Qdd5-qWKq-(4OQBXyl2XL1THJ-PqQR!#3i6SkoCd-WpwpU>-4HQheE6@Gi_? 
z<30PeUfnQXAbat6M)snwG|XeDHF|Fs_UssrU71pQmdz7T>HTh@^jZ6=4-pT@)0HT(Q-p$zVXGO zQ*s>V_FO~d8^NtYqf)DND8*2ZsY-!7dCS1Tdk<(xv+9$E5)71{%6e#7V}>@=({2t$ z#!|IwxoVr0QfMc~uvCtwicQqi9{pR&XBMMfX~zCOGBRI6-TgN7eW=;+LF5Y6;c??( zfLr3rTHRSedq`pNxQrruz~NiX!)QC&Sr&FPNayi8xu0Jv`fosv<}m2De0o5G#xYUR z(D?Vmtnm~sqp2}Q;T2A+6sRl<`vII2b+GDVmVeA9iiV5(+L7k>(Kb|#Z>buGP*bUf z-Ry*FsFFk7uL!0nEM@nQ%C4BQLz(FsH`aOlar16bHG)(<0#*Lhbx2KnJh4=Nl{_97 zq#mkL`zAm-?mhge_uYzO#bZSnZaSd@ona01qB9JJK^i(k;>X+>KEQmLI>W!w8F+5O zJ;LmZ0TR=b^2p9g>>$LdKelAxDv=Q2zT*4rFKDN(cLb(EZUa?rU*ICSk^UnTy2>NJ z#I6{+;JZ+VjQ9sn?5z#`%TacD#5H-0mNcgOyZ zQp$(b7v0jVxw{6XO~nRAjCaT4^9W9(_TYkhSL+|~u@)(=s`HlA;!_C8Jo>K1y^ev+>(kv#sokSs*plmC) z&@keuq2;8%d^DaUc)p^!*htR~X#3Oi+eQkmn3L15Dz%}rBx(W(jGpE1M>&xTjS%70 zB%4_NLR1Z3U*#wKa`1hos%gQUNDF+Bv|sTGx=TXY`0#|XW(d5Ao^(XIyQ1uD3y4Kw zfJ!BmPky*UAfOrK^JSY<*Sx8}Pd@zuMJ0qfG zD#>_gMnaZIQC+x%rpRUz+72th2@t+b3CAJkVd@|B)i6pH@M@S|412a1u0f1qPypM;&h2b>=3i3HEi zh=*Kgar0Gf)QHj(Y{nz;Sel^phWRV9!Z3wF91vZ3LT3ZZqd|t9s=WykWnmsv3+u53 z=RR!)a?ue8S{~{NBLi`Kfy2!uH&%1C(p~IypJL^hRfA+S7Plk!`xVBqzM9CwFgRh7 z>Nn`mq0D8ri)N|1QFBo zb)x4Sro0HW<~|I-3H0s4!$XShy|uo%dojBGzw1SgeV)W#gBb5xpl~Im24+}P*2oMK zP>}{RWaFW^pCZjp^KO`0nW4}9yh4kdvop{PKOwa>jY~yl6Q}ylA-p#9C$q(N_JNWG z$lN@SP6#J!kIw=x#Yj$b&32}5Ckpv34_vYTrNeoA!ClUCz;e17a36CPeB4)vl3+*D z6ZY^@ycnKFVgD)Q6|-k!XgG?E_J>(_niF@h>+q7gAL`>MPmF3Cw;tYRY(SH%PtiO( zF(t+eDZ63LgAQ11$yz92cgCtZm*nUVb?N=$qx2DKTs38r8IRZyYd)|Gt%f>dvvJGD z9PsAfV5o=G0&Gy}f52-gb04e&zT|oEWno`=q{;5sN}hWyR5mo(20S!7LmpMxNJdqj z`_Xpx+zwcHWjT=yPRovkS3lK5pn&Eh=>K@}j{M>_($Hqsb3ky%m|r3WLtP%XQDl)) z)$vo-LJEdKf#s$%kcf&rcniL;qsp57&;tD9XEy$2D4*|pjAC&0zDiRvRzO&>lBQcU zO?GJVok25k*B`CQaNIu=iafiGt8{n=QjezvI(p*f>b{zWgYd7oNpX{XrB>5Fxdgel zr^jH=-43sIGPeU(`Q+1~_~bfl&@U%|^o{^|)wU{cjB+VF!uM3ojde7U6M)c_o6CHXnig{s-z0pWym? z|F`O|QvbK>|GTO`eByt+{%BCH|EB+~`m5Cc?fRcl^+#{}AFh9^m@{zx=MY0Nwp|_E z`DxvB0sp7e|J}-u{f#SsH6l*q{ht3B|EJzc{r`jit@c-`|GOp_{i|w!jG$Wk%l8Kq z1ok2b)}mk(1pkl5zt&j3qV~_e&$Ykm0uDgMplRO);~%GPTzlGk`>&(l7888*Y;Sefnfd^FOt&2NeS1M&YS>}#R8coFf?!?=ztJ?f5#SF_ZIv0_G- z=-${O5vBC&T8s&`xLA{6|N0W+gH0PB5YH4?G3KJmjD6 zVgTR#K2IB#Vr@*Xd~En^z#4^LqnXN7%no$Ba^MnnbtG-W2q*CC- z#KHGxV}Gab6P~0^gyIu6Ga?|*&o*$LEwDti$$LL7-%B{(rgIL{2NxJsJz;z2U@Ex* z(_v~b+NQJ7{0Co*qGWvzY=J!{SS7*q4TZ@?VH(FDntjqD$wu?Dwm91jE&b4)Ad}L* z&;?bmV=3?SgqinsG^%r<5eq4*UA(8&C!pf-{Q)lKB~U(L64uvPrNcc+Ab}|uOggUk z>}_-Ai^}gdu*$ng<fx14b zl)d~Rm8m+NDoaxva}iRkN88u?^6&?Ovy|X#5TsWx^zSQKtHDWr*&&ccJU&hP$S{jp zgP{u3r4Ge^h7>k)czV>_i&}1_*&=F4uk^QOR^Z(po@~Su_tBkU&*4g=qxn>0sfP6x zTLGWjgqMX~F&iIzU1W|xwdhsl@UW&J*mzoeEIt>4*x>ma#hTOI6Zw5%N9=1JE&j9V zoN2{>*SqmC93t7f@YQ%vEk`W>`V%`geQrd3QTse};OZ)}PE!;Pv)N4eOdP@=y53vp z8u4)6L!PTjpxm4ZpR{x#X>TNjqN~_&`>Ws9lyq!Rve4hZeD{FW%I|w(YhyeRb1waP z8GK&HAEALY{69E8EI`8`8vfrIAA0}9 zG(McMdOiI5AvoJV#)rF=d~?WG|7HoBX(Ya)^VzVEp4Y`j)Y*X_TXT6Gyg}KVsZ` zu8pJXZbwMX|KjaU;G-y$#{UEo2nY_WL{Slg1_ebWC`y1x0trkoD5oGE$SEj?!VHIU z1TqnN$5A}jTTypi4|F{i8PpIC3Fvx&7vO=%uJ$-0-Ux{KeXE}Co=%AS`uiU*A5Hgk zSJhKbRXz38Q_s=(CTbK2u9sCWvXQ5^*~oJ`&VY?Pw8H-M*tEi%xZvlG2HX^TKV3qw z2Ae@f>+NbHTGo@T77sM{$inJ989Ep(46LfvWo4Oxe(US^e;#}`@QoC{;>*Sg-pB#R zJEZ(qo%y@0%G$XLW%1y=Sl`-$S{B34nX!1Ed?R|!6|s!w1$uXBaqQZ3>p+q9dXe=` zsmuV^+Fv{m-!h^5N~k1nTbZ@5OqLPX*yrPutX;)cLf83zV?RZWbj4<9T&Z<>-Us3x z>r6x~*BpQHOLV_2uA}*@@Fz?Z^I$C2itL(iQrXSog6o$B^8;M@j>`owY7d zc^i0`Qy>!MrszT@{?v|Hfp<{WQvAq%yLX4>eUNPH_;HrE5rv$%RxHN8085pEuHZ_) zI*b_Zw(7+PE+F_=_+DVub{L*2lGTh-_k=c(bxtiTeya_iUsO|)MxAPlXaRDy_fcJ=6q>*{Yh;@Vsykw>@Wc+! 
zWjmy1)}{QPk(*lh=FD5zFKX1y%Cm`5l-(OHlJ}4y`DBs1!snS!b zhwO7Unl-xd>nJL(TGo+hw|^5nT(tTa9$uDQYWg+ETm(Imh1@-jf*=_52xjj_bTq8P zMO4fq$QmP+Py8CCY!^V0l$#!{sQn~VnkhZ8jxmPbFI)_R+4mWG=L$V%?~KkcxB={L z44O=Sf5?tneJo{+PJHm2KmV%p;`jjHPDDZtwU~zhCp59QpkmF72c)5$@2wr=b((!I z7HyO>0%wwux5ZmTae^!Of$F5Zl^fRf@MW~Cwz_}{VHBjpytQeKq;%W{vT7w-+HA(d z1)X_sho0uiDxf7XXIjx%dGs)k=6jFMmq*KOX-|$li+^p^K=nd%nQ1D}8Vwi6>Mn@X zO4Wc*r9$nUDhX?{4d5p8+{{|--Cnm|;e2XE7;WqTwmQR>Q!D#FPdWi&@>SlvhSuzd zo^l_(V9&-6IC#;_eOIxd){SQgqMHHe#XD4oBp8yeUnl!d^_d%=3D>YG0>-mRi7a`&2A0`G)-nHm~}tWLtJb`%moDQknUF(?{Nvrj^=_-}lq znYxP08}jsW5kG!Cp%c@@y8K8o?3Uotwvx6bmXO03Db7}5aiC==>=*Q_70BUTt5G>+MMvT3OhX*o0ZN3t2$`C$~KPV zQK|M5E2=G@-vqc4)~f~cw_0z8lZ0F2-wdBcZ8FzEGS*$@QILu^bzuLMc7uUutv&)q zjM{0U#lqB1>l=~R=k(kX2GB=VG6k&zD-W;`OOo-?xk^6!foh*!oJ=i@yb9eyc;XfU zPuBnq?l`00P9At`z}1S9x7M+rA_KpTvZ%v}9{i`;4N(Uld9nA_27af3&-LJ+X$ao_ z0K1^Yg=kISzRT1py!D4S)0R@(<;g}~vFsHFXFcV`yLhnu1U8e3C)OKd ziEvN1D+!Eq7Rf-B5%T$u8y6J0k5|FyY2QrZ1>HKf6gt|_k3Jwx+WwIqMD`3>yTV^X z;irWRH4Aa1;@0tJ1^<=@eX!&p;fDOH$$W)J@=x?EV{N5}y(e%!)OZOMr^cUGX^n4# zd$IR_yA=SioPIg^u!r}I!5e7so-lYJf^Wh5z=szEFL64pjP_ZteS`rcq$?GV`5pJ_ zF>|;v9<#{$S70d79qc`J+&o(4#xOjeepG(2uza|m)q4?BnpGmd#ROI+ib0(`^_heDk?8Vxm5T8j1tN9px*J8roxota&fD$DaYP4;8`XEz*KGPsXI= zUV6dU?Qd@JqCcE*;_icW#vg^{wK+rxT$<^EN8=?^{wYshojB)D!PRNGX|i=B&Ds}y z+phQ|-Ha4z$)sS01tOpGO7`|V!4>;@m3AW?Ks)8C`@e`9Xxp-{qPcnUlmhG`M6iFM z0-7gVA36bj+{Bx>cos$v>id59TVdcYn*z72E9~(`sIMfX1#BK9<6^I6))$9@9VEfgotIb5o`^XaoSCxR`o3cQud_N^Tg!U~P7m*9_32AYrB6h<#SdTmt%f7{hQ!<$VKsgX;1|&-&F4iD%9XS7D&unAYkpQu=nKMqtzApE;4jU+9V=ubQ@WKfPYEM*I`zzZFMIJGl~)Nbxmr(0Usv z(-RJ|K8cS@q1qFryR>#e;^Idk5%J;?=vao%35HIEq4PX>Q0XTw@e#>^NTR>#Cn)hE z1Gy8PSt1^v;zHWLNROk1ee*Xk+L*>Ik`orQI~csLXxQ<=E|OMnEr^N#NcWps@5Vwm zas?xXxvJED_>PAuLDruzG^?*m*&Oh*)TMHFsLdJ?qSLnwLC5qhbNtUaMz!HA$=9(KNd5T zFpg@w5ti3qr~h5`c{dlR9WYtEE(lBXz%m(huJr6Xg_=;wNKtXeYhbQ3n9st8n88;r zANJ+oNd-p1#p=d9PMo94^zX-#_B}kXs%fM;G^lqoHX4VUCC?)+=-n(--222(VUVWH z0!?x>yMy`fhdQ$+ll_x5b8^bBC8j+lSg|KPnFV}#kDPFeO$bDy;ryEL9&xY5X=$4p zr6o7{@mIMiW_zIk3`F`outGH=4hhKZ>!9QpaYf#w;UcL}@q2 ztyGd@c_mQ!D4>B=P?aXz9G&&f31#(e7O0e7NIlu|z^XQlt(wTT5UUtjk-9%8w7YzN zvk-1$dz2Zf>WtXUdS=bm4zDi$j5BM>L$1%ZK8!`rlq4cuG4FMW2i^8GCXm4D&grs9l%xR7X9E#*cn<-HEy zE&kLgNPb&7^Pn{4TBFlGVt8$Xx=>AX!GkVumXo zq8+i&)!GnRfg5UsRidkVq>03`f2IC3rq`7#-3oWD%NVeC(3IE6qD(zE&GCopP#gqC z8v-vH0`F-1Stl!_F7Xi?$K*>VWJ)U)A$MI#* zqaSK$UzGbAfpQxyh|FYNb1kbpVg-x2wQnH$!kj7>V!bt)<9I~1knDWsPsu2~Om85@ zz9~?-o)@|eHE&zq&$5lFm3`B_=50-2+1I=kIm^1EUEBHvG6WB_eik*Mm}uY>y(sWGA|@kHnfj z{L&p%io!633&efLvj6dn)VTXTV+t~d z@{&)t191+Cj%o$Pt^__1H<)QZAg-95U!oOX{xDD}jYJlW@!S}`a|{%AQElg&!nbju z@H0)}KWhA{hWxy*@YktW;_qrx5o#vNxA(13=5I0hKm0?P_u-4<(H079OzKzbzU(yU=#MXoMK3d;#VX@`nM_nLM_SF=U_Pf_T~(lE*<(y183@b$bTs zCl-Kdzfa3{;#(Pyw_K_%HG9#ML~tf;s}+qNckW20m|0r0%PzhL@?5*9frnaG?p8=1 zg>vmj?>8@>R=Ww@UM;;TM>OpvAallVc4OWpcXD0`VI*e|6`VPv1BY)8y4xAt>byqu zwA~k`5&%qL6w*GN@&A1k;f((=a(9$(_l7Lg{xcm$H`u4ch-3tsIicXB<)VKY55{{7 z%>n`a@RaKR5zI&g^ezH&Md3XY0`Z7>mewBW;ok^$SU_6do>&B;f)jErjWiP_$#M%A_`ecVD4;Li|D;97T z;!(nxJC)}f|Evwt@a#=@inUY+HLdulaJMZkFRJ8^B$zu=D5^bI;3VG^>(1#0Y4M}b zCTq3rCe(s6-+*YR5F-TQ7yO-ph^=Ys0DeS02yFoTfl3CWg9Pv+0HIRSj7*h|y%fuS zMqrzf@h(ECZkDU1eT=j$Cut>>dXcw|85YU$sF~LcqfHQ32-PSVo@xm@P)a%V;z{NZ zc^_@Xa5flQqKw6|V*)H-ywCH$m!TlUZg<}gK}$2emTy!TA!}-&TA* z+1IbK-9qg8XX#W23l|Obq@irdVaeeT)(@N~djz#-?%{X|6-VLxGjtv!P#Kd-RBW8A z(?^9=gz!){rsg<(YB3??-Urn7HmXcoh3V$gPk`88#flqJAgr9)3?hJ zb{Fs6Zc+z@X&0}i=vw(@#xMF4`8(}pA|4yfhYHJkq=kA3orhJP;WO-OZ#SGjFn2le`VE*%fgU&^iKTE|w}Vk@yo?k7Cg1>k5@mVM_iAKZ#(GCS2(-=TT? 
z5>W*jl4T6WM8a7_2EyY8`*_^Ddds^00&lVSYYf_7Y$W&ZJP<3znj^~O=`*HI2t=;Z zmV3b7Kzn7Y8|$M2H;&|4Y;soF_}!8qwQf^^>f}@bZ^~pJ$~R?%nGXs{rn@+wnMTWQ zEmSKzwsuRquXrURw%8e)Z0_g`8rIW6)=6QEXk=}oVSZJZ#_5zFv1@9#oew9a)+bWR zSG=T?J(&m94&v^LDUm?PrChvDOI(kIL}5W@<|Ix#G18;N11Y+npp zm*n9N$+<5VsS+kF$2V|zuPUU2Dm7eC7M+^gRSpiyp`t~xvdpy7{MQ#mnIT|>%EB$m3j|Nxahx#UlTQxDXi~cvfm7s~7SBvUX|rVrPPw=njJZ zOq|M{y^ivioBRPME#bTuf1QP8iEFqm_i_@hLFkYYcxrPb@og=yB z+yy${kh@&3x8**l*L!pSq1Q)ptM&S1?pu0&KKB#7zLfhdSD(M&xm%7M>FkjKL7WUTL_p))f~d13Q7-)D%iDDKd}v zwN~ia8)IVEiD+K1VqHe;dS~?it!c-4E^gg+JjAr)S82yI+hE0x#^z|=&I$W5Rk9{L z-h(t;M2S{v8a!>6t!G(v{KH5?cRzGZDy3pqW9>h7b^a|lzBZV?D>(jVyU7hE&K<%U zPEr7Y$n6vqZB8=j5FeDV*0FCh;U4L`E*KkVir6YHh*_eFouo*1+Gnb7rT;Yw@Y-vj z@-wN#;*DYpUYkWh_6l4IcGgq+pIE_*sDq$~4Vt{#6eZYaD`TO+J$91-%$H;K+?}7| ziTL>q8ZfqBf{P=%lm)~+oSmExVos517$ycTL+ka_+4#Is`f%s}yZZKnD_YL46r5-( z=D(iy!ull^%l;%goUwj5sB4+Vjo901NQ^~78I`*i5ziR+;t4D~8DR;k2x|kq27z=f4>>DY$81LI3u9KB^Kv?!t zyV^6-7s`O`2ZYpE7Y`wjzfOX_)=bQVs)q$;|XhS3-V{JTu|FelNJjT4w&$1x^2+6F)PBxb>DbG9$R|6Uafo zW9coq@*fWxDShXRQ0p9Fs7elZ4Q|~pc(IIkg>}K|yw19yZCq-QY*N~}+Pa`kk@aek zwWX-yv&P{gtj&ABSS=ludk}2+Sb3CJz3!`~Q?@PVMgCUwkvWaK;GGrg(o6HJ7pxAh zJ|61r`M=m2(2h`8eo9wGE0htCzILCrm8^dW<2zpAHra?7nxhBN7PFpdiyT4glvE@| zt*cnquC(Hp_C)X*H3Ov2OItWu8{rxum~1M3A=)BRO4Y`1Va2t789R^hX`(1?iL$A4pQ#nw-g(WC9cG%mh1NC4*T+s_Jl3{6#GLI{+D#j|L6+6(0Nw7oH(8&Kf&6@aqhGBLn}S z2Y(IlI?<$?LL9#&lchBDWvsku$7xI4j`_Qu2=HvI_`1sB35G zMV2WIZ(zKRpEP-)tUZYt7iwG|B=3!u;xcVpCESZEVeqiazEnF&=`;G%t6)oCRbh6r zJHzbGX~DIveU8(W&Ra(JyptbvEwkW6RKwY`WON6|Uk5FDyu_1n12EJ9(1XYBrid=@8-2r~S$DJ4 zXH7@&!<+K9v7mns;X=Lf@~|~Bx7QGVm9+~`SN0m@?ly!~hr9L-aCf5NPW_9KGh)6Q zYv3Tjx0JEuYxZcEO7Sn6zgXfedmEM_(^ugZ9MM8!jKC-6ZHwE4%-iMKQd-OXqkvM= zqj?|3Deh7r`x!9~YuCkz$(B^x;Hx^Uw>)~O^2RBV@R3#cbuD3WrBDr*Xpbkw6e{OT zr|rf-o`Wa`QidD|nqvi@<9LX7SVLOKKrzebpuiK<5frb=FIa6H*y)<}T&F z3VO+>ynGV%yla{A*p2E!y~2=~3JJ4d(OQI7b5|e2-fDbp|uTX*b<@L*|rClZZ z09eOfhJ^-L-?MH7nJ)Po4X2cXRb;GGn3F+BE1Nem3{&@U0j|GMmh~}jPFaK-hi0wp zj48271Euzm+-MBYbSlO4qW#H$8WJmD;`sdVYUM8vXc?YM0<^~){5%p z#rdDi8IK&7{#C~140iX}cR>?dVg9e2QrsMQ(PgxOLaA#)^v?Rnr7D`Y#dKuc_A{>z zUns<#9Km8r^3DxMR67*%E0z(uirlpTi2Fspys}pyLl3zatM zYbWDBm(U#$(l2>HYavrXnw78zLom@>X_yCR7($!50QiiUj%GGS3i=TM^7M0_+DC%v zu@bM~?N7f+8!eRaLrR|&B6Mf9vgFrTdttq;rW9COi|oq#ETj>JPGvoToW#TW6-XJ; zKFn+1I&-f*SnmnkBz0DSN}2N}F^D$#gv>|w1)!s$QqFxY=C(9e+=OWGmB5mXP}BUe zE?{RpoYdPZCCa6!!-AE7s*g~kvfp*+V8%aWAc6l1#H*Xh4TsdbM|j8Vzi z`=JJbTMdDK!#Mc&Ati5t?+y?BM$i>o&nz};s77O1l>7vUvgT6EIsT4^afyfVEfO|e zXZHuRQD@HuWkmc-U`gl5S7yxK7`qcV7ilqDo_z$ZMwJ^-@bt(JT`BjmI3#Af7d?UicS z4=sj{uA-%R*b^3tPKe{hmrCm1i5KgS4?TY#>Motyr9vcH@I^kOBC+^&RYj=UzMiHa z8V-BYSTxRZ-WbbI5^dO@QKHT)-}!+|Lw%LGVo-ks6DUhgt=xsTh1p1D^EbgI<$>TMTkfx z-5M+S?W1VF>PPvO$6tj^z14(sFk!QR(5%fr8Cw<# zX~4z{AtQ0F2v!{ADR>8Y*@>6p4%grds>lZF%ljxvmZx#L_}%h|_0A8M zpW&l$w#V`lVA>LFf>HwpzOL7D2m zS>g6Ks-&Z@_@Xpn?cY^0ZjzA!7dp+)CRwu4P}aFMa@M&Ogzk5&);`ZMr_X^k);fjc zyW8vEhB4H))-8y`vDS~JGnhw|Kse8U&7O`e@|_@KoMq9DRoyueL3$dTLzP&wz)zu} zI}7Gy5GFSLGX^BDK9*~&<)7dVgFEJB(GeZMnx>#uW${N;n?FwqAvoltDMn9wQ5hUVNVxa^;_;kTt@j$Km^Py%HuF6TID& zaJrVzy}zOs6pF95)>F!=Fr^e4X09}#jSVv)17p(0Hv#5#+l{2*v@kel8=N<}P=U4v z$5h~AKh7K1fg_(^bpwlow0CNLhe52qR~RA$w>t_$PXlPbhPHg;Q@Vm8@$w`nZmTZJ zst&V1d0S|n#02^iy1{z7{+v>5LT7peAR{o*T} zH_~4oE8P%N`qh|$*23u%_io>?T)wea>qGJ2?GudC_AL&QlKu<4X?TAae%v3TU%!54 zRv(Q@#{7^md&R;Q>7gza3p3I~d=}V07U!T{5wVQMok{x$ziA^hLT!Z`kSR)ERB>I4 z^l;ngtX37jH3}@njp^Hes!s@yjrPl6R2?1??U%*MR24?W{&90|m2(C&a4h1Ys^pKf z{;M%&0Cgr-&Xdlt7pkkkQi(`N5$dtECQrmL^E@?cvkt$D6RB)an3-b$m8`40Vf z3!iQos{$k_X;HDDO?oJsbvEgtGtBtGix;Bdwu^tO?UR-^yPL2SQ_~K;^CARmlW)s> 
z=9el07A-#GC}hLl4;&&dig6@Y`tw1~wC(R@m03dMMd*yw5cs*w3O%4t8Tqq|>Q=e8 zK1WtPxT}&sG*mJCA{>ZZr)V%94l~}6pU2SPiM?J4mz9b}OQUVb25GBI!=8a)56W!3 zV&(3_!0oSa_`&YIABpZI7wY6;Z227S#-P*Ln7kpB3&BdhW8^F+PP4UO`O7Mpwe|#b zC*u!|rz*ZK^hHsr9J;BOWJH~MX>F~BYWS;EWen1n>(WJPYr`Dl+hw0^jBgwZlCpwP zRxe2fzqF5^<5uu?{2%Gb-MApx_sn^-)Xte`=tbQM)vBd^D*qpO;~TY~ z1NTpRK0^2FH*px!_xabv0eJQOmeBi%%54h+EQL$-fj_4oa{PkqM+u$C`{Tch1Zv4)2S!|manWB?8OsqWR&k`Td^8CLK%&7bO#k%~8|2nhs z=ecL>-6S7#3TIUQ945{6b=I0+Xz6&}1Jc`zknn^-Jz*GzI1j<$NAP=re~u6TR2Hj) z98x-vSbZo7PreQvFW*N-xt$bmp9tUeoUgB3KYXsp@OVM8w%KJ=?+|u<@`HXI_)C!T ze}hl?_i@^;>a05Yul{`;iaDeLuG~ES(Pjb`Jb6$aGaRgoB!&yz&yg76j#9lGmB|?r z3qaEwrO_iDVo!VzI?=?COx4>fPwI0RndnfH#LZK&jQaA|_lfri0b%KP@J|$u+ABw| zA1b~&csl+(Pxo9g`W&|KG!lS}8*%?NOy5G^`+)inGpSomV|)Qla6?vE{^#M(FDBfh z_es3ZJExxY&u1a4+al2v$K<@{v=_BZH(x35t)CNrrP=;&eNL77oFw%LYU0gI**BW? zNA!m{jlGT!H306lle+RxOuFiKdaN`LXF<-?xvW=G4k34;q;EBf)4NLh=IqO+6vh^*4!LG%a8MM?PrC z$+AX5QbLqhH-EKOvZD5#6k7hTCYs4!+rR9B0p!EZy21V<6tJ=bp4O$@04NEYvcBmxV?ihr7YraA);}|H!6UOH&kq{ zg->7oC4S^wD3@&-*ZLAxj?wzphc6~N4qceXZu_4&bMx>hD263DhM9>L)HhP4}Q8Kv~<9miV)X>`#;Lquj*REYSOD#4xJ# zwr;(&Ue`vko?*YNHi{KWB@fOSR8-zOH!aj(0>AfY8+??pr?S z3G-3JV&9Bz_%s-3OR1tEiLmnVCpqeT>i{L6CixTcX_{K>M0?^R z2XanoFUBot4+coB@TaC<7Eo#SGK>MvU^GEvb^2xTp8D~6X#%PG`p#2h-nQcFQDx(s z)^~43oSzw3`Z!JbyjVtgS*&0ri>Ip6$mDBJj$7+3BGx{7}|-2NFG^~$R_*w`&H5ZMTHc|pfM z9gy_u)rS#PXGdE$J-0*J>I+AV7_m38Zr|~pS*!C_^i2KQ1}T{@?ag=~G+JDS-SGvP zvfM6%a0JabxHM2%hL~Y);vLG|N$?n-m}1VJhRG(zt5RJ3XXhK(SGkC$#{lkTPR|9r zykIHpq=gzY545(qRBkgakxB$A`zfJvo?(^jl&}w{p3ZmgV%eXvs1pI*LNYCWzRLsJ zyK4aZ5dSTjK@4g!f}CYvLk}Jt-x%&6HJIPRQGBjoirJBqWl>XkX-#nrQ#jhP9)O7S zK9^|k)6UtEp`*}>jiZdW)17!Jdy}q;1FH(&DIiH`ZIbUC^tRupWEoNiB0+*nv46ur zA4eopUG8bkr%IW-6UPH6;}!d2JT0yJM=r&*%vknU%>PmC^B_U~IzGaOy9dGPqCY#E zMyY;D?4JVyGm1tO$F9tj(KQLWLF0E@@6s6B>WnC|Q9HLE6elZ9|0!tF2}CnL?!|`| z!s)@-bs4mFD`7RZPlXX(br~8Cw=bg*xVuoggIM+jhP7{%wQ4ok`03CBaJY1HbQopW zmiRB{2vo`96^-$=*Ah8IL9{_rIodwla76yl_@gx$`f4HQbNPDN+!i&3sw+-KCP7_f zT?m^bF?Mp>zl~iEbIK}5m5Bw!Mzmvpr!7uH9|tOFTU2zHy^7vMR+K-*HE}6c=Z&Xe zeaB?2<4kE|vFxizs388cjQC5KUF_-8z%+WZju8S$|6)vDw8Ps!} z;!g3QZV=R&j+LeCSMx8?<6dqr9f2;54dZI_c2{m=-HGjFJ(RD4?|RbCK7?z>sSh!Q z%~U=&3af1?&vojtD#FE?s3e!xS6k9IGAx%csc0VyO+)#kM z6LRE2t zw-u)3EAjOqY1Hz9N?7!!TtX+vM9y&S^G(Prvp8}%7cGa?P2v~$PE_V#kkakuuur3{ zBzKPxkWK)Czv2ho3`&lls!3WddNoG{HbKMIcpr8TA3m2b>FbwnLpOwQb1M zhy(DUvD@%+m2iKg5~tQ~4ebSZHa)x%aSSmqx2XQB;}rrXofXAgD6ThJu&aoZg0xlq zmDmK7_6Fs6A&W2c;7<~=`;4#LPgI9;uY#;@Om}HE0w(!5M0OYeV|Uf@8+~*>Caz0f zZ#Sh9PP`-&6&wm0hLfVlpyGMfSK5vX-3J1lyw67B#uzI(&VnM64&;Sa$ z+7z?`PNdy0bX$`>hhij1lO$n=(@RLNh`KaUXyz(RoER;l$O>g&He#<$NX6#mu*u;Br?PZn)Qb9N|zO6{S_+IWp_?_I6iW?BN6pIukr`5HTkU)MQ|Nnh}VI4W@_*m8>S zFOI_&v-(QvbT=dlBfB|R?o7KqeWO|53W2c@kPZRllR%Dt3$*L`CTfeLes+>_ldpOD z0k5-R_inB4FL7JYjGsuHu4w-CW%NgRyXgdudz*l&)cP>_CnE>>l4r%nP8A0WL&ua> z$Uttlwa0mA&$&bb$ina!WwDMgMlrJ8sq!nB7(7Dy*>U4kiS3=FA-LoXg<$vt?dD85 z*yj$kjeX9f0aDfLU|UxP*>M>m5f-`K!!iK(h%d}+aNBrwU2v4D3I$9VJ zpZg%M)oGKjJ;eV%%}dud3yJB6Fw2=1et%PDkehT{j-13;=l8~o4>0z5CdAuOOD=LE z8=`;e&uS?F>R{Fln~us}B{_#1q+bEZGC;55=i}Qc)OENw3(%7QOf~?IVfNBRzTtzJ z28?=mjY{krY1%1Sn0_lU1&j9dK`+_)BHwRKR5ZB$c~IUK$*vx#DilG;5eH_0upL*= z$b?n5U^Ii|e06Ai(9Igj;$SCh6h?S$WsRq5$mAnFL^S&^Q}y0DMkREw>C<25VzkUe z@aa-P(VcxG#%wSjLJn6O*mq|(q` zQN^#vE&6B0fulkXC7&|#ubUtGFY>PVtq&hX=r4==m@oW{GmBh_pF%0SPi zZ*E{uR2jtY^Pn1~pnUe#V09F+j%^Ir%&md}ogX7b$)KyW$l>z_`Qwkq|UW{U?C=22kQU0i5U7h_-k- zfa04sAtr+m0U^sEJO%=C3cK)jnoBl;cdNBK(1JOSftJ*e{|H3n>{TO#Kx8jO1M1)=){{q`XOuv4MVf%WI?NSJu z%Ek5km3jDoiGPfXKGtAKF4J>Bq7~zSTDrIia#oxe z+O0>ZnT?k(<75=E%wtZs;zO-1JQca>pr;%LocZ<-z48!2f*u~jTVrbTl*i_$^nymt 
zD}mZzpe6t%f?5d}<|%Q9z1jne*dHP*|NQ)asgLY8dpmTlgwPpo=We$fqLn&B>SQY8 zUlkKBG?hv9iH5-)FnEGt@K}ez7Y&2j?HuJX_z$}m4Eox;o8K(5-c{3Q2;%MQcRomH za{jwzHkiY}Pl~Z$bO0B)rAcvna2+id}54*=co)#rT%O$)hj!wwU zEh4eWhbU^Ba&~zk4@<|tBC`S&)h&Yg-v*XQ64#`XMu?wtR{q})pdkLJdrgWezHZ4% z6S6Hb(8-aa80WOu)GzC|V(Y}3ei{GZiJd*ll-wQMAR$o>t6Fa39wS9Mk3Dn$+f6<>PWnHrhOF#B`)y@ zT(z*+iHPPb9b=M03slOu0-Nd~A*+*2GvmHk_SR)u(l*+#Sp0rpN#b9Vw>k0NV+QUs z2kti?-2K4S{a!9Lm}lU3I`DE%gj<8@0-ulXNVQLuHeE|)Hy$VCkExw*UWVGKhMIh@ zLk4?q`by7%ngiSjJb-^&s#Q7}V5!pUKHwi{7Y^{d5(-m)1H8lmPWXT~rGOs<7$yCP zcyYopd=jX47}TRc#R&^vIjLHjQtTzrNL*=(nq=U=KqRU2FTI@HD1qz&q`eTc7OAJ~ z)os6=k)!Q*a-H@&itIB7v{+cO70T_+!euHL(zs8~X+A_VomY`O?5JtDwwT6zZxhX;V2mueM8G^1N6cb&|Wm za@RFk;_0`P4#pVjOfl3b{UaI=4zkY5wc6U>VEWSVINS1bXicg(n68BdBG2dG{zkQdi6SgOLcSg!W-DU5xnstl;XgCwF! z@~fzPQC9kDZqkaP0d-KM<+J3lUHFZ*inVLaS7F=e8TvolccUX*7ddAMXDPHzAo2~U zoJtO~^?up-w>kGq+@gF$pfonJSy}qtGV49N^)SP9O(61x8r2{i$-P6>uDbJ1KQpYe z4B2PX=i&rv7N(lcnfMp^bkC`H9#cn}|Kax3LHwkasyJ8JE$vL8N}YhEsN&$!fylQ~ z&x(V&fk-yOJt$r>8xI~Z^DtRD|x0bcj%nl-y;Y_>iH$7s1ku~BklYwot zM`8maGI^uUhVo9@kTmKsKFaNdayuw{_9pAO_uBmpYrD9_`v8wr6Mu!E?%&%{p`2cc zW07Mx{gW$ipxea#0_e@J$Fd(Y@SSnY{w;RM%f4UWH`Rf!{JBKB&S!WSgp+@r2gD=% zaK|5NC^W!!mQd*m6^Wb# zyt|-g z@P{UJ8F;)7nAe>QkLS1mb{~gFUpz@!0Q3#}+M!3GG<-Bb#?f)AMZYOK9CX9zIF< zG{~E|#AV9c$>!OA0omvhkHi>Q^rDGhHYn^q{~z zO%p7JP_$ql`Kw{}tn+>Hyxt(skRkx`K{5AY~f$BE>Jk%qp%VRDr$V9tel0v%=CKr38T$!Df|mP{Amr~Z_TP(zO*~! z_r0)T%D>FRKdk}$74`5PJAF2VKgq-2jaAg=|C)OEolW^OQ~1+8{FM#h7uCa0T(A6h zOX1J<@TWC^-?|?DV+LQ^nNxqdL8D^(vUR_s8#6|V%pH~mxi8p4Z@Za$8S2lp3UJpNE`sZ#b{P#Wl zY4!0nsF(9xLLHsm2M6rUxH@&+uY_1fyOF#v^WN@0rQOM|WrRTIF_zAb_-nuPd3=Y* z)VhdL8Is6Z8^^h&0{auaQSv$QSCpjtkL+&X5v6+T)pV`5$)EVvTS%OrS`R#5(C4tt zTQaC4hLNM5D#gm_K)Uyrb=OTeyXxz#RQzNX32^Mh9{V4rUX;%gPB}zN7GZOMnlF#d zdZ+FeK{7rF<|S|4gsXjyaQ}wt4WC=gK*3v23q!*~$G~MM+aB!7Z3A+1`Xj;(xl8dA z%a)mE@vmI)?UP(7*;@r&fj#Q<2UPe(I@K{cC-g?Kfb3$i+Vd=Qx|mgdZv3L_c;O6v zS_gEaWL0Y@bF__YtKQc)?c$^I%X%>yTZQ<*=-0 zUD-cTJSkX0fIarng}TzAq&V8I4Tf-cM5}ilh7jLO^zOzQ-^)lLYRdJ^L+ffKck}0l z<=$CXd+ZJaSoc8Snzryy1!-x8O&0ZIMQ-vXr(cd`=b3`WaUm{eXFChx;&PmhTVgpS z*7T$1tmWbLGp7`0vTL zl$8&EOupTSWQ}}pCv11+8$Z{RFDtjQdm8o+6Ztl4oT}|DsO0}jzU#=g)|YQvpM1B% z4R-n&<)!3X>yuLsCD!z#=5^(Jm%(dZN4|}HoZ3`Q@m%>f#T7XE`91lzpxocF--Ikp z{zLZrMY${A?m8Sd_InXRV5N@OFQ+_Z?_8|az6bW>xjwGD!F2=j{fkMCl0DB9u+x>RypFf`m9!b=YWlU1=gC;gZi@_FUl%W(lHHWcO&0@y*&B0 zqx#pDt9*|Y`4;-P-$o;AK)%24AJY4&{WkUchju>uZEXr_SY6aL!SZxih!&bxgudbV^A%$IIDf@4IFNX!G; z-ycjdg1rnR`bHfa@G9P1>>%bP7svzVxe4h3(F?SH97}5 zI)~z1&pTbuT^3u~Npu)%IeMTlLXc2oy;qgZdp%H0Hnp$oVHWt$FNnuSb{U;xIOBkcj^Z#V^F$ax%vI|r$!cikX|LS8$j*I839Ir$o#62PC8x`)LJs5F3 zr}K@?=0FYUwy(U#Zzn1eNEZ~K7@_S{LbmX#1A zjld?Jz_L;i1o_uaG*5Dnpkn&%Xa|uOcvB)94|Y>Zacpia3m*o?G7j=fj+YLCpT&ma zSLCKu@<^#yX+=#|N&b(4iVeUh)q&A2H2mG6A;IXag%D&Kq%4|2Vw`ml8M&dJk|!oc zedP$}xTjao(XRdkfSAoT=b*?(nr?wJndS{t+K?_{?L=8Gg0ygqY&csk;NM<*mabV1 zM8dz34zo^V?kZgle;l8Yn8*`H?#-@)vsm}_xS9AYNExY*M~f=;L{C7es25l5V1Ehl zL(3&U`JGHIu%5;Xs!jzjG0&4X?d_u8s;}?4sE>$c|HV+)PKkO@e-}OFA}-cw3jYGD zAEl*_PP|BNE)`EhubH9ukkCW_hSt~6GV{%GndnwBz}^R;M3M@S41MNI?yrKH{Q^zU z@yCa>rv@e7c0)<{Eio-DV19P zxvOaJzF>5zD4896WzpVmq0+TEuxE!cO{2V1_3f~XV6;L3V=W(@D<2}`;HPiRF*D$C zKu4jVo!ecTATxo^`R3z2#Ik5Ef5x%K zt9az-;$_8X0d67$hT9*KpEdA(aftCXF1&&MxCm&xVX8mW=Lt0Fv}bg@j|n@cJy3ct zLv1*vJ5yi(a%lp~*M!YkVfJ$p+Uj_dk0EXXr0PM@1ZO&xVl&9>QU+3X>}ve(X0iT&M_%@`Ly_M=<6-I}uVs%5zi5f`{c4xPVn-`&u8 zg9~)#7&=}3bdnyOEz)tp+jpwlK;&|S;eFQ&?0kd$D)OZKe|va0gQsoEf4-!|d;2wA z%ur=A67Y2=k=#){kj~(9`S^&ni$kAknnI6WM1)#4zAyGaQ0#xfoM3c`W53x{uWGmo zbJzLN7S(03zHKnzVLY)HbW;PyYek-gf>h&L{2L3Y`1EGwmXIv$oQDs`?smezqzO_V 
zb$+Xl+Yy}RkB>?X{5VrzIi|{(=?WJ-7=2!Ag5{f;=kRO}c#_I$CteOCVz=pr&Kev_ zyq!noVwXyCwbPGVFpRyOKtluTwXkygZ=9&{KghhRhkcxjtzD3PE}f-NN%=F<6ngBA z49m4m;UZhv?}=}d9!KIbge2YS5}Y~D*a8@cOyY6X87jaUPU2W9W<$oE&8(x+v1OWT zk$f%W>~drhok<==HR{rOb+aTdlYq2B#MR6a6%D>qINyX=!Ffb&32 z5rd@&G{foft8!#sV9HOkp2E$sC;0daV1xY9v$EV*-5i`%|FrF~G&wWgdCO*vnW zHRVKo<;bqJelw5wh3LRwG369KXv?tdd zqHj%1sTb4EkP*P);&h(JGH^PrQd@0x-Q;r4Yg(MlA=3KNeg<38$>`9P(ad|>v9oD# z?c)97x&qg)i}GB)9LRb7DlbP=fyleUxt=KqyP^$S zE!%KgC4Ug9h^)QptmXB6MCmpS;0PB$-hO+U=Zt<5obgv>>9+jfy8p0IZ3N%xTpw8a z17Z=S4m(}jSkTgLIuH>`GzoUx%8soX`#CDh;XTOW#yjQJZbGKy$(&cBi(E}lZq-Pb zMr@a5M*uJGH9jj4yAtTK=o8{R*A|Hm60lX8zFXURNm}@2yDj!a2@=1cwb9qWP^^0e zT9XEI5>jvt@M*ds_3+6v-1%X1#^YryQUvvQP;pPKKrt?Ip^Jd|XPGHhRU1@0CuQaOT?S0HPa?4IGk$yOqz1YC^ z#G%6E;a*`o#Je3@SOi&Vtx65vP&$6_0v_Iq2Jn`%Ub~esZi7!ViRUTDLtz{)4$RP6 zPlcNC=kYc!MPLU(Gx7F8Y1IZ_>@%JXBXp#(2XqX@N2IJD`2sv0s{Cf3n7qe}*Xdf@ z%ByO7K62tPwwIW~!hycX1uJa>yo+QHJXNifDSMuZ`};C}0(W{O_~(OI)4~>xmMeN| z%$Txo(@#tP+T7rWy6a08j)^tD+lk8Zl^fy03kaQ$b@72xo;ub5i+AC>(P%fsgnT|w zPP-fVRL4sZj%$8h=@E_jJ>vOmOAhNDX()Wl1qyc=3f~i^sDvd->g&qbPSU$7k;EnF z$!>uwX?XnWh_?Zua=c3*GM+lH^SdN(ci!$&>x;yf$P7Cxi@j)+qDoUCOPu@@mCLJU zgLZSkYWlSzpPX(NA~|$ZI!$#mkPC4N(0&csPjpuG6j>~r*$XoIp+1g=j24i0N4&>i z_V65>s{H^R?>b}+(_elbK5qf;*}W)6WJf9b?H8eg5&bR7biBi!#GNn zx*Wjh7)2XrCs?)A%5N0T$2-e;j_#BHa^j+Es3UgL{0uGWIjw2;5J44MF0p=zAL%PE zlk(&&Oz`b|gSSl_tmKf0e+}-{;11?%nX+`=j-TM7+ne#7bW&uOS6!OI-foRgSlHZFV6*N0r=Tb1 zJv4f*$!IH5HJf>|ih51W&6@v?#9eKqk9ku-`P!L)%0CmUC_Q)m{-lcE(nHMyYir#3 zxavt2wT(lE@waK%)(D5Mwxs%7+?JW5sMC&|&zMO5<2B0Rkj+QPcGe8Ykmf<(s_Ar{ zMKW_jRWuk$Hc0^0(8SR3S7L9qL>Tsmcv-3wB$=Lp#ACcb;=keZ<9~N$jpF_z-N&Jm zL9a^j-ei#Klg72Wqa=_+Px|JDGBJK~>r+f_{S?k(N&dsgo%Knk%x$=#_^nc=Mn1DB zF?oa7)4nOn)1Tl;=QT9{{*aHkAfPPtmFJ0(7?<+Fcf<|e5&})d6D7rE@vz&-S{Xcf zo;Y5N@(h}aa+T7D*u-Ip3;*rLU-i#>`lI^#ak1ztORCn8zW&7X|Dvz*$*#V-&~6-k zNz43*7wGGJ(O1G(weHe+{!x8p(IV^VYXq*gj=r)|`kG?OJOS4gf5+3;1{gE?DmCwi zx^h>2$#P9s?so!;Yp<`b)&}ocqTrOiWXY(Dce%k!>1z>I)u6tb{I0%?KKuXY_w;Gr zU;o8_d5;HqS$G&0;-#R0N9Xw!PE@Yn zQ)jt0&oS4}r0(+G{JuDm&W-qk#6*!>_lIyB^z6^L+}TAGsy%`~pCbC~HDZ|gEMPkR zP+A{-iTwlBm5d)BjH$6#!Kcc!Py$2nUNnG4 zkaUETs7C+Db03UD^A>^vzkIR^7LtKnhEmgX$JE?l=z~nu4 z5R1X5T;eVOmH;dwyhQtF?0u^=pFHc02Ss;i<3qIM1Sa*q$;dV?S_Qp!ByZI_xR^t)l42R>AzY z7G_KRenl3}cQ|+1Zz2Q%9|^d;+;xH|IFYV>OoS5fR4EzV^dTc;0b6arcJfo~HGjM^ z{rJ^l0Xqf}I(2r9#*R6Spb`}Gts}GcSK9M4R0VAf#5SSw!#Jh#;aR3V=Rm@3&q0W2 zB$2p6#^@?;PX7g=Z_5A#P1CE@%bQP)Exxc z$mAO_h59{FW9$b<3@ft!<)%KDi7mx0VdDVZqQ+o#?0zn<#qM_vs@(0%f|_g@jb(2j zvR*7e(xe<(E92avXbCBq(D`yUg6tRK`+V?20!PcRFyET?Pr=5ng{4F6 zy0oSwjpu1an<|4``SJwq3A(9LcH-aonn?0t(+=3LK2|l^2nof@z)^=_xUd%hoS5yi zFC@C}a)sY7@zEd`o)}#Dw$BrcVlbTi5MQ`tF%#!5{U)sJ<}Hq`nB57LJHRbsility zqE)alQpM;4H{PR9OETOF6mf(tNgqjUm1G1eZ!@pcm{N&s=AO27BOi~GS+bzjCAXME zs#*++Eoj5)_9($I+0f)ss#m0~+vhiB<=cT9vq2&CGMtN>ind#0Fs0E-jk9AdXPhP@ z=3m(V?gEgj?uvQsJk}X+TZq|fnrEr*LU#gS;qD~=e*^amxTnn=NpnJSYsaYM=F(9y z=8=TgY#4C+veq=Bw?AQg%C!f3<%)Drpso zlObYvLmj$)E%m*YbHI1Adoq5fIkB%O5LwDSGdBe?g%DV}0`}r^TNa(2Wj}GGS{$bj ztmAp=bG#A`EJ93X`;HX?KAJh5rKrBFX;T_nW%Y}^N^?`EWk4XgzaFo8Jy#sQj!C;8 zMm4GQhnJ?MX);x0-8xfj=o4oFx!si>_@c{_r#kw522eVWD;b`0s0fqSV(R6CKj^`CsRth&!I4%!(?p&-y0*3#&0fwF+Y^X9LfQ5h zfTNl3X8M#e`?7ipd4abmqz%0SR913vk_1By8?V8JDrB(CdR$l>9h4D2)|C~A_eghE zu`wqhADEf69J5!^SechWI5D4xYTv2n1LgSy_qqK6?m*(Ua7*~MS3)f@fR|8Se4Mg$ zOnZ-|wHK>o{tip#lX5a|+Ci8>K52F3k;h zfX;`-IXNsG&^c(Y4+@>j3R0F2aU8(Hth7+m=w%svGmP`XZp#%kIu?`0krSAzMBj@y z3VjX6#ZFZi$+x_GT@c?IWUpS9J(MQErsmK`a#lUNQF*5Rl*kXKX}^60fy;xCqM>n| zRU1@EZ?`m>xxcYroqgCV6U+YFXcf%+gwQm!qWESwK{l*Uji=EDRXkQ^HJHdXLCB}a zZwPTWeQ5ry1pMySPfOiQO=*?ZFR!SmF+xtlQsb?-(UA 
z?tTkyBmRv$8X`>1gCALstQFwA1|$*X)2y*Mxeh<})2)3P$<#VhKg-a&atic{J?Vaq z?@o!rv(NhZ5Xu|IJ}TFxqV@RF$ZG5B{TY*{Q+URebL;F0q?4 z^4?>Y^NP6lmvgk(PjgeRXa&#Fz9hI$Y9GeARopMac+f|Q!8$Fp;HQCvX3tJn|KTpMJfED6`=JUXbE5J)*@Nzof=m}Bs*w{gibTF)j=FWtE2O@V! zm8=7G9OX=Dt+DH5HuH;aQbzWh$2%U8N<4z4ikth80_asc$$cTRciiD$A!sjGcNm^p zCm+Z7*KqtSHHOz0JYE;$epT-E;P(yD8a|6#0RCAIen^Ax@X?mg7IxPeyoXFV>pZ-p zeR#|5BuoN)n*lB{z%O~gU(BkQgV&ipFY^g$HXJAd2o3i+$}zRo)#N8!=G42<>8(OJ)pGV5t~#5e=1 z8t1>lGD<<6ze|vhknH2#SV^!(pHgQN`KI`5Af5>IlJA4CSW=nD+opP`5!|$z!VI}o ztWOWD3IsEy==`mVKG)4vp8t+tpz@LUNSp{Zl<;cZ5>rw2YVafNnED=;IO$Vgd^OMD zcN4d2@EbL8BbL{(o;vs%f5=Q?AkqPDI1h1C;j(jRTSZg8751dZTn5XWk%+gt@i1f| zO-98*o(c&6B9?Fd#Ct4yHf%~lcOjS<>zC(ijh2s>7NMb2JRg?qK4Qz!7e(NPDJs*K zOhpU3qcqo)&}DsAvXknU_}5TtuGHdZcy;@ZfYhy9TQBZ_n>(9(?R+H+*+Co<+GPRS zF8ga-EF6{?n&%pt{XLrZxipiHd-3bh2KIXs$DHcHULyf<#m2VEgIm6Rk2v@m`>a-4 z&PRh(5}WW$l5IQ_WY>*%tA}pNv(GaOK4K8Vc>)WO|MLq_A5rW=cp&##g9CBJX$0R1POmox$-DJ}GpeKF-SRBr%g% z&tnF59PTB4fmiXyZpD+!9lKa;;2s~VA{y*L?x1ZO5slYAsT_8+|1 zX1?M4cu%;IQ1BKYT^}O;*O>Y&F~tDBM|+=?zO_cUS?%{{ zGBOIal&&Q4BbipG9pxMPOP~)U3p_?{c8NQEPk9>uV85Ax|IEZY*Lm<`e+Pa?P^*E#Y*^u5d>W~nK;>%qEahCD z*h{&RyQ0us0gXMWKP5et!ze-XV>+Faw~Yi=8y$X^F&b>kj1JS0x@1lV=F$^ppgO`& z=_0G-kEDygmEMtebE$}wTMZ7=;O2Xb_RvP=X>%=`1KL2@6wxeOx*g8a325#6=3&3u z;hpUGzX2+*B}QHkdh$xg(KtqTZ?4G0n*|C~z9B^~-Y8pTbX>_V7f??{zl7&zkY%Qy%bQ7g!H^O~LQ$ulzdsuKPXsAs+mBb^PiU1OGVj z4yt+HgYV$M69CuReUUix3&tLD5Ml!kN$nwD1^o2oM6>=eUd_@VFJxg}ux4owy(J)9 zD$_Yg2}SW$wMXT-sIVg;1MPGn9v$zPtsJ&If(xTv-q#rb%KR#RPWB{x0U;UJrIuK7 zT6l?&{Esv|XFS+np2S+t3Tx{@fPWq#_FxdM7B zzGPd+KpXiOd~23jgpuJ(M$R&Xmyj z$^TFJpE!N&xTzCMgwp>N`z{}1U*l&?56zr0HD|(%In#4OGjdd*1tW`wo?m+Y0GD^Y zs!d^UU2+6(>Wo>l1bM=g(3Ba|d*-0R88gRB4^J!T819lYXY9=B-j6BMC(ZDFOqe(> ze1-GNHN*dzpVIUDU+}-cjH93NQzwp{skL;aWAyaDz(JjMDec*>Bga}iEtL!%dcn~D z4W=Bq_<0Fmow63O3Aep*{r}HU96sjEoD!PoHJYnu;!CAH&77A*BMwcOHW7aQTT@hS zQ(R7)F(Euv{KNm}mhEfr!dA|VNjVe7&ht2L&_{>``0(fn#!l5r{m*D=9lM@2;k1rj zyZQd~l%M+N^p0K6oI1h$t?N%`O;;IC!aFOQf7G4%`J6fBipgkU`s|#dVf{+ZFCH_X z?1H{UWn)S%EIEJpn3D60`j(Y+>NugZwt1(1V&4-cP8v&>=J<2REgG->f7<&R=(w)x z-0P8X6w3@s2#E=yOhV!~;K=e%93=$M&q$VH$(l&=PZEdGSTiFH8EJ-@ks^iA3Y;33 zIzd2i-I9V@f-xn4$P31_=|eQZxNRtEUoeKIAllURqbXHGa9w^h@7w2m=gz%%?l|VP zYrVBzGt2Ux^WA;+*=L`9_Sxs$J2N)>`Lu%{x{P+Bu}-NKijBmTE?BD2H7q}vGnmWC zP!j8?9d?XdA3aovQ)^2}3wvBL*~CaFKZNjakBM=Adq|!AlodIoy)2Z>Wrlt0N(I_Q z@pEN67rWul#&THY+)E)k5LNjByjm-V1}J|3!8;MkBgeo1n8CI+FXz8AHIf}I+{G43 z%RlCem`KeWV*q1Ulj*O<$k`$y9BZGw@M?xVI1*$Akd9LY43}W)B!NhR3 zFd>6z`r-spQ0t7}ohju0kSa`jg2{gt9SM#L*FD3LiC?wE49m*_%?1a%1s0R%E4sE!piG zYeMHE&iIe=Pa_b0MEE@qg?En@RJM^C88uVT^cw;Z&380!A&hKq%nq2~9vOh{aYtfXrtLYKcKNf6 ze_0REFx$-~22*)#8$!uU?rz2Czs?!+n0lptNtYz2DGiyc|9TwGBQTWvJu79h=bk=V zH8XB%Phj^rm(V*KJ6sw4DI1E9BrrI}LfJ%aD3;BueUeJvM=}Mh`?2_hV|Qet8K}w= zG9v>CPqJW}%d}_Wl`Ixf#D{n~;fhfXT!#VnNAh~PrglZ81jWxG&aDgODOm`p?$&>G zao&F2otU^@?TJFMTn_mNBXUJMl=zX~?}q<5mjGOhDq)h;0(_TS zSvAU|4pC_ST>SCGSZW~Af$giO5`5989>*Pfk-|h)CwGvbs+aYXdA43a^&(YqYo#i? z+jG`a?_mPfFTIy6^-JyQrT-59TyELBbj!J?wHz8)TQegG(@Eo;DpMC&2O^4HybO+Mth7(drVsTn_4P0vLflc0*Rh%6ODv3zKEA~u4P3G5Bb(p>3K z2Kf1%%R~neB?nK1+#BT&uMkaOKu>y3X#BDztI zjDbhWxhtCfAWu;-&*T=SH^}EIHfja~t3xt1sM9AiEvy}~Q|!z14t3eNdYx4_kQ}t? 
z(>nSb_zX^#{6aN&H;B<*TmeG!#IY1v57MtXBc zCzA0nHI3aeqXo5*B1_poc=^<1$Y%2Dj4hPSs6%7Z8ibRd;^6ATDiDrjLivf|-I?_2 zx4QP*K70;MO7dWr5p5X9WjmZz+FX_VgXh;}E1l8zPLCbR9L+mER73MQf`Z>!xXd79 zM^0;HMg-Zk+FLms+J{fE`1qdS_KY9pz^)yO<h$Ue_i=_NE}J+-c~p8LL#XY8DIB)^|IEAAdy zKbcP??ly}4N&>VyR2nB0s9vJWN&@xJ=UaBPWslBViar0F_Df?Y{gNTg)xO@|L}6+y z+j5{N$Ihk~u8a1RxRdu$?h18CN~BDvW&k$up?o5pRNEpgL!U-rZ?4Wq>gwuBaYI#> zUMn8Eo|2RwRY{+HYHYgE6FRhs3p4ZONH!PQjYslim`Nnmb%xRpM`w(zlgz1P+#(X6#`?& z*6mH5c&;Se+PrNuN^RX1ww}!*&|8}dr zrKQtq>)K+qcQkdhZr$9mwbcr5k3_7VZCm>~x>~IkNN5kYSmB=SR#&*CZCh_g_f~n= z+1-M#koL(OjJ*Vjt5B0b%09i6z# zZ-paP6ME6ti+|y5T_|;kfKEW+J_`>>b%m|2W~-xX8~!5JW&xygXsW5JwWo}zhd+LWETn$XfV7!xPD!>N28 zH(;VzA)`1(F)0Kd!{BG@nz`!ZhT(8hUF;J$3`EX%I*0ZAfgxsa*MVuqy6E zIY;&M_!XWX())ch%#*3;Xd#uhH~b!>D5ItH(YUq?+w<@Z<}$dQ@54ww+yogYl%BG| z{q=loFcE#%Xks)GRSy(Z%p^Jg^>a-FL-LS`PJnm{EGdxyY6ns=Pe*@swUUG2#_>oh z;W+*GmaoW7?oPR z5_lsrqT(8{H0E-A0*Cr|)44&spoV#FpS#!{ zt`yuL-$x&H5FI_IRxZCBjOx3uvc{>w(Nn{Cru5?F*UJ`o*#iF^EnwEo7RkHx4KMW^ z@S0MfE8u|ps#JC$AG&_{D)kti+~;EWVB&>G={n+aLaYz?_`wo4EI!*wo39TK>-P=~w>XmBQcoby_r9F_PI4BZRc$el zmr0maxjEMp{oLwM1}BfWBAEe}M%+xyByrLzZ%tG+>y7w;aGY?m!{+9EiN6y|o6)P!n<+B&a?9O+*)%mnIkUlB) z98pUeg`)k)(2{7?dS z^)b1rq;!G$l=$?M1|BYazDqw!EDxm^uj|Ao5TkO~A`iAuj!L7v&mUK+wQ4Ts)V+c?M>xYaBeH-TKJ$s;k3^ zu?Wo8gV3uVp)Eb)YbuwoIFyr1=tn=i9WxbeMkbonP zOO@iR@(}rKjI^X4J(8_;>AFJ?9JAtsG^If_dg%T-+9zWiQzn8E|UD{*IiEkt<*1h-bFJS=^jp}Jikd!ceCxJk2#Gnb690}x-8&Oe&zMXuIjRv zdu=K`cbme;h?GM(JN{@rN{{$E=AEm5_@IwGr=yOKjImup*}i-o!-@iyGl%fAJe9(s|5hpOr5SOKVvTW=iUNUi^** z$gktAS79#xK6;;%za$<#kL3rjn&Pt)X)!$9xjjD6qEglt6}wzeSg#?(dUg26tpfS5 zhX<4ouGx^RR4#G7YVqP-}=caK>&gc+O>=Jy@)xUn__&=$r$3l@TDfUtTKJ|aP|7Z&7O$u1>c{3`iIESp*z z&!u3aDBkK@gib%v*L>a7<d)^K zZ}?kq(@5Q0P9X!t7==U`nONsx?>z6w>e-GHpIF?!9c$zxg~b-w%v=3TVJR*+2Sq+f z?Oa)OwCakD%kg_J7Bx#F-T871ADSb$b#$0~B}`S8=P=~qi}F8wG8o$8t~SzCOHM4V zN3lL?dSEg*btF-WuR2!|)@w$i)>Gb!@doLmeX|FJ?X6ZoNgRnLO|^4b=9m zOoLc9d%YzmC#Z7$HF117k9z$I1Dc`5R-oEHAJH`{9qI;^J*u=*?&ONG{P3^?aYh1nZU#hPj`R6^pGGflQ zOiHEv-mkwZC4Q;izJw1~l>1V>bjdC6qtAG{`lqX#(`Y-tG*QX*xpGJTX|78GXcF^M z=zyiz?jZ7H#gnP<{R}#LbMYgCsQA4MhTX__+=g|kQp$EKJmz8Qn|>g^Y2VdhSPl$b z-SOiuJsRR*WxtJHN@%-$Nr6$xxdxT&lpnyk5N<)^vweoK!UG!03vGL)^amkTGw^rJ zNqo*<9?viV$>?LAhjB<(GtZ-r>F2;`4qJ5W<0VFOKBJdxZjHjDyUnLkJY6Ld`#4P8mVC0{V9i4$n9iXVNcr^Akk8XiY`LzDDw^6`0qS4y6 zkGEdM8tkV!7*V&mh7d#?$A{?Sx#q9LU3`38%^ewGmwlY-u^r)O&Qjix-z+V!CqrJ= zl$NPZs}Y&bF));;j)G-@qB1fL%D|pGb`0xf_BDQ$AKl%V8SKO-QIsFc8}(Ibqr26U zHYJZ_yLOhwSocz^xpT_;=A;$mzSL^&k$Yb2IZfs10<#wCK^|ug_g~dhSKO`K&VRb+ zu6AA`Kef$hI|%K-bIh`7r_MyB1@g&!@O^I3d2i3bk13^}H{~qg=MP=e?i}84x0}pQ zjOZr|OhcX+M*Y-6U_YSejF@soEnnuUULINRTU1u?bFD+hQCQ#T=bP9#?i+eP zcNxOB6AsDu9VK)g<4!p69OrqixB7Vy9EmF`F|XXDo9oxIHOD6&ia_-arMzf)gk>% zHM2g~xY_HfDC1pFh0~#rEBAM(qXc^Tr)(m+%T17T#058>n#?dpz3mpJePg$LMxI(Q zUofKt$lUkc=mYvp>Br9$Tn|EB-;IF*#~8!<;J zMH8|gs(z_%xkLE#DHrt)wYPu9FG}Kls(Ji_ynY^{#G8Jbarg3m%HuyyKU9aKq9DKiOLf{YZt^r@=zK@qThDi-&k;IV-yUDpxVZRBj;yxl z%m2S@ftM}t-_HU|URU*&rAPtaj$ehyer&NA!u$G}#bP^X1ay9_Wz~KW^kU1J2OYvE zE+-#fEcWC2Jm@57WK zXasa0Xzf=Pi>E>R{{;97%L@JJVsQa9`#;g{E5SFrSeyfG{IkVkeGu(|wu6SAgnpoN zpi`jMQ;WsJppBr%L8n1$Uxn*mhaAw^Z-5VU9`rD1|8d9#&4Qi=o&58~Vgr5&rvEQ- z9dr_O7IglbX!j!Y1GF7<>aU)WUYI`y}U#gm|O zpw=az&jJ_vchF1d_o0{2AE3U_dGufC4`Dz2hGpYFpgqtC=swU{(8E%G4*G-k|0CMF z6gX%j=mO|Y(CJf)#e<+TpfhqE^f>4o=sf5==vmMO(AvvD{|WsA?f>7<2Q>5}j4x;e zbPjY9^fc(qKf_KdQU70{FX-%#p)Y9mUl)rjUkg6adeAA*cF=jyA<)JJ=noos0qtE5 zz89e%X!fVj3$*?$^nV?$gYE>)f@VRd{sVe|*8U87fQCR%gId2pdsl!DzxFW>IuAMp zI%5@!bD*{O;i$&fqaDyJXcoWIF#{UH50e}h9CQJ68npV?P!GRUvJ-R;Gz+=_x(~D# zKi)G9S`Rua6hAsu{RZ&Mk57Tt;@3)sK<7aB3B@njoCFQw7fDu!K=DI9L!k4Z)1dYE 
z{h*Ve5zqzDENC^pr*8^01Ue1c06GgA0i6dO0f*u6z2b}@6@WWpx1qVF~ zI*Ff7TX_}a;}_@pK_~Gud^4c^_|?ACpcZ~wu@)b$nF4K)582@NcXmn{bRX!f{75Hg zJ$_DjUar3ZdR>k7uPheFLHj`ugIf4O=Xt?F&w_?PYp+2)(AA(*SEGH<(6#6{=q%`2 zDX)fptH1|Z584RY4w?n+2b~5T2b}|*0=4kd-iJYJL1%@&335QEKMK0&M`D0v!UK1Dyh$ zxdHPKw0{%$u7}>Bji7Tk!VW@jhMu6-TVc04)CWBX+W$7#1$6%Hurugv6RxjCxf%L` zPPL#uXe10i(Ee7~tseaVT?sk~S`XUa277@nfF1^&-Hi4?8{47JI+Q`%g?3#DQfFEo?zd-9jYj;9EXauw$ zbP9A_u7gg=_1mGBTnC+%>!2s)I_QF2zXNvOi0hys&?(Rc&^gcuXziV_w_FFEl_CaTcaUFCzgZ3JrR~CALX7iXgpz{Uv=O(m! z59|Os2|5dEjidjd^`O-^L+=UjfzE?QK(p_L9MCDygP=2@GoW*z$3bh~gMPjR@<8iB z>wg{mpi`6ZN6^rF(LU(R{pi;%pzlW+bYUO(-->#_g?S5VeE{`9L!bviBcMm+I_Mne z0_Xy0_P1fDw?WVSs0X_60PF->`ylKAIt_YM=oI?%cIf#b*b8*_!|)5x#*csxbO!V= z=mO}hT>mKe8`19XKrhfNXglZvXclz-ccBO9)bGLnn;`#T*a>vzs5pz^vz6Kl!6J=T)(+9j`BaY6Oe63ddXj-}IT|B%i_tYB?haOJJ9s=23n(t7*l zZ+X*(t7QdI^~0d^pIR)21mH{R?!w>7KUyqq6Ras%yC<-7d9ZqOO(9sVWZwsPC$8_5 z>sx}gy|~(3a}Ta+xetRL#Puq<&X?%*IR2)9%MSr+xm$wufv1-TYnw#8;*@?pi@$@P zT`XRp5dELvFXC?sI9$?x3BCgIjsjm{qgoI76~P%FIkx~m?1T3KpZ1ZT27cVbFE+X# z_$=^02DO@lwI2*L2Uk8=)f@~xu(Ub2djBh$g7tfsH3b{?T(CJfd12K}OO^*4fHehI zg9p5*(;Tc0*E9$BUDzI+x^NJ*37jGuf6qZ~=yQw3a6rr7A7~1$+*{QY4DDIEIXF`t zhzE~WM}xD~9iUAD>>$ui;2q0@A=JSbqMgk(5x^1f?*wfYetB~TfUN|!v)lzL0`Wu3 zXgZDdPk(-~_!a4AN3iz6Ku2)p1Jcj^OVO9T($7813d@75p_<*T%{5TBp(EJX5o~V? zM(zyOkGhLZo(gR>fvs*1X&Zmfqy5#`+cdqjox{P#Q5d0IL=``&e_0cwS*o>XoZAU zNN80OvO9v~-H^Rg7_xz_?$H+UYM!rIivB#lSj3W~=IbEmaTs_#@Q(_v=6MV=d8}p| zuFHIl}nTHr1&O?lP;3vU`ma^e$Lo?s}t8ZCrAYvEf4PnpwLy-r&?3H=9 zrwYCm`2KP=n-H&@{RjW*ql?9h!ES|vwH>tIBf#r{=LJ{xy$$wltGNUJZ`I~gZifC5@#B3769X$l=%Z2-VeMU_?0qn z=Z^p4cSnQcTQOM??3Z+58s7#8GrJj3mzuvFzHwv=mff;?nD9na7^1cZvv-$}2$0TT z_C3qB@LskjfoC(qMqAD9(glepa$zej^g#;i+<`h6@g?mHR?Rird8Hj!dL36#f4EEd;khrs@Ca^15E#vYmuCM<1V)1Ko zU5!g%7z|A{STU-jHSdtQFMLa}P|iY*q_mECifA!tW`y+!F0BFIN$|Z3e&-k`Kfi~& z*93kec(?|uUrji86o0cdFiuU?_9Z}jf!?96PS>P-y|ASw@JSD2)o+@raA2(UXyFjv zz)@@NB%u5A4D_7A*%r1ep8o8%`y=t`7vMV%zMs1JI)jsez=QUrZ&nU0{A-pYpTZg0 z`SR}q{}9g7&X<2b_#6IWvG{_IKEG}2qbHQ;^91;-|ML9gZv_AI;6DlepNM?SGv_*? 
zI{kjX z-Eo{9I^&1<@%_MOfhR;S<)`AuZT9;42(H)SjPZT;b*!Iv>G0B4lTpj4^zUi#jf1aN z^u&DF5nS0F3?J?-~KnUOy((EqL1pd| zbFX89KMMRTk1*;@L*M_5e@GnK z4>`yG8v8B0H|t1!V80${T1fi)IQaVi#+@e#{xtA$;8#k0zB)ml13m@(8v5@!_7}2n zAp!SEZ1i-n?uWdMLF8LF>s}>}9JTX!H6Ox~$E*1eXbCoS2ODn>HZ%w8+k(|9F>05Y zviBAHmp)MSU?9-r$=oHmmO49w4Q;{tc(5U&-lKj}y^ktf^=u9Pe*$`}{Ptq;yMn-v z?EI=;`XHW#{3@(HOYHkR_>Y7CkI26#fYdmEwaT+^yyR7w4<~SzE_{56eb)eQ2ae_1 ze#!g|18)T0A}B;osF|f%EgB*%q>eGt(Be=BMce@`wJuN6HIA0Lo7w_o;&du4yNN5(bq zB`pt`tlo9D)gThdhRsG=YtkMbq*U13iyAglbIm{*f>u@D{1Eh*|L$V(Uj!(Pt85zH zX^$)S`30~%1^(&pEf&9Y4!x_^E6(sx3TP;cCKMpx^&*)x( z0=p&9R}20$@CMvNf;-tSnJ3QyuLpj@uic||yKUl;;-i;f12T_$P`eo~=m+p@`aviB zU?1$_9(D0X=Mdwd$Afg#D7zIPe*yQl)OtrvZvov8y#0rZ#i!ikSZcQ?I9FZuIT=W2 z%W5X2QU7VwKZ$!{w__jd$j>(W><5sFU0(GJB>8MSkcxgdc=&Sc6fVCD6g!2>kzc9u zZkDy5-tub16WlY)NFPdmdV4VR8!#$1s-@#7wJxT?UyXZf|0w(_|G++MZ6@ zAx5CJCWgJnF!mZ-c;L|*JdA^n862HVE)l<5ijAe* zs&WK|#Y~v>`9v7?`%(W60bp0`n5w(6g%(|QU%2NRe4O@|I^}-fUL%8o*Qe%<#4CF05zgX<1Jg)bC^Wav|=04wPp!E~` zTn&d=z&+l@O7pTKIJ2T^MZhyJ)ylgO_2Z~NgL}TJ|H|*7@!l$x58fJ_UGi>c+RZ#i z>OG8l4ga=SY{q-X_}&@}?bd^W(QU)dY9(xrx5B0T{2A1Xpk7EJ`aige^*r#Mz~A7( zmtKbP1Frmx*M$EH;6uQT&&eghHFNLydFj617m2?;1HRA;Jf~B9@3QBOTz?VQ8|Au;XE+$C z`WXTt6B3zMm#l<6UR*5xIo><$4o@4iXO&vN8ROd2f~Dh~T0~Gy{BRfKoQ51ZKX>f6 z)Ov?==OqCg{D|{HMVV7IvVA{_vxV6u?KM4o*AI$4^pcGb@hIdS|EX`j*Jmpht_f^5 zNhHpH#BR@_UiQpl@k4m8?S`$5G%jY_QpyikAYh#a-(D9V?pc(~tLF1Y@J0R|&oT%f zy1geL=W!i1`6q*NJJjmK^=Vw+XJ5xY3W;b6*LMheH{QrKyoCRL36y7%Pm7&OaSZkj zFcW~DEhRl47k)eou^U9<&7J^~Jj4@;_)Rr^O18|~XTjGFzNbZQ<-GDMdfwUx?#hw+ zQ00otv7X`?i&u%CRIZORL|KpR`nT5LhC)NTN#g_KW;l@3tNs=scj_0+mvP^Rd3#qd zw9Cw89HXL5yky)T0sjp6&G^}Qeh4WC_owYRjglkFyOb_^em#qNEAcFdv9-vPdG{jl zS|5A`CT_I{m-erg2@U*qu|p|-89%PBx)}4v>BwLc`|m=%)y2i)Z?axlJ`Go|4m5kh zrgliN|HG(1zNA<@fcMtz_P)S#?jXnYvNYTF4`ZEjrTi^w>m@?;dh{aX&O+`x1W@ro z&H^z1T6G?Ii3Ev2(RJTV?6wB@S=5vC%De;*1785XNdpI6T!8iwg90%GjY4-}?$9?2) z1n#F#CvZP~;=n~8um1M|KMeeB(klEL>8$dbhk%PdZGy*wwQ?Ux^m!C`^(*L4?ZH~( zPfr040ayL%D)B!Hyuk;55qQMNfxj9#E07or`QTRrp9JpQ_lrKa03QebHqmnz@^0$c z2Yebh(hd71{Au8aeenB%&-maE13wB}^=m8ck3+t!7mov<1KuOjp}+GSGmAT0;bT}( zd1p)7eGdEfk~_JJJ9xJCXmz~_K_ z{q1?+$APPU;XdJZ@-Mv-@yiFl0{F@c%H!Qe;I%$@C-7?E-ge`_E#Ru%TlG1q=y@OT z)xekFy?F`#5bzLi9BZicUG{K-KMGv*amG{0Z=M2P5By2tSMgk;3HBJ0w=n)l{pZ1V z5`4;D@Ec>trB|W3u)sG%ha|e;xpUesjO(|NO_)lB!#=)ABjDP>Dk)L_7m| z3oDAntK9O?$p@CUxt}NL3`PR?LK;r?JWrD_FP3q+8csGBEEYeA_l|W|ty>sLu90}` z0;w_!92M;^an#T5!TH=0)GzsqoacSQ>o8cFt%tzBfcCy7?J0l8#sJR~;P$yX?|2;7 zr(abpPEejY$Hk`Gk*2Hhc@}(UYm4@MYM!ei`QN`(ol~plO>m1-PA8@R=#ia8@q7^? 
ztgEj$g7+WNn)&jYOZvMIZ17Y<FK~V5Ys%NPOAufp9$fTX1H2vhw<*ug!|MaLYb`NAv|AWFyTE@I{Ck8S z_TYTRp%LfDt-*S^1>iha!semTF+G%h9)X<3ON-_E(R$g$iN}@VoV5E4_~*fY?@P3c z;kJ)Xux)g;D|Wu(TEyGSip5cpqxM<)o|!)1ParD8H{=rdvfwKUpYm5351b{7UOeX) z`=!CxzOq=nhvVk3A5+be{Y37=;6M1$8C;*nb)@fTJgUhe)okxDNB->dJE0;QB#af2-8rUCLL(z-N8%UBFNK;055d z*ObfI54^z#e*}2H5B>!3eLnazz-N5$=YgN}!I!=X{aID6&lSK|``{aaM||*3;Nw1c z9Qd>kejo7TKKMhx7kuzXfv>!_T+gR~H+pcHZ_ffB0zNF`h4HrMo4T*94-FZvl;5m) zGyDVm2p=l1mi#k}w^sv~&n&3(2kcWA$4cw`9l`pHCyq7anGAKyQ)J;K{+))Lm8*;T zKD&x1STNy!a&P|DngQj)YW)LW1biSoU&0NZEl)~G=l{6SkL$O)<4B)8iFwElh&u3y z9i9XK)OF|2e+eqhzNuKe?L6g&!M}jdUYw6UGCvF8m(N(-B^|;&M%nmv;_ZImL%^SL zx2w+c3l+}rcgSJ?19qqFyki&SJqvkf>x#vWO7X7rd>EdZT@v`RZ@g=*sru0p+&SpR zqfK{!wt-dWZMR&H`Hjz3{8IGwi>FTc*}zYI#nS<0-1kG?!41XY8NAo)O%k4gyz1-v zT=7v{AIE1Za&G(J$<3<3zt|?w3xoLWv*15|L$Ubx)Q|f_urXT=3%;dAM zVxj8+M>jZkl2fl|kD$HmTe&W!^|}Msww?ff6u8QBu>WwL1E4Vuq`p@r^X_@@SHG>O zpVvWIULt2{J?10u_qzQ`JsLh^`<0$rB4;D`C*NMw(KyhKhX@O{9~SI&LlKi%k- zBk~^tzTksD3jC}G7e9XrxYblF-X!{$?yD<55A4w$fJe7E?yEM}1QNzTsHD$DSFD44 z$Vm#I;xq1dpqgZQoq*QhdV4cILm}6(kL7xDn>x2{c@pW?F+7)y*;TbyJy^Si&`x}& zVWqh)b@38;_d(ua$lLDrBkULW+yi6(pYnAZ9YAD#c^dMLhKt4h(g1FY#e<=4 zb;lA<@M1ns$O0qnz6ichYq9uXrMOsXw>vmnB@eavxT|jWQE6&D)}!`f@kQxJ<@*F5 z3pVtsn*xXj*wuJ;TX?d=gWz_C3%ONpaJZ0W9|WIWkY?~aerqstJ0efa+HQWo+2=lLC4_Bk}Q)jmST`@EId_ zJdjPr_v~=szUen)o_7#E`cS`rE7uG7QXr0IxL*qgARDE zpv#1xE$SfP60qzC|6%YC3qRHZ$%_Pk1i01b&U+>9Jpp_*@W1koQ+?oX9OD#_$4L;b zuo-Nt$=UBqabqbCCTF(6?ji?I=mj1^Kf<`NF3;li=x6Rkk2T<1xx*c=W!%ERL%>UZ zr41o7qgh2y*aCcM@Qs5{eI5e&634A{9?&Pl^gfJN7~|Gu#%&mHx&iDYfJZxA!EUwwPA_!+#Ham1+(?#f~xjzQu7X86>&{{sB`z#nA%b&d~i&Aq1TM&ur)K%j0R ziQPBe2>-sbsGrwEKRopUfyaW!0|V$w1hgqQ8wkLKN^;bGSL}Ho+*+Y@@Ecg$CU*_b?ua_bW8Gxth_?hpVch&ED~^Y=F-6HW{*^> zYFQQ@GtS7{1uAa#Vk>1Yj@L3xtPmAh&hod|Pz}qbJvnNV*Ni+ogO_YMIF4V;CpJ|t{_4A_cj~RzWmUs4P4&1#~{awlm z3oM_GXy89szQYLoFQyTWgIDY!j_(oDW27fYPm!J>T{fvDTuQo%bQ5VC>2}g2=@{u= z(gUQ2NRN;nBRxTSiu4TWviDl*bt&m8(oLjoq}xf8q+_IeNe_@7B0WNSjPwNQDbh2f z%ihQKNmr3>B5fnxPMRbgBi&1Sfbrwt4KGIwvlcpO_Gk0?j=1ydWiH0=`qq1q^C&FkSJwv+e1MENPD$-4)ZKT^tlcZy$dr1$F9wI$LdW`f0=_%4P zq|1Jr?USw|-9*|(x}7vhI!3ye^Z@B0(j%nDNKcTSB0WR8Y(Lv4T}8Tyw2gE-X_9n| zbT8=v(nF+2NRN@8AU#EThIH8jY@c)$=_b-P((R;4(lOG#qz6b3kscvEMtXwu6zLh# zWe>7_(p99JNZUxalO{>WNcWN+AU#BSg!CBc3DQ%fXGoWQknNMMBHcvVM!KCeNjgTl zm-GPXA<`qH$4F0*o+3R%x@?N=lddA&MA}BWois^0M!J{u0O=vpBc#VjPmrD>Jwv+e zLu{XP73n6@Hqz~+NzyUWy`%?750M@rJw|$h^c3kC(q$iJ`=qN#H<7lHZYNEWj*;#q zJwSSh^a$xO(i5boNY9Wi`v}`7T}8Tyw2gE-X_9n|bT8=v(nF+2NRN@8AU#EThIH9O zY@c)$=_b-PQqA@M@Bd5CHS>quUcH>X9r=`8ExigXn>rIL2f!kK?`63P@8$3PEHA}- z`Pw-&vKFPA!N6MdJS!^TaFR(1O zk@Dj#i*2NQlx2A@e^0V3wvqC2mM>s=j^%2WpJDkzmQS+0oaJX(u3`BU%PUxZf#p}S zY#q|$A7uFgmS4s4MJ$VLM87LozKG?kS-zO%wJg7y^(XPx&rS`CXp!q^JC#r~Lb#@{Fhaq^JCMp7MgH9C*sz zpUXVut32h+EiE^PR&U#C>1#RfU!7xLaLSrwS{PIgfJ+sltk6&f(nnQp=pzx$#$6 z=3L8-FSE>fm>a*qGUsk?yxQvTcOv?I6;9*Kxuo-Im9^ZO;yINYudyONc}bPE!pi#M zue8iLr&~_YI=-jea8=f;tk83Qc&)Yi$A0)l_PMEB{>7F#FLmRuw#+%P8@~kKF)tC) z{i?EFW1oXMuT)uB4I2IUFS8ok{BZ9nr_OnnTK`sb%9Z!Bt}Vf-YU3^~CnO6gUK0NW zpG;7`EU0)%92Go7eq$wxXM)$>srhAnmsbn^0vNA_U(=jd1r%)kEr{@6>5FB5-= z{H9-jq3}zs6&x3XpCJE~hyN7u{T}@1l(W<$=OT1x3C6wJgI}$1j2GpY9Bm!>jh`5O zTFF1leodl1dF>$H=g~*Li(Bjv@!<08vfz`H)2O6a)(462^YH&J@o|qmKL=dKtNtrm zfcnl*y!}4;hsbaA{|WJf9{Cr*k3^q`J@^&Gr#*N*a6f-|3vkhA_IKDJlD87K4!ZFO zaqs=0ByiEk%txrLUZV;xj}H%1j`x1kVZ~n_XTAa4PycUMz)vYTms#^Wu2Om!KYxMv zN#e%NOOT-Z$$w=9{0+dxZq^yy&t^@mjljikL-bqq-C4NSSV2w?@4i+t_Y~cz>Il{{i4)hr?N| zCyMIzapI47@Xu4ulmr&M;*@`axJ3okXG3uP=ae(!k+U30u(bQA2Y(&$X%D^zxY!{y zq8l{#FSik|y-(xn*;=$)DGub7{90?}s(`5PdimM^y_7%Ayu#ck{V;G@RlRw|rn`F8#Bb7*zW6y&;CdQX5C+J_`j-gIVX|VZNNk7o%_Fj 
z#qY+Xl&gUMCgo3lLD!LUGWC5pVn}jhlIN1M$f}*SN7~n~~%3pFZMKZ_)kQ#&(Ad{=YTw zN#gGz-oIA!KSg}X@V{H*#(zF%@D&VYTlLa(=XwC z482mfi*!i6Rw>*em~Yorz$27nJ);?Skzbw%GCtsxmL`AWGTkoRUcK%kK7WbE&5rv0 zz%N36656Rp@HX-vBwkJ2$oVwo%-y5qm^knhaqDlifbUXHvsy1$aFFkjfBHunyp-1# zh}Tn3gD)!FA%Jgdk+S>Q;hGBgO%?D?g(D8|L}8Wgo^?kB{xtbRyEG%hpn6Rb?`OO+ zM;Tb~)rr9H%_J^9`Ng|o7Qe4hL>w`l!sza>7kQRC`6PEhY#73BN_`D;D% z=wFC8GH-tq+g=*<_s`c>z;9AG{E+Lf@$GKn*=Mz2lb_#NK~A;;zQ@QpuH~3@?PC@A zKTm$|e&o-I*L(8V?^TfV3-V|GO1FC@?Xw(-is>UEMMA(sPJr)kQT!{c$Qcb7o^IkH z=4mEw+(CT4Mvw11C})WH^cOVH!v)~I#IsySjBbCRa8`DZBjlftR|piJS+L(IB-ox;iFAa5ss_8S^Z z@Y)>}_y@>8f4$a+;oiy-4>3;eqnv#epQnL`*p`!= zqWt+W&6wlGUl6Yy*SPxZEUvu@$*S0G{%aLkzGZN3P&mDj{C!%m$t!+a;bd}nf&Z1S(RP^RI&1WKt-{OHx9Z8i@Mrqk3uI}pz@H)ie53A=iF=d4 zrN5J07n&$%iu~sO=~swAvmg5S9Oh;pWWs2eov=}(E*{-wr$ zLVc>>@ZvwUo^|kY;5E%AlRG;ZvCi^3_?L2d(X=AZ+%5|mT_QH_3$7xTnx zhqe6QBK{!pDW0#k5&s17tS5i|GVy8Vb=*!^$B0jQ_6y%3KKCzL@Ew$Yns`6=3l9)K zOT6)Bt>-r4uR@3vdrm#7feZ9y>y5-~pVxSZ{F{g`c-HsL3MZ3;M94qK1lidCPU7{H zk7GUcN&^@FoaKGGS_Y=~kbjEnS3C9m0P+4c8W^RVhlw}d>F)2JD*0q`kZ+TJhI$%( z=7}%xT*d5n&M4d=fNz)MKtSx-_-PG%miU##XM43ib<}^Y!pr5f82-(g@h-A-R^Z=F z{<(V1{}?CaNCp1)k$;~1zPC~S2Z@I`ziuS{Y2s6!{nVclpQfG7y8JyOe?sfCm;L=W z;`N^Mv?^@0#cuu2=myQX_r=5)j%s`p^{F@fhc(boyaV_}xX)PYxj(+0{PPU(yU9PK za26b7FZmli`}YqLw_c|^bc%KU0C+9@*4*bcqCQ9d?9VkK_vz*JRpJYtb?sZgMgPVo zowv+$9ObtuWM1@p?wh|zIiV^oSpD_{>$5E{uv%5e~s4;5})S&-Pn1C_~cbuPr8Nm^$PO8MgE1aYr%uG+rJSH z&1-x&@m~_}=LTz-`do&D&2Qb2&rOOw=Xk%{>^C`|MxT_juX}E=ftNE>atm{YhJJIpXK~oOa9jpk1WxOKTZ8N z5U-`5V4A4c79(f1W(<)39SSFtgXG9R&HTZvkNb(w{GDdLmvw%hc=n72-a!0|#3K>i zAl0(|lK2AmQ=cdQvkGUyK^DkA#|^sr4Le-_C2)zuNBKN}sS<#~;w^^a4&qbvbF+R8Rgm+3^3QSIxqxy$Y~*}b17Bi%{uJ@)lNx6_ zXgx-}-*b-lgu*G*LB3l7e}QtWzt_x9vE3yYP%}3jVyw#)j{96(AJupCgWnYPwV;_ ziac9E{)&+9S3UEwkCOjN;!}U9fgk8@ST_=%WFBtf!xrN8o_M>Bcqhw8B|%kUyz_KcjHWJL+%bf8XFdsJMo* zenPylM(cBc_%De^x-|Yv+U>F{{r&Lm74X{>j`KGr4%_L^QQ|X?=?34-cGJWcIDd`) z?=kr362Xb zUnjogDu4UDM&WqQ!E;`*26(OHpUg8U#@bLp&RZ#G>VIiYlSgeKKF4^D<9GGyCtkm7 zi7Fd@Iew8iV9`%5B>#O1XTd=Zk$;-=@gI5ZOBMJ}l0W+~&G^TbVFuobT%2tpgzlGu#hd)sP z{~G06k7_;hwA*)xhdkqQn)vKt&1mAYd_lWkyO$^&c_TOI&(O}-5^wyzmUDvm4a93X z?><3&ZiXxP$-j;K)4$M+Tn?=m@hstCrAFA_gWeC{@l|2_Nl3~;}5q|@XNdE)sC#QXW6&<`m80t7}iFRs;s|B(1fg|pxw zZzlgN^E8GbtGNRIHiege{^Cx`iO?Tz59mg+Mn3a8Y#-E1ev?J?4AK53&-n=P8P1m$ z@;^d+@|zl%CH^_!es=r1;wO`nWOJE;L_Qx)X9nvAtifm&Z`xAnL}w~rV6kr~mx#qt zNab6{Q-UVbqxqq})Nmp*TCjQ(Bk`WZyG9fFLRWr}bz0J?#E7uQ6Jx!FSRr9)DLsjF zB9>3o4P-|01+<;5TW{r6+u5uYy`yv8y1}TFyG9F%aRuB28KRRk*%C{qcgF_qRu?u5 zMj>S+F;Hkr#|8zuaZtL^7FU<+McWKvSv6x^-I;6We3y(L_^j zFtTMJvTj2(Iu?tjF?P{{wuz`Q6wCJ{1_35=GzKIOXW}j`gfW@NP&rjw2SzcnjSb`q z`NC*2315jrnid)@3`YmjnUO@^ibmrZnlu_OWODgvY;@cj$P8!G35-PDrkgh2;Lj9I zrbbfHSS}Zvh{Ec*2`ib44JV@U(c$3VBI*2RO)kB(M638mG8yzjA z(r61+B8l8^DxXhfM%psDrgVB+K9N&CD%|;rJj4&&9UBzQ`ZAexOF9EB)0u%-I+jkw z@^u@m{7@{Hhz=))!6KF7xiP-W!M!l?P#TQp3K-UKVk|X~P;<<76lj>*-5wi>rxR8@ zml{jtqOojB)q$k8(UAe^l?A3;EH{DP$Kn)U$SBL9)Crdy#6X}w5)XP)_tF5HoR_-u z?u$1%FE(Yfausf_P1oRau_X5<=}{(A=(^kXHdt$Px1(%yl96lAAl}5r*+6?DolWF) zGaKz@giqV2#Ho4KZ$xXEY@+1ZoS4my#s`KlN!o0{wBD9WB;2h#2XB4J1f}L-3AVcl zuH4qs)YTeo-5QQYQOXune2vBm$xIHREs9S;MqAs-)gJD#qMJLro0~eL-ED2Xt$oqH zrsmF8soB@nVoQnVGnnUU#G~Qcw>EWkv?%B59U8_6s<&Nt$5T1mHXK*w#poIQqCGy) zlNcDYRJR)&H+Ob4w?x<1t*^VGRV!~ou%Z0GX_( zMMG#rFJ7orn#)Odt*x0B1fLw1I2Ad)-Irw!lT=HzFN!pgUGf@bf#%eRw26Sx5gYDJ zq?2eEm3tF8%rL2}jVPVZ4>lu80!8;WMI;Pu*6WO{5eU_Suj@?|v_0G6DFHUR4Ww+d z(Ke;({J=Z0-WGaNBWjV>BRqg%Pvyib#qOOFM`UeQnkgOi=;sGpu#RIThN5a_OD{`3 z)H4W?XEScIZjNOJ{D@9bzH@3v+#pGe&8hM>@XsI5!5#k+h0@!)i%_BsF5d z?E#bCt8`SkoFs#e zjK?P2S5yw7Csl%CReZE#oFq7Q)}sS53UIA%ZLUN4_cCgO(SteD!a`#6R9o9>r47epwAa_ 
zBLl-(H#m&6J2T+MlCpTZA?Sf&YtIa;nV!Q^;=Zm&MyuZu?-=PFR&#NEM_!MhY%UyA zSvmlJwnj(NsS(eN5l=u{-I(L@R=fjY2#B+<#@HpJb>~a*&VnM5bY@VNyj~2J5~SA% z_@%@F<+R&IQn0zYs2hys2H^#mu{t7kA&A8W6MeD4NJJ$~n2TU^Id$}on`Bn$38iNg zI}kypFK?R=udqU;3PV_l5>`G@5PvB1AoNI2b%r^WIm$G`Vk47X+zy)ux7n3}B) zq%3QvPFTVxB!70-GFi#6%r}*AyRSnPGf^4xWjE82Z%)TX?#6(j6M9{8S4%=ePe7Dt zF6My`%TjQoNS3`&;a-@*UE4G&9dKu0GWPEE$#ftFS=fcbu`{08jw+=Hs7!aysMmPW zj=nC`Hgg^sm+W);WOFDBF@vX_kch1j{@}058P%Lt^A4SC(E4QxspJI1lU})$9i>Zl z#B+G5Oi1KHZaYA8Hi;(sg zGWU3#L9Xh|t}m9qTVhB!QHZJYhV>iaqn@M+qhA=!r({_4Aq}%aIW+L`Gnr=EI2~bS z2B{AlVIdqCkI90GJ?p)mu|t}YAMu1C(|KhwS?6r~Lt4@-@vvGti#4odxDDc@&;a43 zBb>LRr6aq;b&)6W$26j2vuXvplw| zO2!mzPJZ#CdW`Aj;Yl2|qMOD72>`Pw06uOa@8?Gq*p-sA>`awQYU!pz8u z2Gm-j(%&&<4e8}bMyw>~44ZSA(JZz`N%0rA2yql}waUzHk!2>H#;Hx-Gj`~~2EAG( zGmiOhy9-b7Y)5}*CgpL+=p8qr8QFqFuwCiMi=}W%m5Mh{=$U1Se6YYci8S)I77HgG zI#?u;X!xx4s3!+~NQ+Rj2Zsd8`lI=DBH@WhFkV7B=EchHR$2T5YgyVG(V=_b zKfSj9lys+D_NaS%9Kk<(v7{1GQS#HWqWiR zCgGtUNV0>@nD7kev=g?{BIC>jp@IrL5l9rT`j5REV4VaXa80cb~*s)BS8Rop~HWc^jI6KN^ z21jtj;lHlQok!)wX{jB*2=MW~*088=Gk!y~;*akti+$rZ$oSP_ywix)J%rGBwto@ zSsm-+*iG%FwMshVLS3YteVY-KMLRT-#*!6IBx9rLLi2>APoe}oTpu3c&=IF3s0&va z9S~!nO-!7d^t%f88`MFUZV+Q`d#MVB(Sf16qsdsx5v(?@uK-LRAHf}S2qO9_%XLdv0;N+4nDzDziU;4OC0DQ7Pd1(h}QWx5ME zGf|F!q_?GzMrUGi$7%)hs@z~lEH~oF4OF~I3^(KEO-dhmu17;^Zip31A>W?9lB-EA z(L$m^*}e;%DQ(Wh_oH_-NK}vztPZd?s1*f4H!VqoY-`j+m$MJ)tvG@5Dzn7hU``j? z;wl$WLh41NtjQu2DywMBO@fr6FDJ2fYFE^DZ6>fbmRz%qNwI~=1lgtWl0*(-aek%; z3#)h`VNZ2OrnGwkZ1x7TFN3s22OL|=Fm9xx?r1KDdo(U@7L$wCa+C|xx?7iv(ii0* zxX-4KAM_SM9o{82hKxDZU z1=VfM<_V>z#CKb1VQrCJkY(mYEFLeP7_fURT|h{QdXhYMjKZRo_FJ?{?x)YDkSpgW zhTTe|yUJmd<4Kf~F~gtEH1&dO5(M~dER9Gl+G~jtvrF94>lm`YRAItxD|mQ*U?>sK zX40tv&jE(r76dav>Pg_HLtMxMfEix-?Hvo-70P zZKLV5>SP`BN7?ZzD;Hv;2{l4@VQYeHXLxyx*tp^7+I?)f)*w(i1c$+cmsZu~Q8)ZbYfkF`Y7drql>lpOkiY zclY!WlN|y~MQx3^-`O6rxCUAOn5J2n_8HWwD#6Vt)$O^l5rbxROWCnrApA1Gh66igWvRq7H$e8guc z4k4zbtLkTcbPe-fYDrrVp!A9jrv~swB2*o=Ocow78y=R&xBg3B@|>PLtIEgt?C*;{ z&gT{J_@V2y)K#)9`P}9i{HB?yzrcEi)-Tr;%yXCKxn3it0dH#XXX=OWe5bq&J-b?$ zrCkwi>aWzW)d(uGO?~q{)rOkiks~`lPc6@* zJL{Y8?>02#u5RmZ;P-p#o6o5m+WD`BQ&ZEfK_BqcH{S-VIXg;V*0cQM-y#IkWLHrs2 zGQaZ}TCaO#{LJv1`i6cL7vvd!Q{Q~Ac?0Wv?QQBADj$23{+s&d`_J21-|&G&y}ad< zK*e`Wee*r(5!UbLz!*6ztK!A z3JE!d;r&11Ph=VW&F3o`+jRZ$H@Pc>+(k=12O#>_dvH_#b`LJR@^Vt$lE%&d(`i>^UXsJKO^V!aqY1d%#Ot)YCL!Z#~e`**>z4iYOdies= literal 164208 zcmd44dq7oH`ZvB&DYDBU%QCC!Xjs}6EK5y`qiBc1I*R6{3m_Lo5eN?6YKjEP6H#QB zvD*yQbeU=09``fix`}5h) zde-e(&${dlUOORcVy}b*$Gdtv=Q>2un+=n$4?33i(mTgVcSbt<;paf-D3MK!J=y!F z_7IcKiyg-=Ld;`7gyw$cd2_$P!h3P!VFI1yqfP9kqcqcP)pT1mofqdCb&eM&Y4XtL z;etN6FMr+CH9o%V03+zdl;t%U>6qXD>6NU4Q#8Mx>K8iL2zs$8mE-J>n+FkQB20wH zafoXXp2Yp(2)E<@7K9H>a%Zl_)4}mdLY$0nx#IH?p9ua3ghOzjiBNV_N7bD({m~C@2!UWt`A!grZBfb#fdEDQKcs7C` zp$y?K;GRPmjeA}XAie|fG{i?B9EET%!hCRL2-o61{wf5wOUY8)AC1sk<*opqi~F$% zx8nX{#8)CrMc{Q6;zJSNhPVpxLWClODZpPN&PRy9@-^*pjlJia^d<;?J?{JI^FQK# zIYKJ%WQ3n^KM-*qLIJ{k2*n5u;4enVMtB_IesGnDk3x7GVFvI=h4(q z1Y8Q@?-8GgxE=9Y1YUE1|Bd(r#Frp`4Iv%(6A`;suBk z5$X|E@*%DR5mo@7g>Vo;2JlG4zeBhKVLrk#1n>GYZUzB=f%rcNVT8fJzel(n_q7Nw zA@J%8?pB0)+@FJxgK!tZV+a-CPewQoVKDH1h-(nyGZ6Y99FH&@fma2>Nw}Yja2>)*1iG{r5NdFL z1;YQW>!ANb#rywn(M*Jq7EZzarHTs>mmthi%;*n# zedOrejqs6b#`)e%j9^98J9;5L(#MkQmy&5x`W8FWE z`|A+qS)6zKsCR?-PV3&geIGXuA-sw3Cxm|@+=lQf!Yc^8o<+D4;Ux$8o#A6qj7P0tCmZNY)|k=Rcy^NFZHNO1 zM->ta#KITo{qcN=t4)5Q@4(MP$U<1ETqoiX@ac$eLVPYl1mU*`)d)U> zi3ssmBe*(*C#(k}fETLlv4}slp2gozhLQHeOy0%`7gwSR6!bEDvkuT_Ro^+704N{j zq7RqX595%KeC}V3f(a&aQVvr++5z-;ym}oK(#R4w&8?SfwS3PRjfO5UtTjkk_xS&=pu0f@~cNn}z+u=df7ggBnS}g6b z7wtT8kkQl3g%Yk4RAlpk)N{Bqz&}_RjwxP0U@~LA%`_5Rv~ga$AWuG(Mvdz-6FJ{Y 
zKPs@IImwjY`+8FKG<|KJX}39xr9BUHT3<4}*FHWe&)#}2SNrKu`|;-UFAq26Y*YjB z>|u(wL+bZN*L$k+H-{McTy7q4p*!RnDD8Qm)2Q|J#uC?t`>NL}wYOw05OI0=u0Gi4 zFQ*g51+{XGha=yYo>x>)ItLIgj!#~bd$8M6JiA(No{t?%kU2xbVDPQVf2jJK zS>P ze#YF}KR@rmZ=b7vquOuUxlR*t-O7G!yxkC7Q}H?y4NkuLeuIab$a!{v;gbuDo|8@F ze1d^V`8H0dxK2~~a;7q9JVSGgY<%=H zN$v)Nd7i*)R1f}enAo$Cv44K3`TExx7R@f#Gdd^Upit?vOzbmk!ht+@P@&Eu=M4zoyyGnn^5cAX?{5{l{YgM*ehuSBaORgQ#ZVIeu zpxS50}0wp!~|9W{!Tc1qO0RqJ>>LG!&t`?pmM z?KM+e=V;Ym{iYH0>VBL0^VZ%*5ZxzNvig(gNrs=G{G-bIbsg*RL%jB9hvM?uf1>tR zn;z`&e2>@dYCnEGG~-zkubX>lpH-S~$0^;*v#bX{d6U{<=s~0RW7U6wwnN)nhF@wT zXKoMqPL}l>=65alG?o8{*fac$ZV-oP!@Y`p#2&UALBGmB+{3t@sCsgB-h}zd^}Lo- zHkn{%s{X(AV9%{;Z@Fr3D7IYRYI&-EXK;oZ(EXCH3Rw})9a;XUmLG0Yu2l6W>7IX6 z{t#Ucr+#jTS;}AI`Mvdq@ciVj=ts_XPS#*~;OW1xhxXxg*v~o0d4dOu)^)nJPgL#s z2z}fnyd|i&A8I={r5M3#)Q49;9WNb?22a!W8KL&s6f*Lcn8(g{J**S{pn9rx-WjCw zU!#I}wAb~Zj=PYqXS{wd(|Y-}AJ5g4br4{`q(5bdvy|^UaNqh*SNku2(Fl6};esCQ z+^_58)cK~qUYWnsc4$<8dy?jRe-C;lpkC6yqfP(nvcNe321(wp{qFINJ1UfJS(J~u|9dp5aX54*77$`)_&Cf-K_T9_P7!B+UEiB^P1R>w11<= z7(G?`?1CQ3KV1D@uKK-&D&MMndZE$tweqK^eWt1(@aBsV+MdlcFWOqTH~ii(A#g`EB*xQcHM}CRnH7 z@PpcO>u#gx71h&A?K5=0;niB4rP>bF+74d7=Sjc7FYyHpuFFj_XO;GQ6Pb-|(u|c_b7!k6fYlmag-umv5!oTRC5j;nLT=&M4I{2btj2tvIWcZ`vi| z^6rXvxjNok^_*m;mUE-dn~hN;f4zyEFM9A}PvAM*zm+dXFq+6e4EO@2%i3`_3XjI z0Lt~G=Qj>8@}DVRtL;$!dqcR|&PmemDr80P_t3vA2HP`qhY|MH!_VuuF4v94!>a3e z9bZj44|@CkHqm2csJH#3-;f46#$!k#${ql*nXX|z&H`PSW zZf*bELrnv0Q2t$QpQa{5Jg)p;^*_098v@0Xt3>^S{{e$f)Ar=BW&4zC`<$)vmm(&g zdb=T}DSv7Y_H&$;zgfql*T37<&!-ld3@cPmY7h1nQ2R-K$p}8C@<~!J1y*#R&PN@S zjQ*KguRru)4`b9nBtL7E{Ghs8qH^-?3^x{`E!>%_7%p+F#|L8tgZb(_814 zkiOXcMfv@D@bj(Gzba%!{H#Xo`5;r#)AZSy>TeqlG5lG|&(!veYI}P9@~HZ&MqMv? z>!P=$f3g44{mT`qCq@0Oyj%cxtMZjHeic~JwLS1VwEa`p8Q~$C=5?K?(tl&hzsN++ zr9JfHAGAH2Pd0)RRL@Seb7zwwPC$F{dbNjsoTzpi`ohTlqWP|t{xzKabH2`Z)jEK@ zeBV>PJZ>MC-a|hQRr%C7JuTk4=w_2E_Yb@_E8jNI5H*H&CTlyC$L%W*QhNxgJz$w6 zS4917x;hM=XYtC=aU>sw05=PZ_mrMLjGG^C)_l`-{pPv9qts5C)xgKAo?~=eL_an} zy7Gs6`NMm#=S*!Md65ZOEHmW#7Ujgy z*z4K|wJ?dju8)wA{-H6;HK0+;ky#m)1>=!uYc23zE$n&1TDe! 
[GIT binary patch data omitted: base85-encoded payload of a prebuilt binary, not human-readable]

diff --git a/third_party/prebuild/x86_64/liberror_manager.so b/third_party/prebuild/x86_64/liberror_manager.so
index cd9ad8bcbde41fabcc1ff2895b3428afcd04ecc7..d97e6ef1d987e5ac05f425392759f23c7a6ccb45 100755
GIT binary patch
literal 1168920

[GIT binary patch data omitted]
z=e%R&`S)@^F2%^ZZrF_WZel(}WH_Yy;SI`I6k;Edc0{F}5l z>6c=aBm+pKM{C#O}1<60u4f4k@u1F^R z%QWz}GtNEMW*okn<5%X>Ke&D@%)E-|*{dk@X5PaPIR*He@1eaOtalX8Np>>g!-eI* zAHjY0+vKmRIzo>ZGv9KX^TKDG7gYMiDkmO-f74gcrw{29o;cL(@a{VZJNKlH>a*p9TPU7uRg`uBoPOvMP z@E`jP?UH`{u`Tdh*n=+mEcXQT>WmL1A2p#r zoBbm8e4T!22K~}o#Bty`-Y}lyRnx0gwpWCmj9`2y@%9^xyB-+(8na%KCvwz(Q-bsauJ?!29>hOSDvo}sS^@ncey<*U`bWtMZUX;);0Em-Aec(EqUR{kFF`^8A3{Uyg?St&Dkg0rP=2t|LCA zJXb=%e}#EzvF9r4z6-rBIPg1qMCSdOk&rXK0OXW;=it|%e>??rnMZEl2ma7w(5Lj{ zJ7vNDgy&~OpH+WGzqBX{eTv$V(7xFuN34Ap@S!Ds&*C||IOFWkP6nZ$%^F`wK=JR}M z8qb#seQGe|Tt@qpeoR!gXg^b*eo4l43+28veW@`o#O#4xncr))REDbgT?{>xSOI?D zKhfTzXqN3~u3Ou1-k0`fa3B0E*P$|wR^n6JPto7fR_lY$jPV}}j1oD{Qp+pSFS zsMow7Y(PcOPv6dJKa<^o-+UhAl>XhD0RF;7Ku@Kdoj4B;;yft!pEeTmEMwk7aGaUP zIFJwh;zyjkjC<1<_bPL>%CrwKPR)Dm%8mv8Vq;!t8w-Aae(xam)_6VIHTV|f7deL? z08ZdC=;3t+g5x8Alf?M5IOQq8edpd^0H+P-FOEvCuJGuw_tr$PcJW}E%_ZIf94gMJB+eHs!zJq@1aolyHoCE2vdftWJB!B3{ z{ggMZquE}F{y5l8Ni zBon8CC)%56eE%|md6fdpONgHLsSefs=x_96qpBDeq5Pgg8NM+naH$=<@*Q{bOgL z=O)BS;<`0yHtL;5`h|nQG4pui9Z>H{?z>6+dAbMab&P$SCCb3G{YNlQUYYpg_}x>= z$Eddg>FsH6rwn_W!Sm{=Q^9|U{B@R~-ki*{iJv@u6ZMuofc^>~f0L7F*G9$zGEXgG zJn6uAQtW&~Uht3QdHWFJv{T;&XuomgaW#vgrW>mqI}UlwbFl9=X1&Zi$@s0Jey-1V z*?7Log*Z!d!yX)Yo>1&3jpNtJ*jH*$8T|3(p&yCEf1L_>l8wAdw>HRAEt~{lt`~&=W?$-aCG;bHd*$2c zmp>Uli2dCA4tgtF5d6|#*BQ6pWZW*}bdCq=y?6ui%eb4M0wlffb}k%rl@_qdse+KF zA@j*S*xriF3#C&|sdpOvvlGwF3VlU2$oa<>=t1Q1$d7i}@tzIQ!#GFKy|}L-@k!?u zpts;UQSygpC{GIWR5HK3I~46L#e9pzpHAw-dEJlIdGD^QXYV!wy$0>S2HRWkIOtc5 zJlpQN&`+-#(7(hdXMRAvy%|pmeL;25pE53WBL3blh|heHtjD`@|DrkfFGO!EOQGIk z6|?qxEc1jm#-@A1^$&q z;IG6l9h=E{>TAd&{kUcX_=od5N*Qzj*cZ5Ho3GCB-DD3kp<=M~q z(b1S6PdNjps<96{PbDyQdxJe8zw}EBFUX&o1o>rtnR^EODU5I1vfg7ncVvD)ZpZa+ zy0LDW)F16V%kvDoHbp(xjzl}PWa4lG&ryahOd zpK=`d=65UmtAqdieek;yCs$4I+qi!x{vq~0=nc6}S7KXbfO6ov-r|fm#GWr~27S~_ z;ENw9=LfsBErp)Ne)>@V?$bd}CjP6cyET4Z=Ba{7f5iRb48uS4r~tj)-vGN6{t|(x z_dMtElEitUHu5#T`CV|Zn)kIo^!fqvs4&SYRo(^vX`V+C{U83eojW=DJduw^hFO0{wD4*FJOU0h}p=v}z~CGuQf19^JWkBOgj zX#o00`cL7n@CWFtS3qylU)$6NBU;X7T<;4`nltpd(i!@ZdA}&_HidR8IM*5P4IK@9 zvCl24;Tr$Zb?B!z_0W-djC3Q9QR+j;c{UO_64w>sdM(xXo+ZT_{N{bmv)iKH(%eU@ z$a>!mLA%mdpj|~tcRvjNU7SZmZ}}LPdND37M*gRRfRnlzICV*n9Syqw9?;^?aIgb zXaMC2;CGbn^s9|XFT;CR9QHx}s-(NW3mkX)dFl7EBSB9leJJ_cYyf>8J@i%pb}+ z)F~(EL%Hs*O#YM13z_S$@|Pi}`TfFc?SW%k2S3w_IDI%?%zM)E*96WL=IbQSd}1}3gsVD6a3%&3O!5QwS;-j!B2s6 zmi*h}z;EW;Px*p=hWnwShqG-U|7@Py6gzK0J6udVyhnesAP)LB-$#q#eq|c>D`k95 zQh}vzuM6|-1#&{rCe0vE6!R5W&Dm^zd4Zq8e3ZzONIzh{clPQr@JHm&>aR+0zJ4+b zdTUJmEIJ2yQW>|4J}0XK5|+L3yqqWbf8lu~b3OYV@B6ZGA2XT!3-}$Y7r$c_|M@ok zlA~+Z@iJREYF)4QHPD@i^DXo6EtnsccilN>^T<`mn-sN5Bvjyj` zI;3wIfc73^oGE_(ZLW{TaeXB5@JDJPto38Q*LJfU=u(|#5WTs%0l#52@JpOF^Aq%Yar#f;-*N-=a-8=&P|u%U zfjm_gpubcYZnX>n`R-n^L*RE|94`5aw5OnV_!{!d zIE|yfI)4K77Gb@!yFvcOdC^}IcP->Ogk-LdME^~=FFBLzuS~zhbI2)1{=uOP^k9DH za%vpp9Nz+R2CyG*3_-sf+Xi{W-Xa-qq;Q`|{OUrU8+YG}cGU9@-}y1< zo6|uT{9`InulqNc@v|~Zt1RRBy2i}U%Q~z2ImnsrfO-Z0%x=;Re-aT7J*4qGKtbXk z=egf}j5lOn%f)2XIRZ zv@6G}zzL!~6!d|dNoOIa#D|3@fu3>~^m@eqUUiU_=YI%Gb|!tZ2jr}6>^lb)27T8R z)Z3Z-llueTd{59xO%NJ?ys-}bj`L$H&X4iLarC6U8TYU5Q4Uu3V-oK(QRZru@(n@v z{}cTy@z@T|<6)e~W!$B3|M42XzZ1I+?uvZN#M!`6;ha?}vYwL=WE}l)4g3pvj!5Ry*8I-;JpIxc;!ipZ{?d%oB>(VPec+q#{pHL7 zyIRll*kb2*`Q1v&evC)y$F=9szvlaE?IOMFZE9C1|0KV%{uErH{aJQ z&iU7T2dyK&TQ7DE_|lI#Xb)z7si-&RP4m5^geb^);v#Tlf281A&U5Ra#7+1(? 
zzO(k|2HnkYP}jby55=0gS8>fs4%qm)&=kvB|Vk*Qkw6Z zSL_R%WZtV#f&6VogYL^bkHjtgRLALgq`onaIE)3q`M$Z!Tc9uHxeJLC{!j;mwEP`h z(eIU5Z_IJ<-(3TH6MfF{fga3z-B)t_4rCruaHcE*|5V;fqoxn5tm6Jf3imIdC7bOj z&pDXy=w&>CKCAKFy5+>V$#dpj%(u%t^5NT%KR5F&vX02lb+P%5`Jc*>YCV|m*6rK{ z`sSL@gUm-GH-lb_`!X^f%dQ2zY!di0#|!gOj=Y~r*^MxVLl{MfOzrB12 za9S{)sY(8~XF(qGox6%jpx5WVo7C&b^T1cRZ!7(HEC=c}-?{VlM7?!$K^|GZuiFUz z`OGf~PCEVJ1I|0rkDC@ko~RP&uSt~u%p&0Q`w93nNUz=-^}17UO6^ve`8)U@mcqD} zdPlzsecpc)^bX`d{2uBx-*XSH5B`~q!vo2GlIJnPcpgkGbF5M<9QB&-wom*5_&>}= zyJWnqX%4#i{@t~*XqWjud$V!izsqyBvnkK_yf50{I0xuc3-T=90llf^wpFI^+^>0G zVkpjHw{3_20 z$huO6)mFVn_?>to@*jH*IOe^GckEE_1?IOs$v^)Q=;r&sP9xA?F8nUZo&0Btf`2jZ z#S(qKqZYzi59Yh`MGt^(ey1G3Jhm6#tq?zaKLPcoehzzR$a-I8znkyQhst-^6Tlx# z{yvj|lW_#}*GWI*2zku+;unpE-sU%eon+3Bt#q9Hs?Ggk9wOj>K4hFWVqi%Uua~|N^-4bue;@iuVje7pdbrIvEtPTFMAADrquy)0ce)Dc4V&TI;CY@K ztVVjJlaT+Z@%{B{+ z@t#KW9p2B60Dm9PSA0kL7j|Vnoc9AyBRz`eOLuS|qz36z*e-W|$5)Q@cT|9+`=tfr zd|6k1_W<>p@19SZi+auXo@~z0XBxkQt4jR6bAi9h6a6dk=fpOS`+_dfAaIZgy2-` zK|S-`1kv*zw%2@L&u2L7)}Qw^Nxu5*9N;gY9;CespMY+@w;sFz`ZV8xdN&mO=6mJ2 zN^E2?DWV|hT z;|VIz)%fPSZ}-9==N_IL7du?WxMI@)j3b#ZGG>6^d{;g4JeiiIT^!C+C@Rv94EAu72De1yr))o8- zGayM7;?IeMoaX(L^GbrAm*;HXCjYP1p#P>khbj0oN`T*dx80)#=;r%(&g+5Ghj!SW z_&azW*n9_kw3^_wU46;(u?gfKSq=J0U|i%+`U=LGDaQBjpOuFmw(`8C`0azSz@JzR z{VViY*TBD+d2F%&n*8qg(j(|o;)Dj(fpd}mR>tqOf?xdhig>guk$GL|$De2qrMUj8 z$a=l%0e>d%hY|aHR7*9xg@wv)`SX7F4=Th_am=iYDe z+`H(r(f81g`M#S!*GD$vT;(9fGZPulC^u!5R$MQb?{Sxjg8c5(gT%=l9z$4@WIKzaqon7sp-Zln`%R!=6m8N`8~+s$;y~=sz|Ey{Yk?%gKwtxAB~_*h#^Lkn_SE z;O8U$=NZ5;-&5}S7&wi&pCaq!rD_1`cA4*6)g8yMZ z@QYm~Gy?x<-X|k*`$3MQqP)Ld{LeC;d-3PF7kA>%xCZ>#g}_fH{mwz2Uo!41KGha^ z8~=sSrwWg)ve^Ol_JI@VB4@;G^vhh%%QD|p2J)}zivc( zFAYY!1g9M1H}k#7z&FreH7laMRamdxap>neWBu-(41Dt)$p))IPh@=JNSqJ%pk1av zcbftH&df8kC4cX|;5Xm(bD+Okbp+!`*6Gt1fq$zv^d|Puf%|SQJJBxnbF2KC6Yc$& z-&2S_GY&(Zu3XQKCVz`npm(4iT97{MZPa^;=RX}ue|sYIaIO#ZFY(o>`@qS~do9GS z$}9qZKAx8tL!7`Jz;DWXQ^h_X^ab5~xAzsE$Ns%4+AHI38uOf`e}p_z?=0n)w0)lC zITl%muAT_G`R@EAH^|?5J8&fL^xh)S&G#8UQQf5R&370#v;hB^`M~jFdry1-JzS-K z6Mxm3@_6z42=N>H*3t;hKyBMJX& zF64A%-pQVFE}MpWa~pa-_z~!>xE>KZw{bo4nz0_~sSHWaYhfQ^Jf0-}8&!bQ`c76q zws}9~FUAdmP5SNgH9>7Ql&UTOy9G2iLlR2KB1^sC{VA5*OJ zY$abf&-Rqx30CF2Ch=SKPLSsW&-1jRoMlwtr`x-jc}U66+?fKsJ-r9{JCeT+&wD$^ zW^GrmoZvr3y5JO516<=5GUCr=+(*m7`BBDK%aUlXdCyo=?q{W)Lw}W|Jl`{aVl(n5 zcP~Pobk29OevE4ed9HAO;S1vIodEi--e{Nj+i{zqAM+i?X|$6+`Ho~`_Dh)`X+Qja zZ42>_g+iWXJlCh(rd3X4pkCi4s8{^$@GGE4wgX-KSc$>FpU-oiLjRcOdlRW270y}Z zdJF8U`tyEN$tTo|HyiQf?wPIWDvqPN1cp--=GL08)`R!Mh;{Iv~#>%sHz z=6iw#bE4i6%&+FB9$xJMoKak7$^4Q#9dz^k!Sg?YzHBz^;UnS{;rYxWT$eeMerFu` z_w)dNIO!{9L2u?dZsQ2>$JYSAG8?O0JP!F6u7%zLNPo(CA%^q9Ez<8!!niQc$&8Ew zJ%;OIv4;vVkf-z|)GPi!BZ>Xh0riSKc=bje*xm4BtL8y|^S-qot^()8Zs2U?I4ZUk z^qtIC%lKXD5Blb!;FtOCL!MhO-v_+A4E45%gZz%v!}mP5W1bstrvSB|Kf?P4#U31Z zZn^d-;JiVc8r#9&m*-bSp3Q@xPk-Lq(473QFkkA+eG2iDO?dCwM#lf5=dpu{vjui8 z^H;;7XqWdC=t1Oh=645CJO`_WxmBW9pdaV{3Hc@e*{L7!ZCp3yBY(~bXqO|uqnCEw zxCtEde*R#NQ}aE(p5wruoPm1PcA-^1`yTW#<{zYgZ@h_ix!gj#-X#A9ekYZa`{ok= z=jjPKPZ!Q=50~ke8Zw_R_BqXl_73O1VI|^w`vJ#%M{j&b$m7U(L*$HnN#3Ic`zwR} z-kQO?Wgb}YJCtX; zxL>ToB&&>50hPv2<@bFRNq49VoF~LDNc#I+M{ngiTKcgJ^K!cx7m1xzw*lud^QdB< zf3jb?vtOzZ-~$A7ZH~fzWW2n86ZG6X7a`-((H{0=u3LL^UQXgYOXvUxs#8S{|lyMD)2z^3t?8F$HkR$vDD>oDrV)sFcx$(@4cip=&Np{y{fyda*p>KB<_OV z-X?uM?aF)~FlZ)luF(&Oy^Ws(y7_M4Ip&RzGH)#7<;Ee{kNKY9xj^C+L%Sqz(2ncJ z-8?TOI6sC#KjuA+RZ0M-F~8F(#CG*fK)pE&V_Z9M9{ef-ID?rV6aBx+`Pzl^HKelH zR`5Nb8#SRfk!Qeo>Zc_*#UGa9dEgby!wCLk?niGu34YOM{X3AytpM;xQ~rS;gKnPZ zKcE63Z71e={xXZfZ=UBb#C_iL$-ozX((5JncJ8c?@o1jUAI|-KPwwwayH@Uio}ETO z&!UF|4?%Ct{fPE#*V8VL(|iZ;h7b77``m^d0DTa@cWy$QLTRA8vtIGT!Hc1v+&<8= 
z$mw1M^ln_wmL|^P+`zg21NhZ;l~rQ?0KfUJU(R}<*XQ{VS?@RE{Z#%dpyyEHhy4ov zCp}(NDSp)ONc) z7vu?~p6ha4pXd5l=9dA!sCR2_$mvM@5vn?kGv0U);tLr_Cf`OSCzrq%>~ zvGMLq*CL>&GG3K&+G``|=6ihoDu8~@*jFmF5qcQKbAzJixMJWp-|w5h8vN$_(ce{q z9-?RuqR**yFitOWUwaYtbC~$%`+ZOQpx%az^VRfcm3|4J*KkF@c$40l=j3hlhZ48k zItzZMFTj79`~%ZKAF&X0@iPTGqFs-8e~a2)u}TNEQK1oXUx zeoySP?QO`v^-xwltc*i@qp!kl>k#M9POLWx?V3h<|0Up${~7!;zTQ$Dq4g735co2# zAI^e3IQIv?wUyHyawZNX|K<2)cb(@&eD%Hssq2n zeaPcPoL{#?{$yjFH6a%DCay=lDlD;z?;YTbWnN2#(^l!B5}3NZiHr}$?_E;%qUl2q z!A>Nu9-t-^O?N8*d6J1!`A^Wt)Bn5B&;0Te1<8 zw%8-cS>Y4t`CHaIg5PHq;&|*#dI0l>Nz5P0`ZR<0pE!OE{p2CP6Xzq_S=6gOyRgcU zc94H&I_i~i)W9C&HNzYIT9j)g! z{QgVkol%T8wmpVCGQV&31Ah+g2Z`S+@(%QAzE3x+H0aa6NB>H`bnLs3|0(bDe2?6j@!dUpquZ)=PC-i`EL7xDvizyD`S~$TAb%^K1FS;*>>mlerPM^ftYg3TJr8{I zyg~aA;1uR}pl=fA^828h@4*$KKU`(_!<%lfs~CQtsJg={hm{(&{h05^4eSg2uB=z7 z(JEc|9quTuTg86nQU4V-pkL;)U7M=`-+VuA$`aZS&nL+GqCL;26%T@bB%lA|2f&Hw z2z>FY$sYr!H}@~Z4{v1LwVrX8_``0AXxCHTpCj~_4;+&*EG~b(B{w3rw-<1pJIiCxE0H+plcK!nX zH2TSbr2ADxdp(VF4qJHMIELRR%6!)`6*y-)LC?X&=}&*P>3!6zwrj0YxGnfo`Q5$f zx&0W_Tb+3@(Q~`I(A#FtFT;pGF%bMy48PQH4*1P?>+CCn{^f1RDfpi#N3HGo6yKRr zX#uMg%@w|b=KSO(gKauCbWZhKG72|g-_xIaS&WGI*x1@3XDDln7>0@Yz<~#Do zn8$OOf_R`L`G;18{ja$P9I^lQY5;2a&HEd3^PJ>n?sFv*r|5^+XLJlvic&@VXGyMi zmh;?;#6|Ns?#%niAAJOQ%zGD$aNXxV6Y@*FmwJNVk@pzLd>SzpcJ96p{2r7u#vgh< zxe|I7yZt&5IJaCuSK)_M`V<6?`5ngxMW7$^UA(t=Zr_}*KT+r0v_4bDL7ouehw{6g z8173tlRn`n@GGrCzcl1HI>htUsoZyvd{?nNz#q)}8I;?ximwWcb-g(`KZ<_J+JWAf z-xoI}|6s2B8g4c&5`C(9QSnl0F2^lStT=)Y~Nn z{N{V~`Nx3n%lJXYb>Hde--o{f$C-K_Nxhl(8UDa{zCPo56?R%>ol=OF-+UL(?@h>Q zzW+XTFZf;EU?=d3Hrv68;D787c_c0!v>WsxBSDvO>ZTTSx?Vf=f4WFN-YtuEb=v^C zv^O8mbGpz!;IB5@H0CpxF`rq{LbKVTE$`(r#&yxxA%FZe(1m|L&##*A^nJV!_1Z51e;nmp@CoRZVnOee6My)Gc_DY^ zg(UtA^}{$yTnc|HdaKF(#|-9gWxTKLhj!)Weu3z-g_@wX9?bUv>n#JlcLd~dRQ9CH z0`i;h!tbMhXupqt;r zuBik)xEtSXUF5xg4*dSTE$cnSILCaSy*STT#PD2774m<~?-L!3?-LI@KtJaDgGZQu zGw*?qyb1nuy#H40#D(WwH*w#@ll8{)dxUi6Eng$O!WQ7npNW1EJN!*0#&tgq{Qz{u zW|g=|&?6q@wfacyN)&*KWX&0ael5N4Cg8vEUS(%SA8sQw2zj3bb8sDw+ zrX9*SnmQi+_{JURLB@N{n~<{rzdKH5yT+A+Jya+Mc`A`!Z!h>~CIUzDW1YuBPV?P% zx7Milz+Ui6o_5L!&}Z|$8u3@L-vejtX6Pq?^=>E*dNY2fBX)kL5agMC1^hBjBgTL} zqEXg<->L?J?sxNDc#mG7$1)Ef_Rz!&b{jhtJ)+WmR+)Pb@-Hm`{rFMN-fdCuj2x&} zaMpOCf6aHJHt>F`L}T6hc1`Htj`@dZ;`i8!dhPO{UPsc`Gd?%JbEz5%97ldX)t>xw z?x4MEjzDicNmpvH^kcp|zi>0!l{^IP68*2|evSDZ#}UAQ(>o7c8o@QWA;ElRY|}4D&)W04>;lv9aUhdl}= zMsogox(oVO!^A4Nt^$7#&o_#GS_FY^zL&SJGwANzN0WK#{$k~l0eojo z@COWpey(4KJR;}ol|eV(g}_~Lxgi2Av$5&|0Ee1~yO7w|iN2s;tKH+c~1&EWk)uEZHr26ok#_pVF- zKKKdpJmq(bGA^p+1bqVI*g)dUpnovm#alrB>*Nn6|I8+k|Hf}pYpt@3Ja~$>Nn8+mxEu{ zO>WBZYC9~-{aMNT9pSxjHs;?{WNDRi&7rsXZqS3YYYx}-=DUX;8&Ge7L8w>SJB@zt z5c5}qiGM2+?b>6k_ms8>ywR=J`KRkyb;&;QGK z8N%}b=DYUI9MRrCDxtl?zq%c8_Pqu>>`45N6@$h%-z6-O0(nwmAdigS=}mxt#U-mg zN2!Fi#_7j%I6lNL+Y~tFyM%lAy?-C(pT+(+ZvuY>&W{S#D!+5SxXk&&iS+7>GtGCG z{yYGAmVbiwssTTJ^ohI&sW$m*N1@*B1%NN(y>M>y%TulgML(r} z15R=}>Xq?Q>?G*sIgQVG-pqUtvCLP{L%cou{XFGquK;zw=l>zEMQKbuyK`QM;X8@q zXLfgh{24hRe>LKSss)S2G2iL)8qIolfWHR$^Dqzph~IsP9!9GJU>e8nRq$^he}0bN z$wvNp&F82$jO%ofGg=K0jWeI~`y0gRtpX%XZ^e6E5=c+wdTJZLUl6-JG7$aZ$hfo+ z`2%=p87e@d|iLmduTP}G~fF^Jsso1>v-0Ab6{`aS2gzUa?o$5@cft`@uw<>pvPSV zzfZ4B`g{3-Gh$WN@in6&>NUSNd4u>i=1FQ2XXpyxKaPh!MbGvezvlNQ=}J&7j}y;5 zhSNX4&%BWNu3ytr(2sfUVo4R$JCo;}rN34(&Z*8grv~K=?*g3KdC=Z3NbkaZgeu%e z5dM6`cPG9wSF4=oIU={8fK#6IQiY(;1fCz0_~Z+&UmQ-NAC9k^fE* zaK7R9Ba!5<{u6Kl<^o6jNyj0eUop;2-68%$#(`&uQzah#n79G`DC?bt%pdj(2fom2 z@q01zJB}T_Am>Wv+42*AbSubn>`&-N;)F^!VE=P?K2Y#$^oISI@8yk{2szF7a6O%X zpLh%SQg63tjz^AbnKut{{&M8}CF|l(3|jg0N;-1GmDd6avb=# zHb=jR{vY$c8uMMbFTO;*m-3>$!v7)Td|&31ixPhh>n+QAC4WB56ZLN9ds{NDH*uY4 
zzF!}}eJ=AIxAB)CPk?bhz`GkjH{YYr%e?Un=8eTqI#q>!y555xL~oV6fz#$J?582y zHJAR-e2;z)*LSgyQpjVTS1-VL+nw>Y$TLIT+n{mM znJ1C?$Y&sMF7o`$3F>VJ+jXM>%*miFdR16H@!d?(JCc@^_LxCOsJ z{}bnNP>A~r=64y(7dcy;*Ndeh2E#^@yx%LK)}8FwXHOzlS=&q4oC2*dP43J@8NP z+>!XV4ME^H-=n;(6s2+UF^@EW_=(JG4QF0U#`|OD)6&jBZ)*F%Dnqsa=gLgzEgx~d zRRNgB4|4!r+SQ;xHy|(~1b@r^FMdIT12jNDMEFp}*wfP|zCp;~ppcmO@aU+PZXOAW z=G*mo;tNc!7L#Xt#S}jzCd4m1s&91201xl>K7O{i_}HMx81<8=L6Ke>D>%X{LD8*> zTi$zZM<5fT*|AEtS!B7?!zEk4>06)06J9jlJl zeP#mH3W@G(wMutNv@(_0@TkymTS8n^jQY3uzWP@UlpRG4R0c3uwZ%_y4F+3VpSU0o z4_`m8*5O{hE`E_-?UmeKuC@I95(4}ply&*V2ZVY=`qc3os!UN?VNic1Qh&d`0pSq} z_xTzlDP3*97UA(q*L{_JMg@haKJkit+235{Z_1!tjUNUj1ZMV?@uvVEkHG9}Q-85+ zQEX0&60U|wKzwwpWpkD|{rKbhmZ(|(u0{Hbzbl*V=odIJGRALUOs06*;8-SO@dZZ@ ziux@TgI4Hd1Sb1Vzyj9t}@zcWr28ZTo`|NX7k1$bgzjT zj6Y>+NK29RZ^0p%EzSCem}km*;SXZgS^p3j`7)>u(9{c+N49;Y!mL;^5ik2oWOyc6 z150H=(NW6i#U!#=OSTM|%248D%iyLB($o{}FWVouzU&7YCc9rer}XUp{G94*M-7Y& zi46}@BRf)itO$=<%KeI->qHKW4;frLB08vlR#M%8QO^^#2eDXXKxtpE{ZdwXrfC89 znGN#4Gyk4BGbm@|6JI+fAUw8hd!IN@S2vH?*yz}f0Z{>=A+Zgt|7fe`8xIdvmzp|4 zB0_@VUE|{0`qcJQq}mZ7ad8U5!zaL^cqrSHDJMKKCZerReZLNYt{xtlk|;jiF~X$h zB}MsH1b7Z4-pwTwp?}B_KTZJ}NCbLTWBmVmTmR3r+zRo8r|yk^tmRCQ|8>Q+`mZZ4 z(+vM>ZF?5c)QSvrnoa%NiypAv zzvlt}sZ`G!%>N$^{{Kt=nWpCWNcB-csvj?^PS`Um0(y3@OuVDTqvzN`^ zpecT#g9rP?gv7>0>r8k25WfT$CMXObX`W837=O~~;*iLg_#u)B$&x64mSTL4Y~^iq z{>>-8Uf<~0K>@MBZ38_z`TBY5*42#-u@cCcc~Kq6d(_rRbdRjR3IErBQ$IXT?+3(( z1nUO2udmWHVUYn*QFVhu;#Gpy)g>l6F5J>+d{6@OX)ka+YeMZoVFApKTPds9fl=|{ zks*E|It}yOAI*B6At;j!M?TxPtLMwvbc?l8YGtWG8Es}R{23(rv`f^W@Tg!v?~pi^ z!P38eIk!f~Q{q*1dm0Hc|=K92mvA(D_E5oR{^xlo3Q~Xw`L^sv?50e9bvnqMH zPM`S*17D>2i=T3|{so7NHOTaCk(SV8baho*Dsi;JxLemZEguE9D@$8K?eD8K=^sCab^y>n5Z7UMxmu!-70SX{t-=1nK=6 zRPvW{tNO#$?u@ln^w*4P2$C_IF_=Y}|6V&R`_+xfwv+0HMSCT%@_Lnj~$C$lzZqWnd{i6H& z#Rf!$hWx#JS=j$d!mNycDPtDa?4@*#jt>io#U%0fLO##^S2AYd`%4j@=geM0Hywef zT@Pz-r>!;G-TKBxNBZ?sA%b7uC=ZW1%611@6Y-1gC!1fps14MA<+pBb;ZZTF4+DY& zV$>$2UrcOtd~{HB1S7X>#^=j^k!{0X#;o+3ttWjMzj$|Zc)XPV-^GW6e?hfwE>Ms1 zfFS{qq9sa`opQ3}w^XCzl?Liaf$HS&Xtfjn;#BTYFE-c;MCs7~j=fG$L`XoaUbJV4 zRk)?z zY7^5hCORTKXo!cKhlg0W>dxAk6$C}A__poe{7?b_Yj^X%A=$GET>cUK^KJX52+w5r zJN0L40ROu3|10p%r2p&oXD|D|uD_T4p7oo9#kE!z=bt%dwQq_H=pRBm&&I~Huc)0G zl-Tfj&Q5&&JX*zcQi~HSWL4YZ&;9LRA3Q3~RnwxYU-q8NBf-O?lgcxzy0UYBW;-*z zhvhr}8>3mPvc1{>W1IiI5v{$OKIFt7|D8qtGk=3hG9~<{y<)YqmWcXKQLF|7 zC_(jEmTJ@*>t#`Z+Mvij%5aH^)yGT1)p0pFN8=aYKpnmF3seUddECU5#4X!j)ziNU zi3&zdFQhKA|B=dy{8dfrD3uNYv;VCJk5K(<;_+zBa|s`{n9TgCs|zMT9Vx0a5IXgj zxi8=o7Z?#4{QuZ{yBNumD?O~VBzL)gTCR5muLVP(4SP*twNzzwS9SG+)|3WeEd@Y5Rqe1)GornSftt$d>saV+qJG_+pz6wm?gO zK|h(&$^c=@HeU?U>xI|fIq`QQZp8i1tfDbZPIpz@dm~PqIC0{{`8^-7FnV_|nr3S_ z=CEVtitM@tYNKPJHi8o6_)=@lPTszlxWTmw=!oymI7s=j+#H_X9eoLZF`mOG5CXzI zSS#GIdZSP%2K>A4A3PiP?m+ptxn92wQDm`teS89Mt^vFd!NZOx`e*F?cfMHX?c$E% z7fF7#&#s~H%Lc;Gi7yYr&r zG3KmkGkmNHP?J}7g!8rX>16Fy9;dzici_{zT0I=Yk(C`5D}ki^DQnDOX&r~1Ddr<< z@S|xG>y%DuFyR_whr&DAbaVE-qdN9IwPHB_lv1k9_3Wd#Ij>8wt?acGtXTe1XslCM zvF{4oT!%9?Iy*32L+&RHx_4{Wt{aQ@D!a)ITIuateRqU*hu$h6nz!@pz)Us&!~jZ9 z?rBzOIOl*O2MpUxywEFop}2ny@$bHW_|Evr;@J_H*3${Zn*N>5gp4oU>w?6w;p6D`yex9pL$NFr)yE;q#M6XK(hh$FPy~U(hwgNUF#7=8s4O zb^niD{*`$}$;xn#0dJJ1L^|%yN$*a~4k8vNJe|zP>5k5*3pFwV7f7X~>^9^PcLpO4 zxzil76$d|2@8!x95rdFJ#r@^=`ijxO2vi-e@2(MBqs7(pHO?^;`cMlenp}E!9!?j} zerK?npUmfw4{@*~41)k)o9GdB63`i`Pkma$6`#>V9-7`=Uta(reI15DFMpt|L4;rZ zvv1_>^+2LddZJPN7w(?G@Yv^Ws@p=?4q^;pL5ICNr)1}O1Zny0$#kr`?%gE}@OJf4 z5CevB;XmjdosE}^6-fy1!95&~ph z{lQ|68;zEz(RlxzzHaIr{LW(a97hy!Z+-3ecl5zAI$(zI-ufARHNc^;dh0cPhc6g( z|BE{`Br#DOM(J;Gc=WF4lP_P7GYWPAdR3eX*?`Y*dnVG63_VAIj$R|=DTp1*P3-wZ 
zDy$?V*(}Oh&Y4IiH5+qn$ky9@^C06o|9OMswjQzdW*80c=wv)oI~2HB=jtD zLeFDQ>H!6V_Hf;g`nrI~t@sr5q)h(ji|M+5`u5rR;#?_$8%@fv0xWjDg}Or$Ozece zCw8|ar-0^;KDge%7ApjS2_lveG`!logt#$ZDD=Q6#EO>S>pSXeBSWw>?Z4uVeRBs% zKGzql)N^x_NIuu!*HkoSy_}vAtnJ~P7@I#-@8AsK)K(B9-XFN;R8wt6Mx4Alq3gmK zB=A#pU2sa>#+}}S!gqc>oocbWJzhwIJXoy;+WW5q5aqR?J$&X`(E(s)K^}LyBEuI? zDR|(?WS_2ve01hTHok_ah-KmiMQX%ic~$8byON^s=^HLCx40^(_%|R20znkSqpW|y zHaI>!+pn!UGFk1*0&+GSY=&1_$qeRge&<`@UCu`U1e5#Cd_j^6JY5!Z%Ef?|#Y&;R zcUNDFFZB@-bC4i8hh^*7JsfuP3SH1fT)-1pExE_ip<4QLg z4+pQa0eCjp?qo?06h~0rlFR@5u%Dga_GTNPg}a-5fJ>d?*cEXGH&@t=kpTftYK$+q zYVgnTvkG|*(& zlg2d9A5bWx#(%|`iVHNYT4nWyv_01OPG7#1{_)}X;`&)a`xhY*kbz145U6)JtkdX) z^tH+&H!mTTbmTM+m|0Xf5t^lZ??%z5;lKg5b`J0P*TJnY3MdY)9m-LG69my$AQ9t% zWR^4keaU2`*oA0U#uU``DX|Og2SV&3G_QqUeIslbO;}__d!g9_f)Yu#!st<%BFzmr zeYodwmlHlj@F*am>9>y_r2{v|C@V=Lk8AO*3G#c^4z|$2JL9>u!#*5e z4z8zBd+p&lY^%~_tDR1YCGa>jijjHOqS%XtC{#4{)*)p&h|~_cMd<ES^mWkj6xCk#u(9Iaa);Bt| zNDq5+Bf(7k%l^h492l-byC$Zz-mioTxRq~Ki`VP{up|1oZF=PyF>mUv`YmxO143b5q$B&Dv5SJHG@-H_aZ*ld}?m{CvU#ZgtBnE`+4*1 z#M0`6Z46s=?>EI~@uWG&4oiSBh5!W725UrPF{z>Q&PaXS=HyEBX_vi_o>L#&!>8n= zqO5NAT}RgFR-Yo#0m8Kt3Otvt&Q3tkZ5RC(5N221r;j4c6qLs_|LSB0$JLE-%K~vs;RN%5SoEjJb{H=|SO1XSpD1tW9v{dZ-IC*rUR;xl0f zv}wI4-%lcQ+=B)>{PJb_m7Kq;00O3g@i4iC4H?6rBvA$SKOh^SNtc+S#XQg1Y( z7@R?QBtrPU!N6vmmkNxKr}g}#4(9wTdrcp|;cJJ}@vAWfXSZ;S5?4fX`x$}_Kto|< z+3k#)n34-P?_y3N=pFSQ!rs^|&8&Y1j+nh8_+P-61GQjY43G+7uGMW81&x91?UTD5 z3i=?$)xD9r(82*F(~`hFet?*M7J9V!;0~>7!y*-5&bwHSTZQi{J#+i{vUI`zJ@&M6 z(+MYC;XG18LR9fd+uWna6fs$Rb5Qx4otk4J*77?&Y=Bdwhwtoz3Sfa`{?ES z*w5aD54@Ja(FJ;Vash8@jX4a6<6CH_{_->CdcF=1wPeZRaUA#mNxeMG5 zy!;DQK0Y>;X_O+m%vit?^&=PXW}q5Q2@mEbRK*F4d%|f-PGnKTB&1cPM{7lE2UMWV zL?&OM=vk&hxrj)h$?YvCoX!Z;zVBN#7|oT(*gr03wCocgm+i=f}tM_-HK%$ZlLgj2&A5u{X`PCHqH zCN1dm@u zxIVdRo<7ZpUXANs?kvD2AngKo4Y7nhO+^<{H@vNQal#PTv=^7KpXl&r zFaZj3QipJ&8?Po{;vL6#6EApG1l>jhlgS0_C*o}t5hz$)2ZtGAj@Hx-rJu^iU%nn7 zJqpfNDk9kDTyM~;o&?WeRtIXdG>*wWg2NA}BouwT0NMDy}rKPS1PN7-RtFtjRLsqmKBu1=8 z+%5&dcrJFZgDHJEITPt{b)3(sh;<*oZ40{FlM7Z# zu}ale13D}4p(;4Pz4Iw4XcqJh2))Q3A!;(d9Hh6B~3s_5SqVl*`gRfhPKruSE zER@v^b$5w%$SI(}9IvW){us8sg{~`$Q4x%VTO-c6isdSNSj$qe9<_&fun0Eo|ZM;4*Y;_9+X5vg$dftKoDzADC#M>Cu zyyVt}r+J$DchS3L??MNDHY=j>z!u=YGO3L6B7uWipHu@4ZRN>g;NQ?4-EKpL%6y$z7gGKEo4efcSvZ85x#B zn^-ONLV@O}wI-pk?^2~f!Pnu*64z-R1Ldp9v#U*?LX0f6!{AuuI@o%6WNFT(DAxmP>Ozcnqt1e^ot){Ml;PT$3hlkkwe$PdrN^}wy>HZ3SgQm) zpCl3RQ}RE!l+SB>L23NNhudd+K0MLQi9i|as` z;r%gJ!y>n?57#T)9bm48m)FT7{&Bw8utgJYTkuffm;xlUd_JZ$t-!}Y53Q7(#mVfG z3xpj|o&}O6%~NFwrm9=~$kvj7p_K_#Df9eYvd`y%u4Vx*kFD9-TuGIgm@6Jd<_ZtD zC)$_LH@}9I>8!$-UZt$yF*Hy?#aLldWdN`#(QXnHQ9|q{!GPcllZe7ddMzq0Mbs+< zINzm(zLVHVV5J_+bRF0Z`tHLECEF~Xo9y{c)nURp&5;vcj`Z`2iEM_$DrszjF~cbz z9CJwFdhjY6EoLZ5Fr898z>N1|_W`XJIIJ#QK&wN|sauQ~6RG0*1~JKsqr07^lCy;B z?janC(r&8AIONpRA*8+g-hyKNqJ@`2Y?vX{OXJ*t~boI-zDWU2Tbd-&G_# z<7=O}n96fc&MU+-otvH()H-3`J=BidWXr`GM*M}YIc1! 
z*VD2NRd!K11gYlXesE=f=^wIJL??)z|jPybXTJsh?-`%!DKZLy4apzk>n@w>6EaM_(v_7N=3TPk?dB0 z$x*5kH`+@ZDNn4g=-sXAwwinahu38Qgs@X-P;5V^dj5NRvVy8GHz~m z4C^UHH)xkO*jVhknWQy&;?CP`&%&_{H+4?SDmDMHFty?-8TcGJqwe&a;ukss=wnn0 z$0SG5M;g!crm_@hOJxb7x z2P*^mu6C##6x_r(Dy2xME6yMev_u|)GDvXb7_xNCGJf_XYdp|B9)C=NqZ2MNh3M6X zrqC$1T0H&04x#Jq3(GXa1~8R*dY21k?f~q@*eyS(Kn&FetS*L#HKzja>j+5q)aOZIC=3t+-C$|Z$p1qIY=8In(-C<0c@_7pl`6$(L|N(eYk z0IDe3cHIb7L2fOzmYyqZg9QkZZb7Vf?pGWq>Ml$Wa8+&?HCGN_iA#o9Ficn|jiF&n zCAccmCd*(H(VNJLMJdyYdH^4?`sip6#h+s$_H*p!cI!uRxd{A%zZX+h!?a(C4Pp{a zw1v{!3XT+1L0q-2ZHG9Z$R^TMs#`x?NarRM8mUR?#_k3VX%HZEwCS1kXb@<6sjZ6( zZ35?AZ0d4vDMmWyT3C&Bwgu1Ij~oW3i1S7=?%8;DaZPD+crXpg*5CYNAsO3$AS7)nUUN@h|jA(Q|)$j)?X?>hKX>4{!=*pm#|i(qZ-6P*-s zx*evH^n)WTAymW|VYdK!r-9wGzF-6zwv~iA_a;EFu5~4PBfq4VSIVhamJ*~>!2gPq zzNHvrWCNwL>?s!>o$)P6jue6&1jmA@3$)P|K?~|^3(4A(?7U^I++PqC*FleYibbb6 zu8+B-MCnqRtvtCZv{@)hn6nAE#hs4mjpmY~@~RX^ebu2XT3K^x#m|aaQdlWu<}hNW zFnJp$pl^Yg1)!zy8>*%T#~0*%S*%_+O&f=_Td4M6yW6Y=lg*mUCrBw1jzLhfUK);# zJI+^!wLU_;Jh}P|hC`VB9R^fTyfl=AnN4!%H7#S`J8tClp)h5J=K}-O#T9z!-%oq0 zMr@qPPJJ%t7~^B0FfZL{djmHpHIbIBecv)U`!>fpQrEH*;D)5ByU_I7BaL>X;1sOb zq6EyIB+`DW76gj20RR}4hT7&xBGYbsGRC8zx7b`!(n|8^&>FE=b~yQ(j@U98%#q>_ z9QFWC&Fj8Ogf8x@kqqVLefkXEX6mBlch*aE7N#;sS2`!UQr(%}lA}^u8!>7wH@fXG zc+m%s{7j9uEjg5Y$^YCYxF`N$l6(xb2RP3h@`L$=aS3;DqjcPnDJM+!zT^CEs(`Sn z$AE)-N0V-%FY7Fy93Al_qr{|YIQ)hB2of!9y2+G*jS*)>5NFwn+h{R3Zgi;SHQe;p zNJxiK171mvhb=SR^X@-?RFdePhc3_*;kjipK$?~7h{N9uTD)vRw*el?CmdC$-583b z=S=Qe(ehCsKZxqa zEpl}JzxaujQSS2sP?0f)att13DorT~f~F3HxQvH4xx6;Qu?^aY*7o0~59^zdI#w`J zNcwS=PiGlnT`>Ou^GUarm}J- zwWK+-8!qZ5S1xW!fgO#CoRaMfXCqY@S@)IU3@RarAfQ{=&Wa_E7`kiFVIYQ?#GElP z3?x}DhBXJLQ2xT$TiWfdxP64wYv8_$#06JT^caex79HghS8PEvOFoLSWV=U+aSB%4 ze6HnUf}W4y1BGV3=$t$&Oaa^0nxjY`!C6ovSy0s@V;f^vI!=%oq4{yL4XC}{Zb{CG z8fv#kq^<>ATFfLwbj95!h%taZ7G?8o-EooJ`-aJ3876>n&HABuom zjsLd0AQ|m*r#yuBaqc+QxmII`0QPj1^SCmN*5Y?UPz(a5-G~8PxO1^k6}NEIaByA1DlY>wg=^{B8LMa2hst*PpM$F3#L)&3)vXXm`ZuCeRD{?k-sBI zF)4B*hZ-KbkPP~NogWXm){JXtk_0QWnHpzFugB#vRq+dJo=ThWtnIhc)7LZB6_^<% zR-0jigwLc|*E0RxE&<)pxE6FaxPFCwTYvySs8Yv)MLQt7$RC-!M0L@5rCSn?#l(RW zQ@V8lR+VcIwt{j`n!#WQ(wSKUUE%cz;6=85b!}=F(~HQoI`w)?pauG)0&N$Nc!&-q zR;_;MnxJcnq+Lw2vdr01uM{UPB~QlGO5*e?xKhV^4HP>Kh~8uxC}eDgv&VrbV%l3G zAC}uHa%BU;8pY5=Mt@@Gv8?_O7ods0L{VxL8d7*)E3nz3AJ3q|LZ9bLNwOZi4JiHj zdb3)*W(^N>*>Dr$%`JmL;??qcQvrmEMg*DtP^W2UjI#$KNM6sGB4glP49^PVvgZ!Y zK*!olqzIRR&S;gr1wU0p{|!kVrE3y_s}n|9i6+gq>R8g@dtV_X+JV8)O?Knq;u=L< zpGa%jLH1-pDJ~b&$?$dmfShiP0}wHSCGrI08-xmSaCq^j*vWR6s!jyc4m6X_yHGm~ zr;|E&)|JyjHq0*#w*y?1VBPLGzx4iOPHPGFHXa}tVKi7GOrl?mZ1RDq4-6zhBhtAQ zh^3dSnzOX)H|bo|qln~-@sEYviN*J;wPYmtQX2DVC*No7vEAY7fTrn3cMwaOgrXsn|Q z=7vUH$2wQSZ!=f%w> zh3-Uh=yAUH`St7q(Ycq?#UODi_YM}gjxH{hMa^8Bce@}59+)H8yEx_kPJy_0u~;DG z={#N-VXJ5H-TC!&s$`}u(ZVt9bd6-H$nXp*hbUv&>O6axD3hZBfrbB_uJVy)cbe~P z3Ef-?N6+SZk*J4&t44s&#JA!9YsWM0TDwN$bA4fVj*TisYoLQ6i(D{1#1nFUh zY>!wAsgzqB_;H;^WJseC;EoM5+!Cqs<&Bd!*lH+I#?Z}~bm$7JWb?sn3|F0d2+VS= zERBX7!4(IgTZ_qjQ}7nMOsE74;agrMk)rPW7mIrI4$#6b{WpvHJethmS~eM3Wnf{y z`L~Pu-lqsSm9V3*SNUF1e=@a4VTay}uE%J)5LqV%8!PsXhs`MA^m2&N@=dI}+`I8E zw@{{vkOsAMw1Uc33WQX+;A~~w(tFJ|MlHH$Y-7A$`SoHO!-ltt9OK&p;r1#D?%SAe zvAn*G5rvq(6XaHsmbWp{KuF%nWaFZ$ll7biri}#?oK}f!p+>K``x**8nBs+5-H6h~ z-B-20IyG9Ltt;-nCiE56k==|5bdfd^f#pTo$Zp1hGLlxuU{TyTiuudvINYA$vcFxL2{J8V!pLjg?n^gEOKxrPMDou-8RH zGz}S0PIbu0)k9Sy>tpe`Y&773e-y{iGocI)PAIL&%|J!`AzFG))ky16R&H4&E#U;% z3Cbw;#`DaKXKoW38+>2^LaKfQBF@J1O~M_18%&^gfU;fF0g{yy5|Q+fxB3g=5gt#i zS{GkY%bl!DL%a^9X^ytV?bsbw`}JJ5EzF(NkAyLtpv$6+4u$*)3Lf@ z_H%LJha}5|hjxR?Gn4^C#he$`BM9X=b6|0CE>(xar0pyWkTS)*3I#hSg*2ED2RE>F 
znyM^mhFV<@RrpTBYFaO7+F{@kA_9bvsj@93&_mhu;kr-Jpf-JBk86DYl%L}CTOgG)kn5K+i#(J zDF|8i_6n9?6B8TEvv|v%z!l%i{A3MYpml={Su+f^!3fbj7UgVcHZ$y@ z{9S87Z?{kZw~ep@-pQz0cXp8>pa(Us=Ib$B2T^GxTa7OUQ_28%k=Wljz|Q!{YP)3B zA|`Ee)mF4y9*m~P5n^XS3@;xr#IOgWk#IvoD}K2rLE%_u!k`?l`z*Os z(uHEEh$0jHSv@A0Mk*g;{8LJuQ$Isv>-gD#XzgnI+0uYzU*%)iUUAlCB|EVG;e0dQ zoWOgf*Bf39Rv>?_{4=Br@{nC}&M#002qKEk$`EVx zTcApPk?P~mg)+>z5qkL>U9A2T?sX*l^2JNUtHg{sW?-i|$a+ED1ndu$z^++orT+Vm zzc4+8X1||-E1jR{>s8`pDAUS8{hbFnZf5cKxnK*V3dSFK0vY`d*oDc_zgd;*2s?uY z6(tTci{=NJS2C(^@JC68P*Mc~@?*v#9}c$}UaDeYrNJYc`^p?bMCv|5r?`NC`%a8%>cCx zeiN$-sTs{$@fl9aN%uVOR9V;NdNEs&m1Ck{kT3%vczeH1+gl}K&28^p!eaNdI{$Y$ zbY$neS+H|<30m=X*(D?<+%b7OP8so8xQN^sqUO<@wjl0a6PCc3U8XK zt>L?(sx$RP_6OEI!rN)_Vhj!M<#;uw8ZTt&xyyy6ih;pBOILmsUAtPX3~`cBD-3P7V5)`MJWGeS08rQ?yy}2g zh_|IeLSMcfbBVx0^A{!(OJ93qoTqY*(s!bH@o&?*u(fWqS}fB!B`vsRdU6|-QBC<4 zKbJ^WDp0B4ijyaq=lRAc@))~I-t}smQKnAjh_?hFju85tY`&b)H7fM@I;n(N4vEB8?iTG(T+-;GS2zMZ_9;Yt)^2%3sA7idyt}q#Av1Dj0EvWd>bqAoLqD=zE?8MH};wF zxJmWlN;6kmcYyN!Q^X$V^k~NhsE681oRBNn1F~qlr@&2b>&hsvCOB5^PI3b}bc@*m z&1?K+I$li@_X;1B@`=j8;zWFNI$aD|XrhB$``@IMNzJ;JFM@{ZS*efDI zSeWW@8R}4Rljen`3%!rh3@I;I*B<=X97ei?IB%ko;b)8dWQ)~nu5wH+ z{Jlf_Ogou9&H9ivb-c0t!0t9iN!G3aGjt?JE4XnkkfVJ#gkP5fa5H$u^xQj!PGvsX zOpvJOE6AG9QOLS9vnSnFH3x!xvRo!DG0!rbz z0tHFUb0s!{I%r~bjka(~RFh*B?zTZu<^JG$?5Y}@=!PdI96QHl@r>g!f{c)z^$jjnCIL$vY?>lCz19k9BVbM%BonWGL**vivF zVhsc`Lf=DwVrQ06F+^27QaG(Vs!uLO2lYEK{@whbt_)!*UWzzxHtJ5Mpk?P74m7xH=K>)|WN`I8N5h=@! zt01{4C}nHp)arjgm#d?kGXri{3i1yiM7F#o#Ouha zaLY>Cx_Af0vc0@Wb%BEtX}XgkqT;eH6CIV6aZGk3E5}jXMh=emSR~?hsEdjZ9-EHG z9=lk@8uDoz%9!Zc1z^E04v%c5%cw3}#(^(d-TEUL8xP$=SM{FdeXW)mf93CE+d{^>l^46e+yjw$H?|xxwz!_XB#i~I@r&a5> zTD2CC1WO7yY{oj_c7t86jgHa%j^+0(lpsZOvzl?L1ftEZ7a$6Ww<9QFoe6eR0%7eP zks2gG8{+A(@l>KeT)1mi+SWPfo_>)o>ovbM^Q!ro3yY~e;=_H*%xcym?}>QJ;yU4U z8OQ3u>u!P-1zPUY{5aY58mUNNiUlJl4)YkP3P|?j+w@b|#-nc%lISj44_?0F6DuFV z_D&_d={|d(ORG%gmkW(~WQ@@uEfM$cyitoMSX>SK4jm$6PB&*dXaCd974p%&oNTTr z`3UD9!AXX)5R{iLn_QlJINz*Sln;OuELVedHeO$DR1ggC^r9bTtpf(PI1AD}U>)Hf zQoN4j>`H6MhXt=wD3lrIgWN9Nc*Rrr1Tv6d#S|@9PWw@J{3_nYLSoxtiEviWphP%) zhz4*iSc#)baZHWC{J6&I`7cF>C;*TiPzC&^w@V z#@Up8S*BD(Y*wivRwwxD^?2~20-#-vqiWGvdWmN3K;o$2>_im-QB_&jP0j|_uQ+vk zAG>2T#tas47sAI>OTswZ8B#4NWh*io#A1%EfRZHjlKzc{fJ>Z^9EKzkH;jSbltFe| zEm|tqd`Kr*^aasM4p^+K;Yy^C%gw3;W4a=IPQRh89{aimR<~U`P>rbrw|T@sp0CwxRm2BG3GLC68X%T) zF?yyLeIXRYq%?3henxSB^2$cI*r{mz?($$c8>LQ@8-?oPHbV7qejA}Wo8C&OK=uhS zmV!#0?fSYX&L?;7>u2&LEYo&Hs--J_u5#u|OF@l!F&n}7hk^JXC+ zK6$ec5Kp{e2#EjPFbLpv0)m{9Bqiab^2EiZ9Edh#3v~*ZK&@4Gl*paV)xX{89Gteq z*QC{*Q4PG|Trf(PEsnXHQDR!NPEe<-FViIehqfa;*8Fuc0?F%4)Fvs}(`8l1mrlk< zynwCC48_Zlv1x`yi@O;N(|8PJ)*Hp`CoxZ-jd zZzMz1TG*(Or|#I%J)biap#UrBjGZh>gvh$%N9T@YP_F{r z*uB1!l~u15&$n_KYvr>Ao3JVDjL>&e*cq<+iDJeTp{QPxkyI%h)4}X26F`(``V1sx zhPXQ_noF}S3@1y=L@pJedTfBc3QmR*jPn5Kx`1q?>EFQOtMTPWv2%1O9HD%b* z22fjlx`a}wa5HlY>yIutRZxMY7})$$MVXZFBqSzP>RVZN(0Eo!Y)sGJ4=&At{thjO z;)4^@imu+|%p3aj;0-!L()cOWErO%uu@nV308S_unX^-pOco?>1u(``SUUgM^rI(f zditE8wvyGx?k{p>5Oz0tiCBcl2VSB2iX9@8MUfX3Ha#$jhl5RHi~MP6)j z8Yqk-RX#a%!w5IJ*=<}1Fm$|=PT&m+O?K^5%rJs;S-JbnNSri(-HH$eou!B^6~zq15o&2Huk8v>9jID1gKI+q-lD5d*#?ji8_4xe z;ta2ByFLS#I`jxZ#JZzL=@tq%@7k=qZwtAD+ZMCa$42uK-m;;|qbst>`xVQeML6R0 z`X<5G3Dxx=l2!Li$WZ0%q`1yw$Y+o&?cQ)PgWC#+%B~fR~-P`xHth?fIF`mCz{w` zO0=XSfD6{J%v{@@NEM|OuVPFYERZns;$2h-PRlo>mg@V3h|(4E4!jC^Ks21_DW^W! zg_L(isYteSn87uvq#xG)QT73%h%_6y)rfZT=1&2Jy}aO{iCc}?UQdRrFo!0I?Do^K zhvi$rzohQrA=jCsyR}m~5MFOJEeu~=lwWGQzGPJ3`qhR6Bdu13&}Z)HihyA5q9R~? 
zQ``_}%){R%9^MBJS59O(>be=&idpr|z~u3YTY{(RjQ1Actcbr>1FyQKL$cB1N^GB$qxJ=gjRQw=Dca)(5jN?Qgia-8jO)s6fGe7D zsfwPG%Ne>Nf6?qrz=gGjQJ+pf%r$mwIH7d?XXU8^H(`k{B z)$S6X!Bj>B=bD1m8I_TEpTXgba@*%#_Xc~5uLv8mT?5P49cj*=0cM( z&f6GueLs@3a`_tBUysVR!LHDl#Uv&dkg7;e7Zbh2E;mNmNWa1M^-?cJSVJR1sGJ`^ zOsMD#4`NjJNpC!3eOS8FnKu5*yJ?l&0QlqKOfLIVcGQgr}TlXFJiAOsE3!Zf)V9{mARJ4Hr z&ln)4PW8Ri$(w&n=-wTGF3Dvbx`ZDmFs)g9N5GaYGV;tA8oa~)Hn#+YgY*8w`R!Ou z@UFhN5RO&rpE+*_h6cBUfj7g>1p7 z^F8VBK~UH%AkwQJ@b9+P?}$gb?4ONrCpbN2LP?8bLqCPDOyB!O6e&l1%ofs$R@~to zh2kn<$OZ;tfuddvqr9?m{!D^=`O; zsWNilGe*Z}viFjKd3ZG*KBp^H9|0S@sL6?tIn_u}>cW-lQiRJuUJb`1BNhXWTItW` z;}H?{eTLgN^V6*{oDt|6k~3FV&pnIGHZ=*dT+ClCR->{TV{Iadh8?(svYn+Pb_q?Q z3dh=TN5esLi4|Z6faq_OW(Qee4({48#64y>Er}A3(~L&s>>ZromlE_KgO7JBqxv(a zGnyAfZ3yT}0dg@IWhP5*hTQj4@{^ZwZ|;b!W=V=8xngXb7}$-g?~B_?*IN|dAFV!i zQG3zjIDu?`@o*j~MQQT$Iux`)Jq-hb-pM$WjUX~mTwU1_8a<;PX&EW$4b%uRlb&JI zB|{5$`{>fqRfDV}F3X=g0XphBnsxzGQ$`u7m5Ze4pv*9=<#IML!B%1)lt69Hx3Vw< zN)iLpKG4HDw%Mf5QkuLa?QZmu_9}!pu%o=JE`*e$jjYIAZTm*Dm%)<*YpxPVx9_dD z|IvCeSHYA@n?Z)7HTn;%S;7Ry)^ShLZakU%@T8B$;2;+oW?Q3hcx&g_DJqbh;xG-~$-qZb31G3jc3FFU(HI*&25F|fr@sm?Zj=!tS)6NqL^{Gbd7 z{ATzvnWnBZ9KC5lF!Cgnz63@LL3@oUZQYQeH)v}a3>QH zZ3D}TdUpq-DV!5l6R5Vn0>_H;^^5+!%_1Y&vZ})=sj*-Xgzw$Mr^DHB`UH{$7T_Rz z=g9|ez4;`|-p7%t`|S5GVNIRh)76VlDEZ*!k~$c^ctPDf6>-9co9pFt44WC89VkiV z$qRKI)tW(&B9aBtD=LM>li5COy!u_rxk4B`-vqt%^1bY5?~b84Va+aeLVd95fua^o zH|epkDeOz>uEbjjkIG^)%gZcLo=6+CLcA8zts;XR>`Aa>XIF5l;dCI^3fWbXI3c4A zCFamcoDy7tW5o)hqD1_ZP!=I`dVz)s1G_AZj*y#Hh5_?98;xY;4++V|jv=|YNI^2) zF(lKegrqKM#%{kr^lavJ>V)=~NDZ~VQ^aVKpU&cD?DpL<=-z?Z5M-b@c|?ba779hh zt^Nz6hp`?pwQn0yJyZo(8IwT4%)4aZDkl@Epu&l3y_A`!u!t1XL6!07Bn-!+QV9;N z+aC^f5q3Bn6+zhHa8&VNm&4J~yvy-m3Gs$=#?vjNeYXx7Yyve!Or`%@W5LW-OBHM| zaFs0HZh46yDlSV~Ufm8Od>m4qvsHx#HTYv=Hnym3-QnUA4cpTc>yx_Yt>7w9`a-S= z)A`k%Q=He6WNEYcM|DNrjN|3|REAK@sbc2Bx*){KkE{nO`Qe+Uj@-5>-ZPb21F$5X z*v+ex>4qu-zd#c%rR+$+p0sqCp=$;#xJPi787<&9I~j%983;!xP$lPtnfshj&qq9gs9R=9rgqkrXxWk%@E4 zowOo(FNMn~9{>v3=wu+wrNR|o%$KAWhZK8!--4AhE9CZDFF4mBUmEi!(VJxA1wOhT zkyz>_>nRu^O&Z*dI73Zx;1-=pCU=(|9IB{WI0vrFnSCnrHMRi88ItH;)+&}np(;3e zbxj+&X!+&!WiAjm4rk}j#GZR(fuFxLZd{>AMkE4xN~L+G<1OTk?0&}@J!}du+Lzcu zBwsV3TQNHu|10NEKzPMrSsq2o9z(2@IXQHg&Da*03Z2Cs3yFD_;Lf(CsG~&1aBS*r)KMGyKRvYo9PmoQFMvoe5~+V}8Q=_uB_LQ_USV zC1glrM#=9_K72|!8w|Q1E@8I^*(oAAJO4mSHa`V~1VMlj>`ccLZKpQytzHXQ=)iGn zEXHHI8n-}NNr_ZAeb`5g7+Nye{Pmls%|Jt2Vw z1tp2w<%6H`q*P$3h8&0i9p@4!xg?jc+Z>cQ+;EEPjBE5Nt;Lcrzh>+zf=3 zq>Kkr%6L>DW$btkl(hk9E)ynp43%Dc^6Dv<86;{S!!d!y(yeiT^`o;ld)Z^8Aqnm( zYu@>9CU<@igA$Q2cbP+2T|;tNu-$gA4@^w;X|I3@w2=dm+2s2wNx0eMZ^bwHPT{Jw zwJGU{y+AxpZ|(Y!#}525j8f65(*0Z?lopFyZ5K-{(*2k;ez!t?>^_@cvD9PES$Dig zZ8@Mhp`E0hTt-J0wJCObIY|N}HJtY=YB*K4?RSko8N$8Z^3sZd3fdF}tbg@7ikiOR z=GZd=2s{;y`oos!}JL*T7yOwjbhEaV3VzkI3=S< z-p8po*Hi?PFlKey!2}C!3Efz+pmrxGtqzSnx<`*%DNa?NJ_&^nY5W7zC6e<5x(iBz z1z&5GslP%siRzukN!;Of`N4hwZT(@3*VXmgrsLhW6SlveUp}K%^${56M6_<7bQ2>yy)hG^qD-uiGSf%&@XH z$yAHI+?3?y{AALJ-;KwJxe$<;TJW}2wcEaH&?L?VXjoZ=E_{1h_Y}euUFO_8<8PV1 z$nvzaT$Mh9@en?8ySDpO%a1Wca#&0FGeI8@oh;Ss!r} z?0n(|2JFtneZL#jSI86Ujtt8CZnO}&)S+x4-5F5iI0k4)y{#C`O(!e8MGNN1?gmS@ zWZlTU5%~jO(%lnj2lVR6unQfRb&yCq42}Tiq{76~8`P=bDs`taw_vX!KzEsqFBaL= zc)CQ;EM28R*p<8UeN(&Y&zuCiItCDY5A}ef=x%m3&XwYn#Cq6W&laW<2%(%m+Rxn! 
z2SJw~T7?ixf&Cy0PAh#_J^kX&&TGZ`cDg)Zye94cuE^1oY@V^2Q5AH zgm*%3E=g*`Om1kRTrdf05L(k3Z_m zyg{^W3$1*i3**f@x+&8 zN=9S~3^SCBRfVXCjixim`kq2E?UjB(tUZ8>%GQWOl``)>%e>ei>}!5HV?g#ZOvJ!p zo}t5N@>oWwn2L5lhaQc05a3Km~7 zm~fbkVxANfDknxXF4RdR9gQPJ2)CFNbXqb%{=AMZV2HH_QK!h1Wz!mNRdUHEYM0vm z-BFf{*@mvTQlM?^wTjc8%C<{pGZlQs0rO`%(iSwHu&cUTO|>wsc=##=3`k7~B^hpd zsL?>)mk?QGs1{oKf#_5-o;4y;dWmFNZL`xnw$_ub;;3Y(Wi!;N48hz68n%&7*Ur}O zoF>I=ee09zD1EP;hcTQxH(b(obiN$h$Pav=(rI@NE9-DO@0G8v?tqFX?5fQMD;-(t zpwB0Oi5&>_<*!O0xD!F@_5yoPH?tRRItj0a*u3kKe0tU@+XK69n7e+-vTtW>t>CqU zY*qBj)daeO$qSd(Hrp6TE54oRTNRmI%?DF!IIo%mlMItVE=@?uM&m)wj@@SCjUVB* zs85K8EzB%|!!C0)n}pMj{wTTkR>-YtCSNQhpvn13P}pa;3!L&?B~psku#G!!JWzo! zfZ1eb-W8-%lNg#DKQ1Pt$qItyWFa>fr8mNHox-VPwO#PM=$^r4&%0Ij(^Z1w7=D<; z@Xf&O;X9p9N*RN0!9}7{T4~%k2w=wm;&R7USOP45+ejl3M4U(UAI_9qp zj$!?^trS#EEfSI7C_vKB4lU?1koQhTO;e{M{cWe2)4Q3spNqO+-WN^hjXb=!aNfzx z*Wr3ko6Z{>%fKwB)5Q>lby2NhlTJ=vCM%h9&N9gq5^LYHDqs;2QQB4g4En|d z(ns5Z@}`@OTr48{d)oFK#=ExD>$+VTi}BjUcCd2-ED= zWzWT{H5+b1p?)2X z7qt!0c$xu8t!P2S_eXSU(#2yi8fhPP`DI`*E|>v#6gt~TrQb(vT>L=CI5FeOm%HCoxoFeH0=SC?a4dkMfq+uv{htO5ckWGa)QyH?!YMZZBd21&@>iEZ5{{_qnNVsXT zDI#ozB7tnwBPwO?a0ZVmuEQBN^jU~*%J==~{7HuGP9_8v#dMzRUjNY!XHj($L@;&I zWUDOM_~@r&jULk?5r6bHMShj!1*iJq36AHq*OfHDDRIO26>-@54#W+8-wF6|*AisG zNgU)S73{WSApDJhEG9U*twI=OAISX}JCzJ@_z|=cN&K7Ssy7NNF;ARY%sk=1&84$a z8?$3T10I>gY5|XYC(R=}2DTyvRLZ_8v;P(!R~=8HGRBKIo{w{xOmbZ$8U<3b+p9ZG z?8-Ydm9;muLvI2?+LuUm%=_}K3|o|?farm_#7^b^7fn|d@VJ8A_-=AH z?i7QS_Mpt&t5WwSxZaMjTeR~ees5aV$g9}WT2Ekh`??ls|9%rG5URN7gn4}Jkxn=4 z8=j9#7YHebZ`!v2s34$OCvcWm`&x2uIulwhbPK^<%fV#Dd4AKMapeUhF-0B|lq+RF zGoAKWE@z?of%F=_PD~7qhdEsf^a57dLMKSn$)j)!(2#z7zRgYAxHq{S?!b{j1&r}P zFq0kn?hJAPi;-hgC`bZIw+!yj$Cm7?4l!8mjO;qEQ?yhNdmqb>lqYKGNyUb%18o|) zfRPErRy6inJ>@Fp=xmryvnzyF9-iXskhy__>WC7(B6~jiD)%X~5%RXYpI;bBD+P2H z%_SIYQrT5?vWxeeL_qX)n%vBi9Z1e_9O4Ca(=C-+5TP)}O1T_P$Tf=?wPFGO45+c+%t=h$Qz^HpevKxT{j}CLxOxgh-P{ubXL9%ohhm z9vF%`y1L2VhaNE*)u+IXV7eFAmzT&6SQ$ND1~buRz8sLoRN`lAT^)#S17enI*kq zYNTXhRzw6_QPh`SyP3#6rX6zB^hH*+)0s%47R?=|vy}Pi(sKQvA(pSKtn%t&NlDa`zz|(lri1AeSHX#;EOweNJX%&4O=1Jl$=sPA1tJ zOA57B>ycB!YStTP?diY&m}<+zaFh+FeWR~-win1Ix6u8iw-4(7^7WXjIPDHX-&BVp z@i?OVBuKE>DrC}W3`YyR@Sc_NZa>iF^5X80?Fptpvqd&U=ZBt_@q7bEwA}$#hYFNy zoVW1rP*&Fd!#S}{uzK&=&h}cJIow+z8QK7`CPp2%lELE81q&Op)lWM!F z$4Sk7#$8=wKJV3CpsPEu1Z37RAU-uwDV{Ko8uLX$4fDl!h%c&Kigfq&>FEYxtHS;| z2G8em0(IbWsMsNr>uE06@%VL?F2c;-Dq?0g2xxi~^<$FiI5$u3 zu&M7l5xcHf5i|k>2}4wXnPj29a$Ym~8gI|<4idY=)o$9|z>&RkzhLj|4zS{#vO72; zhMAc1VoQk)mG(l}9B?9ao5L&-RD&^DqAF3>6blO#)sw8P5h)FqPfVhrTX1cM^4scl zuU}E*igW7W+!Id2f-_Nht*ZhYWg6wWxQnh71;{}4K{haD$Ap>o7xR?-2vtDxA z^kY&QwabxYsuRCBgGG$|x73~CHY^!=64<;TZITy7tIf*nMcd6nWpfUh6|0(rYRlQN zY)+tC2A9p*bIaheIdy8oCD?D$!Wg@zRph+VyoTs!q|fPPhDp2_OrgWtkdIc?ipL#k zbA`!XugcE>`A@Gn(8a1S)07O+uh@a@;5c?D}`Jq{vAn zc8UaRyQPrYy8A13En|m;yd{aEa+RL~DPBS|Q#S*Tl%|jrJ0h^RlfEBupsY_ETbqZ8 zFHRp2G<0@$k--U0CvX%Fp>-#mioG)zBil8HdUF}ZPD8!nZK$KipCFSq4$MZ~6(4J1JeLMw zhf%bgZh!-iDCoqx&4dWtzm1{2?72%7?eihlq^SyM;8sq7?m#*z#1oWUD0e~`|)bZFkV+M{M$-xay;8%|yiS>j5%8`XrtmkGR6&&WB9dVc{ zWL3;aWEk(Q#V5tUDs5VL)O5a4VHk-;y(-g)mL@DPVNxNy<#rUM=qWzs>+9=d-d??7oB0+@ROxIj2Q$erlo@0y-<^Z(enGkhw zfU45c$u9UE(}i2E)cZQoNokyIpbo@#N#Da+pOg=|YDml6o7A4(h4xfkC{+lol{eJs zV(07xEsH_Q>SDwDyr|O3v5?}<$`@_lj+rnN#XtcH;dkV!UP7h}uE!*0sx>!(F3q82 zvM|?D!g$Yag0<2yXE_$vQIHlRyvrah zI$K?G$Mj|z`cY5V9mTh_p|qFia3pU_iJ@Zt0g=!x$*7?9G>l9~GO_V_cr`}O(&>2K zM-&Oi6y@Ga7buPl*`V+c-0RK00!UvJ;WP%&`elJ5khuzuaG0%B(|1TaZE%7JF6H~r z0V-#@7_V53fG5}1V#iNEbD>TTWCNcdb5bf^7cyhQnK*KWq-eZwlr9O@#~~uWR+&{P zddUQMfLR+X;O4CYI1AVXqLZxGKY4oaF(pP&Aw31rM}81jS@e+yy$u(NeZ_#tiG{&O 
zN_=;qR>k14L10DP0T7?oYg0{3(aT}zRJjuYTPg(Y>9T4BS~ma$VQlwGm|pEc<9j79 z>p`35?&PPq`DoD>)v5TlIE2O&+ zxZ#V7M9h$p!HX3cl>=dXphOUb50r{O0UH+aCvj4}N%)xCzDwNp9dKKFC-6DJ4FXuq zRBsS4Z!hO~=M4f@okiXtkRJAmIAJvy!4PffkT@TN;gUI?RHiAT92pJ+hsZp`s|Lna zSV)qu)z{C*gllPiQAKTiHpUS(;9?@8Cd$Qj2o3Wr*is3o4|M9La0l)VoI(z@J21Q* zb#vIL%zbVMGr6t3J2=cn#^OK~<`hSR11LOtrM0q)r5B@UEaQXG=n|ESCK+CGus?nU zaeF?P0_r)E@Lf~(ofG8U+t2P9ukt%yZ0Y;wSvA+WNSX;^#~gz4KGJ?1W4Ouu8Qj+O z)jOJ33MfsRWa=U=Uh)c1$+P1=3(HliXOpD_;m0Rr=cAcH$g&)k=Cqw)p3##D+$tH7 zI+ABQ<6P`$oav6nnT6w=Z^)C>tx`3zj-9k+PSUvc;c|OI{;;@+eFXP8QjC_=Bf;Wg znN_hKTkkIC7`PAP=|{yL0S6-KImXpyGT-}mE3UenkS^34!tm*A2ZRRSQ{`4UI%{kmqM{+t+VR3w`>)-jL(AwDzXhazKXgH=zGxaXkG;m|kS*1FLtpEu=*C={2t(R0>WMCT1AoAn7rO29=AxjoLaH9NXxi#cEV zogX6>)b~e_gUldKvMVFpNK*MXW&BCZJqh}}$P5srfcCXEYol1&eLuOTm{s6q1Ax=l zZ1z?3T~ORTf(OK6(Z9N+$$572lylN3si1rBK~5EJEE=76FEF93>Dnhy`G2V#NKiI) zJ}zzb)nZ{2ZEs}&Qce+_D(ne@h*4R@?@MkHAi-=R?OPTQN&W?qNgE^=)%d_c%sHSC zldqgM7vSdHRFu0J)H0yOg8df#66eKG@1?sLe70nwT6}=Y23FOl9h;D!s@OHmIW{3S z98jH7%6r%^&Xp5byw_5PGuJCRsOG4p=Y^3~>GJ~jv)U?>xW7$68xf4x9w@GhAqN@d zDTT#=BVWX;h#3{R>8WSDj-9v~#Wjr3b{c;_ql%R!NgoSUVNAp{yUb~TV-vULo~jxI z;}T$XmM%7#ymMGbb+j6@_tk2Q(hK7`O1R*OS3&xx#Y2O2=rHAHtad!p9^TpP`>`HBqYjR-p|{`6iJAP)Z2BU&?HD=l!81%beGeV=eVT ziQS;Ad>=&_DMNYDST72kzHJK3;_{)(Z~2nR4xy^VIPx{;Nl3{vj{0!N$h;Vv9B1Dz zVH5QMX}qjwmAHb$;A4nytn#2+u2oZH6wcM_cXQDuw@ZZY!A}9ln7(lfg;E_2V=>3d z-+xm%&ZHkt)e_rh9E~UZc&yw*f=uXJQ^{lMp65~2fyWg2OZD!~uV)wIRdzXDplqFT zn`#8!-EDw(|6;K~d5?J;zz(+oSno*VZ8G1Cp8S0Pkr4ujlK@vpI@zDG14- zG4!5s^w?Zc1o4c@>$#AMts3pCYNH@E+Kp!wQQCyrv=MS);W5z zm=iNHnP^g0Uanm`L`n0_7^Te@tHHBzpUhY!(WVT;y}mFEM`NK|7=@)tSi#}4rK~xB z%K*BRB#(ii=u9WzeLjATRi7;fE2;)( zQ@iR6Ysx=h3+7+g6W^Rp7eg+BK$mZAqus(##88N?QB~rt%o~@^4iY;oGUkgBuz;LB zpnB;3RN57iOnfpnxkc(HN+5Hu&2C}0mRSGb9x)WwVO^@1mDxd(fFWlV&~6sd-9*#O zeFT%QwD|7L#xr_{Z&G&|Ws%QnU#DXCK!g(DoX#aAa9V`hm5jjHr(jh}+NWq;0#BIq$qfS1c|s3DhZCM-OtCnQ-52et;P=W@NUmlFO!b+3f2j9`338za~)07#rv%2(@wQhO@p zwG!Zg-CF=Dow*gfyLP#nw-h8gW=Y&IWG0Tl8#K&tc0kY(84+_fy5vcB^8nL<2D2w0 zXb&=Y@31Trx%lA}=os4I+)5Z|wGFi3+&W30TOC8}AG9R^v)IY?Dl)hM&&Ko33Gr=N z=>jwiiQ2^QrIP!zFwyZ$>fyfV6q4A-l&is6*~rKT3L6>a?KX1ftIHVUGIr`3X#53( zYfarjW0oe)BF9Nj5BP?!Evn63zv}`OJ@6q=`o#!OVZovDUFrt%b_UY*{aqa!-l5@( zxh_jc5qoBe>+T*tVzOZgfP@^nu3v;EM#}a!P-G1*;HfgXzJC|kA@&;Dg>M69i#Vpl zt)fw9fo1zE>6j}!ir3aIM17FqOJQ7)-^yT3R7yazr$N3@fWqhQ1LUAqk0Lks$Xh`N zy(%3Db+#SMH|kJOyAJe}!0M$t2B3BftrqG&nNJmZwGSy3mba^7i!ZQkjfXrG$;xj&!3B`9_*(Y2v z?P>3+Qfny6j}BKdpV@MaAy^!>7Zk-}P`h+Mq78a(^H;9!?iL8_R|E zSQ@8};9?tlL*nG%?+N>JD)$tt~_omX|Il5*5c>0ut+B24D&@kSw?ILqterp+=7 z5vgCvf)Coj7KKhnS@5yQ!7Zxu11IoG!{|hxfs-Ga(5NJRn+qd=Y&V4y0UxwqK73j= z%|VzdrEI84PaYrD)TG$ZHoI-~-bve90fU0x8EVC%S~DIGfC@f~wI0mM+D>zmFN5g3sCe>h$`!)rhZ3vh^Vev#mZ$1H}iOiC31aq$NZr7S#q{HIeVn9qAO>fs=p{S?=YdfCN;!+TJ zT!Oc!F9b2&8EMN&+BCkk33)m8DaP_HIgovVOAbjxfCn1F>0&-+C0`vkR@M_*A-HqM z5D1s8CeN-meGC*;Mi$-_o1=uXkxj>!h2sK~DHqmZKlFTsvpUTn)CYE?n{yFJ7-Ku? zzo%NcrUnot@H*1t^~C_O7|U)jmCzRY_Oh-Ii>g6T*y9OZp^BnmokL1p*!hLCJ=BJA z{ZxT0gbAaEir-0SH+QDki5i)4VAi>^Xv<`Y1+Ehb8h|M!gR*=#JnZU%lG1)DP_(XV zSJUSs*%w7bHELQcgco2$ADgc(+0O2RiL@s9_j(?;A9v1Y9~B z(m^IG4di@(Yb1%TtwTs$%izodK@bVyk$%|vgKk$eX@476jEcE~y^#M@z%1B^x{;^P z;t1V#?`)w&joai}k<;JSyU&#AZ|ff9ZLYB`HNLw9wTh0ItYh2VWRkTSqXz@oqlZF@XSyeWZiAmhR;VNPx5DrNuFre=-Wof0TH1KIr5kDMGPG5 zX}g|Slb~!#uTRnJm7~#+u15)zqHKwmnp|vyZkDHL;{-X!6C0-+IG7QN=oqYWV45a+ zK;%GvtI>lLrO2g;x{_hs@M_~k#28P&STYal5!W+Du2wEleC(GfP7>%&ah4!yMq8Am z@xa;jfXdc9bnQ zt86k~P6sHxX1d>+Wc-YUnYDVDWm_v!>II;*Um% z8YR-h6=HLL^LjZ3(Oxd#>-4lT)h1p(P$aHtJ6o<6o5gT3MVwwXd^O0H*Q;^%VzSy? 
z52oZ=6(+>)^%k2eL>7)$tHsJtMe$1q3W>Rw4i?yIi_2^^m_HjQyYk;k^|jA9Dh5iv zlj`SaGJi3cPDUApPY#f2GznM!%~YQ~WWGdLdogg{ORA^HQr{oH8jhD6%9_f{ixU}P zpN_jx9_o`)KK1mA?}Ns%+^+@~lNUX*`K%}K{~N5=nWZ%z;Cz?tGXM)YUsEulCLbm} z8$3rg*ej$JMTYOeGeXFDj$RWiJ@ImSbHw`g_JYD!Zv37{zp0l5X{KJu(Oc?8NlA%! z@?_*%7&8s&in0>)viGhx<5xO2cZDPKc(PdY+jmcJw}oz1hfe$LyFc;tcHY?zKms8I zfsmgJm&Y||i2I)9=qy3!{r}CuiG*(~cj(4~RVK|O%5W~T@x>y$8c&zw)yb0y z5~)aoe6e7}SKU515S>|YI4YB9D?{yozC%DOfS-aJ)gv!RdsmR;Mj59dW_`_`X__&W zUeA#s_<7LiH7PZZedD9WWNv>^eIQ-ICTQBMUVELW7xhZmccFC{4K^&0iXLq1dc1*8 zwt&t)iFE`4L3FZuNnb|WRwJ)d{U#ueKX|!HydHlLAXN7r1^6Y(KT|?r%rjf>@l=V9 zHj~-dLs3R8BYxW~9$^A!*0VW;6tC@KwHSIC(40>2B>2FM7J>Gk{g6yzIWVg4*8jLr4 z7nAvDZ#r3T@X}yN2lyVKnvaLn86MGJn*}}@zTm$uSBu#mR*5?4X^fQ@5&_jjp z;ng^J-Av!>17Gl~>PqDIZ=VfTgBd}vTum0}`!(k7em33Vx$24EHm!aigva%Ik8nhD zn=#MayIhUOd+RxRpKmS|QLV>3_xW@(f6l8yzpiHV_hd2LOnE&){k+J$b}#mjsD6_{ z`g1$o;Dqjp!>jcMiRgjV%L!z1%om;D#z+z|NC~SkT@E%^d+Xsq!M1*e8STy1&(>IM z{IkM3;~yG)K^M#w{X;Z@fAD|$XRvxkXrkZLKN+Gkp6yN7n5d*MX24wh>#@ByzcaJs+YQM%HYwe!fQtp{;-qhc7{-o7HCV zoVE%sCUZ<+JR2@w1IHMQe#eLN0SGV4UM-*TdCtEQs$N{KEf{#4+Ozr6Hlah?v?8RS z;R-vAzrWVKtkhr-30XqD06vBU(|kBv(!bdAWBT!ucg-cbSnpk`-GYUjpndgxZ~O{d zZ0Wd*Syccze7>H11#3DREca&DbNXjGxfo0r&py9i(2uJPe)xQkejH8d#{>Fth?4a~ z{l>E)ezPyFH>2Ti{TBUtqz>Er5Y8vV4ARaC71r(zA-QIo6|js3B(aul88F0I$?Ii^ zd1(+s`T|F2F+hheF0^d9zMGwmm$?3&XXDv&^Ez9Nm$tkHV_Cg{%OaKJ#WFx40*%A~ z^r{x2*TV(@Q8VvT3Q$_;r(dKM5szYTvzE>XqVnazISrzotdDRh#W_zcYu(Eoa6dk+ zc~tqfKPccieLRd0?H(MA7Bf^Pu~Se_cn01h-H+FX%ZlsBUeLh1uPJ-29iKbPJ=PuO z+PzK>AYMG%Tt%Uij|_CO&F@GmTYSj88>;EZdtFY`$G+`}ZWBG$y7)!|VAd-Cy`rcNjK{9@pw;3S_tYU~P6YlPI>}o$QMTy}L}G z;KV4CwphL9T{MHxLd9z)d)dj;_p-Au^f2N(PwA{vE$Uc3+x*Vlym734fEf6N^?3DQ zF+*yvH;1GHQlB#B#a=V8QvbTQ`R{D>#N%#1u*fZL<_<*!CuU_?fy#vdn zC-Y~qqmA*z3i4QYWC`f5yY!qMs{4Ed1g+x3lhO2)ln?X{7SZEicVAu&Hq1A54{1i= zJ@EnZ9@)K<=&>zc!ofQ2uV7w2radSes)^)^Szqp{KVI|oz#4YiU%|Y5Gi{-KFQ`Q=|8dLoF~{BrFEc{K0~UAr8aG z?a>(a*dCKXkA?H|@fm*Up|n6;M#b?!0QUzCpN3e?J#UZJT$kmLnRM#fuOqvTz{bsHaQMqQ=b^Z@vcASxwy|7fw^7J$LWbRS#WTDb51;o@50z#H7Xak- zg((?2DJkS;m$MDt_zGdN3COIcr-3ZJ>H(YdMo>>D&<*v$A{zokdk{W%AHljriLV;2 zDuqRH&AHPd`e;bC)CpTX+H?Xpp-gl8(_v#Gg-a*TCnJl^8a!V*wO}{S7b}<_t;ZN8 zn1v?m*9d!B6;*(D_BhF}~Hr{h8MtIHufUo>9F z$*ZQ*H(xebx&s(P8zQ~Mr-$&2oa<$30JQwP0iU~olUUnnKUVeE!I2nU>|XS(U4#lhZA=sr}Rm)zUTEvlKATyG4U%e(Gv=or4nYCRh=D-NiJ zqZdU$lKKWWnWi`6qrsBgZyT$Bj=;X@1ZzCL`M%kT8M16TJVe!|dulSrxkZ|qmK{{2 z=!T=lF4R* z$j7fBpnyUZtczz|24x`HN~f4+v%PNVu#NTysA>!V5UjR*^OX{FV47)$2@6PzJ_ID4 zAf(w$EELsw3skZVrPc<6oephX@Vc9*!DyITV})~*;kx7*4aV<+dTY_m&S>;~6v#RO z?XU^KB$wO;0l`{gD$LX!&M=iSjOtl+Njqj}a>#Cej#>(wwd;rM3|cB~&ghL?-Gat( zV{GxY2S9VtMBNVBjHQA?%JU)-(MTd z<$G(4xO>KbhUeFiq1sNgjNDN-N_1&(QdxL_gNs zpf?2Ebl4lg_CaHJq6lcZv-8bznvNe$`>;ICp6+KT6@iqnlyDm)&W?BOyAz$IM~~!k zPyw%)sPL3MsdhC+vLmE>Mq!D~!9G$}yd11tDmtCytN|zw4k`}+qEFeT-)!S@gi6uX zrrN<+HBqd+Bo&U|>Kw=F#trgo>?=pHE$tk>0*ss@u?P_qylFcos5a^a zw|iqnD|W`|P*LK&mQ>$KKjHd4)(Z&b?p79%v|VkzCS z7o?19x*6`yk>E~e_K9UMao)tK073l*GPxE95#n+0VEq~y@n$N^ONFjgSR`rXQlq;l z>4M>T$2X9nOvWQWFuP@MshKcaYYt?x+oqAOAnzKXq>Z7kIgH^}A!tYgoF)=lM@kFt z(;qkDs~Fky$^!whXELde4!)(_KCC>By?=0LG`<{MPdAF}DK2jW#|z|hF8T^lykEYa zATc$vO7$md7bZ zoqB_w|J>fE@#fFq-97%wy#Mq38-1bv_engbH>qdyu6ahi(9d!d`i=Vd6d<7XU*P9z zTTlI2{roBXXGS2XKC$*+k<}Z2V(;VseT;#RG4L@4KE}Yu82A_iA7kKS41A1%k1_Bu z20q5X#~An+10Q4HV+?$ZfsZk88!+(io_u1DbdaCLf2Y6s`WN@0@B6R+7HKZ`Sc|t7 z&#(XVF@W^pZfmS_|cfQ_mg{nS^qxZ z_kU(@g5T-SXaCu6@xP=YraxaDk!E&p?{h)>zx>~-_WhuJIaKY(LHirJpEtsG{rkH? 
z`#1hqEF1ou2JQcqZvQZ7|NFZAOF{epsN1iC_J3sF|3iPiZ;XF|$9)yFf2e=|YS8}e zf1u#}TG0Mo-Tw8U{hPY|n?d{jKUDqvIB5T&hG*~p@!{ONRB--A(Ef&Q-w)b@)VOa1?LXG-?*{GP{KxA34}b|J$PdfB!i(SNi)qqWz9CdGf>WiT1y*wgk6-U$kEu zBE^#*i1vS5jQc~;epkTxW6}O4G4AsJR{VjwknKMa@Bb$ZiTdla;(gh^pJ@M)-gnf$ zeE%nj_TTu4gXi}J-aZuTDBC|P-v5MH_aFR4M_01_bK?E~RN!-8v`^HL#uNT9(f-v$ z`?nJ9-%qsvB+>qcXn!E!`K-W$Y=2k0|BuD{-xKf4_EYixzc27`CEEX0(f(C|Puac{ z@Bf~l+pmgoW&11f{$CL5^>xwyDbfBz(T{BZhIoG}=>NL{PTBrV@&2!feeoU9{y_Az zFZz+~-xcrwo-(jAoqSWkDcip%-rpB|=7*yFGXl@w5c^2Be>>6sy+r#D6YW1qwC`!Y zO(>A#{z$<2c>({AMf*Py>-A~T|A%7#zME+OoM>MNx;+-{-zAtB%ivLo_OB({znN(N zPNMz$iS{2Q+V`}4M0k^M{zjtxjYPYV6T^Ot{1vtv`6z6EL%{hr#d@8J_TLlue3fY5 z7jVk=f0StdQ1o*o_QjQG|6jzu`>JUF1JV9<(f<1a{%?u)|3ke0UD5spL2o}0?SDhi z=Z{7EuZ#9ii+Q~k?Vl0tzprVK{`stE9||}>C)%G2c#cK;zbN+myQ2NE;Gd_Woz~d= z`Ly6mvi(ZD|K|k$KP}E#+5Xu?`}Y&=-w^%(oWTF7cwe@EGtvIzMEkcy|Nou9!N9_pScqK$oB6f+CMG&_ud!%za#Lo6#dBd?o-)BXQy(Eb@Q z?q3w||AuH6Xxsa&X#bOj`0?+tX#Y*o{)TA(8)ALm744rB@1KhHJEHwV(f&xZe@V1I z7VTG}{j*|TOVR$9#QU#A`(GCAUlZ*YqW$ZleJt9)q1y@nuLL9brfw(v%l2=HelA5n z-xlpN(f%FLJ{RrZ6YU$({(aH@7191f(f&2j{v*-;13_;;7VUpsy#Eu?{SeT|6TF^yQ2N8 zf^JVm`yY$*=u4vg?}>h{MEid!+LxmJuL=0SCfffs(f>C@`#%%?d`q|5rr&_eA?Y5aWJdvwEj6eJJ2O7VZC@K-?Rm{g1?a-xckDTMT?E+CL@w|4_95e@xwXK)cnUhw+FDaYbB+ zE8-|xM?|fNqY)S4inx&4Hd0#=Ta9QfqP7ti;)=KsSELSHHR3{C5f|c$s1bap7Mx z=tqO`{iKe+%fVVxDv8qgdy_5{u{jRIGC``d_eL zLG&N+TxIC1;l3gC817q%{xC7SEFxlQ>m8sn6qABrAAKLO)ZqJM(Q8T51UT%+i<__~xu{{+`hptoWDbLbCaKGW#e+&tYGB z(f7i8nnEALdDpT+pq z=*M832>LFVe;s-X<44ig!#o?&-@tyw&`-mCThL#|zQobj$NbyTAI14hpudZ8y3p6a zeUs=v$N0VIH{f|w=iH{rdWL;oD}2`s&M{zF)wV)RonP7r-H%)bo%4a_Hm{{KI+=DSp){|);d zM&AJQsYX8&_l=;RkLRjGKNUTSUV-r&(f7yt$IzF;eOu7q!T53X4KaQ@`ki=B6X=g) zox9Kv!hMtIw_x3R(O1Fxr_fizeFxCj!F|){)fj&Wy$wBsel6}hiv9xbn?>IU>ob8~ zjB#@4_u{_O=-=agFQ9LSapur(!q=a`GK=TG4dWD}pN;n}h<+UQy$tc_oBanaZ>0Tq7R^dgy%}5zk%_G(09W18T8%oT%+he!+o>t zSmz1!AjZj|`$I4O|GzVhz9RO$fW8~e!{UW{IW=MAEd;9QlV?}Yh> z&{x1dR-%7_@x$nwVtuO7|BmO4pkIn!hkicRA&R~k#&1Ngz&vBbM>OXimz`e^cl=^0KEtMo<_e1;}4<#fPK%P z7i0dT=r>~iS@i#$y#K7v1o|2{4>|PZG5=}w^)Sx@dOh}Q4t)#kd!Tsn{I7=Vi_xFO za|O}2#5iT>TVs7f=v(3XO7wTIUt#odtXnmDH})%nz6p9AdKmYOq92L%Y($Uad1L6m z#X7g3>r^i9!+(3^3tGU(Nq=P3FP=vnlwFrNwZ5v*qpy$0h+8_h!+RG+ zpT+Ywq8H)0V(3E{rv*KQ^^BwMgy(HX-x>RrKySl1UFbVveUj*7n13(&$`~hwem~Z2 z0R1sMZyJ3P>p8@ZuU8rLD4uH+eP^sg7QGwyoj`v9^UtBjG5$3AE*QUn{v`Hm4t*Nq z1eRYs|10CUiqS8^`UlZ3zL4O9l4*gK9 zLlixNc{ZZ&gnf^p{~GgbK|c%gjHCBso!ilO#eEa#wdh^wyP+r1>oK2R^yBfoDfAPt zzXRy)*xxjI0nasrzB}fVK|cud8AY$fI9c>Va2_Vm55>CW(BH@P)9Cl%92U^`z&z*B z&&KtE6&BC`p18gkJ&pAYqBmgO%Fus-=MACXh51*aZ;$5*44{u;ztZSG;rb!;eR1Cm`hMu6=*!``vgjA#z7yzQV&8M<`(ylR^v`j90exM3 zJ)A@T63-P_aq;}$i}O>Az5?bMM85&&unhemJZ}j7D9p1GeFL1IF#3Bj!1TelNz)pg)T7N6|M$&!XqiC(sYXbLG(A!+D-YUmN2T z(D%i<&7oh5eGja(c>bToIK}9%;ammL8!*o@^nYO8Lg?>dK9%TK;<>`;m*M)DQeEAE>>{};|x7y98?ha~#n zv2MNSd*MAzq5l%s51{v8ztZUI;dzJ9uf;xQ&6P*8=)^7-tUs5L_QvdGY-3fb}UxKNQyo(Rao5W#|XuxkBiNVVp|zE}Wk* z`kgo@)#$}|t_b=7)~61AGmIZazZbm`{au{H82WQK&n@VUc&<45pD~|y^pi0E1bPJH zccJfvbxxvxh;e$+e~s%?=uNnO0R33ZGmU;U_HhXPHN0OL^an7`DEjkQ&n!FEe**n* zjGtr2I!~jYfv*n*^yjb+bLfA<^?~5x`7gox6r<-b{~-G1m}eP!4$l=rzZ<<0{Q=A; zjQ&TgXEpkIxITh@5qcf^|9=#YS1*cwI<9X-e+%mnL;okPZ$W<#?`a&p3H#NKK7sKQ z=r>`UF7z9*k4f}>aeXg(GuADIegodG0rZn_&eQ0}V!wvaZ^n7ZpeHc?DEbjthb;OD zI6o8Uy_io9{Q~r9^e}n>{byL`IrM*F-2$sDp8xCcyv69Vm}e0Eaa>=9{u`W=5IfeP z68*R6Ve}Y!HTo5pPXxUc>rjVYi~WkCFNb+HqPODu82Sv>tp&Y+^BG64#Q5##8{ql` z`h~b}7y6M{ha~#TIOo0Sk76BC=#St$44~hSopX=17d%%6J&fx|(ci&5v*`Qb z{7j&Kgzn}J zUx{9V{R*R>ihZd@-v#rDpf8X0tV90;dKA3@<2Ryji+RS-bJ*V&^xH6>IQk>_df1Ns zG_FsepM~Cqelgy=B>LItz34|{{Zr^+JnsNI)+dd=7UnaAz6SOygMKsCXB7SaKkCl2 
z%A#M4>nG5=@mx9dCGma9Y4js;eF6Ond_9>%zZLr%SatFIx8Qv#M&A<88$@3Ty$t;; z^bq>Dm`^2oCDuQTUW0L}(SO7|Bj_h%oI3O}?0XdbXskmc`U4n0h91CqZb5$^oe##;v9~m zzlH0w=;z^iC(uvFeRJq1VI8K?Tk%{4^gVIkIrQgn-@s~%=l^+JUyOb>_BV*W8hRP} zCRm3M`gQ1)=s)7U2%}$v{jEk%V4Mj08JJHUdKT*$MSlwSZA4!k>k~t-#(Y}PpTYHU z^i%MDwWEKJ_bY*Z5XSF9e-+~-(T~Nx_o8=V{1p2AShoT6Ut)dI=*wU}L+DA2lR^I$ z>oAIbGRDuM--7czfj*0Ia_C=Sf2YyE#`+Y{cf$2^=>6FD!0L#)+YSgZ*kj zUlaQqNB<1#){dUV_zCnSF@6{NK^P~AemvH{7yTxzPYV4AoaX`b%`u-e`qQ}Y5PCh< zErb3j_Hh*b6g*cJ{REuD3G~Zw-yHgHG5$1q0nb}NKMV7mL(ky)z#5C^{}c3L^e*gg z5Iv6dFGK%3<{3gi0oPZe-;I3zGq92C$C5HY5 zo~s4@9qex${alRSj{YIWNud83&((!~GI|m{iv8_H-w)55LcbdOJAnQLdK!HP%zp@d z9_Jy0ek9gm6#Xxle-`~Xtn&o=sdz7P=-c9Xr_sCdyan`O%zqC3K|F6@&BgQIg83Aq z4`Q4k`kUxw=v$+Q(C6`7mFTDA`Y?J8dNukbSceFD751eL{YpG<6n$fy!$$NSaD5ED z9sAgV-hlhY(SL#Wu^oLL&y_$w1><+2e~59C=-c4Fz37)<{wef5u>J$+f5rG|^dg*x zA@q|l{|x$d=%eWM7$=MV4dy?AzAxsPL!ZFDPornCj|KF7F`qf~Wig*X+2Z-%0q;>U z`fo9xAo?1(ZyEZj*vAn1y_ioWdII|uM$clN)##^T{0RCs7{3mE;a3j-@2@EOzF7Z8 z^f1PBr@4*p~?U85q9~y%Rl( z-i_yNM6bYoW9Vz5x1eu}^AJZr6ZdULe;xNtpr3{7yU??^Zxa1I>`O0t4X#h2{|@JE z0R2mH zG5sE~ZB<2%D-vGS~eN*%h`l^^`CHnvW$_=k}7<~fssYai~x<$~B!1#6O zH{tJ_QS`U5o{i|UShpDZmRPqI^ph~3IQqkwPdobh=n3?<(Yw&U#`sC}b1?s2^o3s< z|G&Rd=;vb{2GEljCyl-dt{+0*0`ttE--!E;qM!4>*(`ki75aM^X9E3ltaA?i6P$-> z^io`3K>rl$HisUq(SE7G}`GnCA!rvFF z(VxJ&MbJM)uR}ir=Q)brg8Me2e}j3((AUR&TF}qM{>IU7!?|ilKNa&#pkI&Pg}w&n zlSID?``(LwF?tHU2J;y}k76Iw=x^e_L+Dpx{0#cV7-tmycJwUzH5h*a{ZZ^o4*g!- zcN+aXtbYOhc2vpnrw0GhOHx zVm?Xqi_m+~FGf$HcVImS(0g&;H2Tvx4@2m$U>`H+L%4nvy%zV)qIY2(CeTAz&m4LZ z&ozyH46ZMr{~qUL4!wZCPX*RpJpcD$p2g^oVf-Na`MACeeMQ_ignklwCHnJN|1kQl zI6u|s`(geO^dXE>hkgmxEsB0E*1r*bHH;HOe;V&g3wjU6kE8#HdA6hf0qc-JzZBzi zp|68=PNJWSbJ&Z1B(6`PUxM`;27(au)E1q{0eFHpi7X3WTe**n7 ztV0g{a`b8RyReT1^v7}EIrJMb|G;{S=l=uDrx^VT+&74R74BPxejC;^gnkv)vl6`= z?^hUoZ>(oE`j_Yt^eB2A`Wu)}6#Z7rrxE=(m`@CSPmJG!ekIl?j^2yjj(#H6Ie~r^ zuJ1zcza-4^D^vy8O z1p2SAo?Ym-U>%a^x1;x>Z-sqKp||6{1L!|TPowXT^%+9H56_iBe+Bm)MZXBwXVFi` zdQPC9hx_Kx_eP&azaHnRfSy91L%#_76$mY!|3<7sG5Y>k=OB6?p0^DBK|F5=eK*X% z68%V?fAm(&vl{&dJXZw$1>Cm|eIxWJ`YGs*=)0iD&^O2PwxBPI9!F1OoObl5&=ctW zc&;w=EpSef=ndGHUi81Br_j&Ddp&^uAjVIlcVe6&^lxxZGU$KBK8~W_i08_pzlr-! 
zpx=b+bLcmtPovjiJqzd=?8_Yb-_QdaES~=_F`r`e$MC#C^quj%W#|uK{1EyL=#}Un zN%^gS`p7B&p@;Fj1L%99r_pc3`!a;ygZ0dy--hc)(O1N}Wzp}# zxt&1&6`m`H-hy$a(bvN`1@z^y4s+;rm}j73@%-O{_p2EFTZ|t>AHelx=!00F5c*4a zu1fSD@myi_KVqC}^d!zh1ic2m4n2eYjiSGe`!=F4gL4=|zZCOtK|dJl6GuM_>)(#P z6P_!9z5?F6F7!q`R}y_GjNglXIQBP%K8yPfpdW;FNTc74bsIvz4f~!!e*x<=iavt* zWYM!&w+ZyyF@6qxe?0Fr`g?e;0{ZC~e-8aA%rmgz;`xtZ9g5L^j{64D_rN(RLthH> z386oU{jEfw!uVnIFR}jB=yA+5f_@LyxemPvJ&OKwoU2CkbMag;^g+z01$|9iA4gvi z>)eihJDxXzelmI&`W+ZQiGDNIvlsnV^b~p?`!ayO7M?4O{wu885c;y1PX_%CoZC_K zgE5~h`byZx3G}sbeGYwlTtAI|IC=qn7|%P0{uIU!Y_xd(Tku@P=nr5Wg6KcT{+6LH zg`c+xp^ADqMhw-b?|BQJ?(9gs8b?Ch~S5fr+c>dArus$*LK3v~|em$;_ zqt{{n?dbo&IZ2@Z0_)s`eiX(}qJN3!?M1%}FSIfT9) z#?PSlVEj?^UtpXp`kGjW3G`p1=g`;3_|xb^Sf2uV9oBOW{VdEsu*u^2KM3ntjQ$OJ z5dHUfkIK-u!TAiKAAskoM1K(T52GJ}`&OergYhHiWjJqj=*1W(ivBRxrxATctaA)~ z81rmF58?Va`i@xVcJz~ReFD9J_31*d!u*ry?_vFW(XYZdDfDA8&H(z0xIT@(3C`^h z`mI?14Ep6*|55bc;rcB46?pF^&|k*7<x=o`WgLNpNuYvi@q3?@v0zX?k|Bqpu zV)REb{~&r0y$rnt>lQ+P1m~v`y$kyiMn4YgP>uc%JXZw$OpH^9ei-&QihdizPE~8PD5`-iiCB&>zM651>DYank5N zVSk6v5665m=!apyM$v!7_*wMr&?nHVunsx&<*?4v=#SyK3g~~sI?th>hig0PCMd zzYyaLp|6hlWYE9BI*g*fhw-!M-MD@Ny$1Kqp-Jf{VP9hCD`6d4&|k)U;^_C{zU}D4Sce4qZ?L~z=pz^>iGDuzu^0U{%rk|)CDwTW zy%PJCM&A$XIfVW<+&6=MJFXu^KNjngMPCWmPoR%tUvlU>U|*)u*T*;o^e@oo(6_?+ z1U6ed|4U>1V)SP*pCJ01c&;+^F~ zMZW^$G@?I;`Nz ziE*On|Hk!==+)TY82ZZC#}@P@a8BarLl~zW{R6C90{sE3e;4`=7$=FI#X9$*pMdpD zp+_ySe~5Z6zmABX)dpuddu znL|Gi^ABvkc>c$6eKGonxITz}HJ-N&{S}N8LcbRCtVF*XJ&e8_?puxiF4iZ4K7jS9 zLq8K=XQJpkV4OzuU2%O3eIEPRg8nb8Lmd4$ykG6;lNcv~el+Iag&xQCN%TpqTQBGlaf5#>t@XhU-Vs2k{{E!=rLR$*kbYgzlQk~qyG~Dz?dWfyC(u{N^LC+^V*Dif7}m2FeFp23 zLLb3844~hM^-rU}jlb^^`vua5odLVpJHPon=4 zy%#-)bx5I~h_4$1=yRBV8a;>iaR~ix%rk@j9_~AeUV{5((K|5C1p3B!t{nOinCCS5 zG_Eh8uYi4=Lq7)RClJQ@$MY7WPvNB`qZKS8~2T(?}UA6MBf_g6GJ})_iaJn2t?siv1czzaHz8MZX2lJAwWg=95ET70)}3 z{xQ!#`qh~K9Qx)MKd|-U`F|7Vp%{HN+&73G!}FG*&)~U2=)Xs=ME?};OBnrOtXnnu z_Lxru{Uhv49r`|4hba1Ec-}_zk1taCehBd$-N{~YVth5kqE zZxa1b^j`E{JZ}np5bHdE{t5Oa&5m^#LO%rWZwCE*%zqU9Q;d^E|2OVCf&MDiIfuRs z`ZW4y7{7p?zSpFN9diW#|RWCxm`D#;HVq81oFH58?W1 z^v^MV1bs!!rw)A&JZ}_z7UMLczlnW`p?`#P)qOreT;GoV1@4NqR(JHS@bV3&II~(=sEPe zu%6TCUtt{z=nvxhIrJ5<4uNeJ&;QDpe=+)ZSf3#J*I1u2^qnyO5c=tuPbK;Z7$=PW zOPt$k^n0+L5%ej{vktu#_l=^jh2Dr>hINRcuZrti(D%fB~9MFMDzjlAF!Tj^gPBNLf;c#XENvqW1gevd*Z%X^f|201p2vH z&m8(UnCCQl0oNDMufja%(7(lf1KTd1|10pk#pvf^{z3F%TwjL1EzWHS{kOQj68&(@ zGmPGi^{hs}70(+%KMLzphrSN(8$~}5<20h*g7-0o{t3o!LH`cx8Ao3p_iac29zB8n zCyd{P{yoM|qHm9N?nQqdy%zlirPjs7!y9T-AC6+MH#A?7oRz7odIqCbN9 zOrTd_{2Y2I#+gQc1-*ct!v4;oABJ@fY`1v+e?l)t{|?U;L|+o~FGJrE^ADl_fPJh) zZ^QUu^dE73HTn`5KZ3p`_Pq|h3G!u3PwpJF~4^vm#dV-$T?tbZ2$ z30yyc-iY!=!fBbX-EG6 z<0R1U!oGB&r!h_v{Z5>pUi2ugPodYN51?;@`=-(Vh(3h=B-THJejesCivAZ|pG7an zd?wJZ!TXX!Uk>9;qnDr;&@aIJ=g@D$`ySYS@%;Y@*B7Jz8qXC(KM3cf41IabCxpHN zdL{bxm`@meMO)DRp zjQvfZ|AhN?q0eLfN%TjsKE3F*7(a#nGUhpez7obsqaTX(9710i*Jsc-#r31;rMPbv zePx`V3H0wV{~Y>K7=If5aqLS0{d3%R4n2tZ2dWp(|C5+cG5RCemmvB-@%O1R^m{P> z5c(+QS&9BS#tEaZj`LHEz5~`Fg5H7Wszd((<3!O9!E-gDzlQn5&`U9X3;O;zpK

[GIT binary patch: base85-encoded binary payload omitted — not representable as text]
zI-M0BGB8qSD|gqoRk_zuE{jZxcElB}`$x-*rne8}54M#b(&e8XE&svO?4-x_KWE~# zig~WVY`E52ytx2@Wy@##W^>ic7ezmXd#pKS7l`R3^60t3c$rveq2gq4?`1~hI$aWP zsff)1;vZG=7Oihde0^5DCT6-VQtLa0Fy0$jJk26ZI`UfGk)Ly1D(5xnL|oOF5&s=! z{B#)<2fPduM#aTAm%%Lq69`AKiB-} z@jE=Ah}_Hprv45OcxI$Ldo-xyMVzgiPaN<|elr0vQxA$fAr4r1^0GKf+v~hpB(0A0 z>a{h4I#v#g?=k)Unv>hiT*r|s#$2%zJ08E;eoOSHixXzJTy>RW?bB@FbXWM;@56wi zM~_2)=D);#;P<~GWve+}>?jfhRf1!!1XtM-Fw2U0rcRGtf2J55??s-)tgzAa$bjp- zXBg|+`K(TOz>_qMQbq5Cr`2*_Iw*1w24kRcVt?8q?nPcNfrRjh7o16ac*0G>j_|af z%MR?9`4TgrNpXnNVfs9P*c9rWBeE%Z-3RNTvY*qi9~7(-~1939ScZ7E9RI%>x;9T zI%{3l&~542hTS5QGX!!};D!T|FFfH`D0_?&5vRS8mw7qRZFWn5%z<83V*1#<`Y%Pj^NPn#5G2&*E~5noqmH5po-rilF* zV@pogx0nUDaBDBOT;{F3GNxb-WN>SOx;5a_nlp@|?IoE|XUq-;;6{69nLI#5vzl#> zWIZQ@UAI+w-m>yJA+6S>`gsUx+XaeYn6s*zq;XDV>wf9N5uJXckVPj+(U~19WlLY= z6enz9rX!~~YgtDo3XtX1(koBDy*)jS{_ z-1%*Ucn<5s$TzTu!~};gwieGs_sh&2!gmz;(Qy`B~ItCXe!LOJoSiJ2yso zTP902VQ&{I-W~iy=dRA@q7}G;TQpbq5-FS*Y4cB+{#)Dl5x1^8wEedG{v+Y%_~0b`D)JZm&%_Z&^xdNf58855qOGoqtu z$Q&qOG(kaeR<<>oTs=oqo;jKdla8i&YBbG~3gGio_)29oEwe@w<0Uql)N5ePL8L}g zL(kDvV;@Zih6;_VDK0vij+Hr!Th7>M`ur4qt1vd2-s2X{8Z1MoC|HuoBy*QXPcnKm z>E5*ssCIge0VSdkH=v~ZE(;~-$Huf-RC=GGwh$#M zK1?;?8NBP?Xp56tPfF$*#*Km@6ib5{UY~KCr5O?nAdT(%oow$c^z9t zo{NK@i(4wVmsz5o8w3Me2Xe3gme zg{Yt+idy%-!~NO84?1_nXqEYq(>}j1TDX8CMp#a}=!|vkuHeJ!`!&Ja<(oIIVE7`y z9aJCDa|grnSs4GBjvVz_8%NIUwT_%cZH}Bv?s4RJ9&+TYYCRO4iC7(4;iA$2b?PZhQm&&r{&E9?7MAml^-J1uAHr|a7pI&)+}!1$qgn545`*^ zNgZx2RGHp6057?j$FIn_yi@#)!49V{5~au2WJz?WRDt^=595PWPC!~EXQs0L3-@_<)$dOah9(fYELF{I1D?W3D5oxIC>ZUy}mOcLbPW`7_;tHSg zJywTEP@!l>hi#NdOUhdI9hi->96CGrVjEkeWi1_PWuLv{+Vp;ZcR2ml*=gdDpI_y7to?kraG1hhQeU!7idrESqQ2X5Hb-(aOG zUYaHDoQT@d<~1gesz(zpsZ|C_n)C4aWmLnUe3I*&2OdBFicTTbnW*p{^Q z3pnCSxM4!+6;g6`KN-D5FXX!&dqsW_mTB+^CFOd4JLI_TI9}TcVsUQTZ&h|CatNlJ~Vf<&>UEB509kL_UPW z??k@znD&TKY`%|CiJv+v6S+XBu;fUYF+Sk3<#Zg`Zt6R`)`^bsH0QfwVWxak|(!dc^wl_*gZ`ex*76x#Xg=y;Rq;!E{WiH@}*U_8uk{yg3- z{ICNxx7iJnb^;$C0s_hoKR2j5*i3s5E==7LV$23 zp^4B;SVg#sa5Z5y;TpoVgp4Qp8%Gf$^D_86S%&dGp_A|d;SYol3Hu2L2!AA4e~`k< z2QufS5&9AO6VeF-2m=X&2pNP^38xX9gwqLC1P>uClYE5!gml6H!a%|xLI&X|!qJ3d z2!@mNgnoqngml6Hf|194LO()(LONjpVIaXMB#zLJ(4UY_7(f_E7(~b*97Q;qa13EE z;aI|PgiONmgcAr~Aq*j$NH~dbGGQp;tAwu+zD^iMIE8R3;WUDia5~`(!kL6D!f?U} z!bn0kA%}1lVH9CBVGJRcFqV)<$R~^=oJ|-{m_V3Fm_(RNC?HHBoI{vOm`0dRFy_&2 zLO;T}{4SJx%eY7AN9a#TCk!AABn%>C5RM`oO*n=yn2@%X^o0I|bix3_K*AtG2H_~e z(S&0Ng9*nGjwLt$@zif}aH7{Xw} zv4rCY4ubr5JmCbwR|rE0Y4^YjLVrR!VE{qPzhMY}SsAn7P{GK}9b4(GuO6#(K>5mw z`id1EpZTlOTjj|W9b)L%+WKm5d0lOz-zf3GwK)o(izW<7}*{ z@NuiIcDb*@x5`;f0%t>ozvgsZLVW&(y<~9KHd3(HS?8@^aeB&nS9ttVcb!z>^j1Su zvU;2_$m^Tqo#Gr`l|8&M$2m`wug@t=)g)G{mL#@MWt*)#!&y_&=&bkpowfDo;3_n8 zMg2&ryus`9Yu)W!X?8+lIX&uE1*6-mJWwnxm4bbq6?Ccx_9Zm8!nZ=D3Dngk?}r}c zd!)VPQa(*?9PN}6VikFsrJgEU;#^tLUT* zCAW3n6_WKZd}#8~_c4h{Oh1Djte4@SCU={B9j@H@GJF!H(OFTi$5eC>33IfBABIjD zSXOs1Ds=xu^+sF|N&i`+Cb2y|p9d-RXdO}^ohl+68zLr=@$FW_ z*v7ZUKxHLE@bLYZm{AY^R16=Dgqg;h#Z?(rB5CpTMEhTpbz?CZiA;jz93Vf#?zf3 z=CSTNT>BO<%+VC}E1XR}=7X4Q#g|{SaOQlak4vHxqRuedd8MZktJ!(EXO-$@m>XcS zs=Z3H4DIW=B2ZD6K*!2PrFix5QQB0LskfoL9toY&lP^^3_4R4KVCrp<{<3C__;$>u zUWoO1p=*V&qM^nq6-wuLRyO!ojdluQ7V*Oaqs7Z*{Uyl+q4DKwCh4MS2BS<0Mba>qbeW@|XLkOwmP%yv65}L{VLmB%jvuwru)_J(mHUJ?b;L zY0GHdvE@|68G6i4nNmKx9EDnk#Zg{ckKN`sjDm^zmH89$tHyaIPpTd_aq`59f(a8U z3YL4ujjuX8e`0n1*%QYtuO650JUe$%?%9U4!_W;i6q$yu(^&5HVhPk6Rke+k6}~D= zFFI|x$7d!MePLZTdgvLo^wvq7-YY%6>N;f9)7TIj{SNJ`Rxk>}jol_T$8lF4;qUlECs(OD#v#G0%rW$Q=`x=EJ!)c6h8s|9c zDjJoYTr~wE1NENf2BZcHim2GN(EgX8TST|Kv7%W~UR_bw=!y2E&qIr@#7UvVHcrAc z=l&A)*Lj*vwjnVIb(n4x=Zv|t&vC8{z!eP2yylWp{+TgD{wa0VRy$YJUg@bf{Jwz4 
zuyWKX&r6*lra)sO@lLZ5qK9$XXbZl&oNlVTJlacg_i~S?A>nqc5ItzjGNuOVWpz=D zr{^k9)pSJwODS>r)ESDC(O)9b(I4gjvQnDzY5g3z1i`$Ta0%fW!W6>jk;YUdU|Mjd z8z>1`Kq=8P$THeV9ws{<3eYMkU|Q3D@8z&Z{+3rX)>hIuQIHJO1*|#h`4y=2E8U^A zHG{df%21X!%5&N@tGIGMhWnDdOYBi2m!6@lsrrKP1w z%Zg7TY&gR5G+|QJWV5OnrKPHzyym<-=hSJ+?rrea!Y^3_i0Pq!5tY}Rp9f_+ePTOU z8!fM`s=^wpWFgM7$wU8D%fHJJ%SQA-O;UR4HownXXUnx{R33lM>^Y0vIZ-Zn(2@-> zlqJs6fW)i9lC(cXdHK%l1#?}g%gaB)^2Sl#!i!RuH|_|_JKLFE(p!0FA7OdpDR1t= z)bx!%!ty3i-kb|kmp9=E%bV!TE}D6M>hdNYVR@6B*)vLdk(WtFSl(pHTacQ($)cW? zE~n7Magaj(3Y^)lUb>?|m4|uJ*Yb*-+3wQR^cAV{L{s*Wz8TK!#l6rsqp#)6bY_?K zLf1@1S9Ra%Dt2bioS(X##i~46a`v&EC87sXmsir)a!M(uEOj}heJm#*Jy+~5Nm)?- z5$d^o^jz`G{M6+gp`Ob}&lS%cm%6+o)N}dhx#F2;r!MaZ^;|xBu6XA7)a4zap36ti z%_&N)m-3HL&*h`%7JOq)YWj{)&*h`%N*2yYUEUGuxqS58%%WcCQzyq6UVW8k^jyj8 z)Z`WPwH)-@frHPxUKHKaeX7sOg3D`#UUtZ81s|s@Ce&J_e0iX{ znnhn#g})*@$2ltguB@(DF>80iR9W&*S55`(Oen7iG#fsTwQ>_XIXh>}^xDR9wKk(T zjEto5dI3nuy~pY`;HvT@>p)#Xxu?z}jwM*~KV1LHN)b0_<5*cUme;Oqs58AqsTSNT zIZ$9ak)lTnWXZg;T;~`wJ>DraX1aDJseFn*hsmbWSh-AZnYiV&hsRKBTXD=*c|Cd; zth(0kwAP9`Q8W$b9At^A&pFsUZ#24*4mSOA%Hd=!?e+R`v~xLGai6MHx|SFn6J4lV zRU%O%10xMthxK747F~xani_Mpq={ILQ&J-*GVFK9C%wz;W41;dv9b$dHMvpRTU9Hv zEsjUAYAZMx;%hW4*QM!apT}Yk7dh)Zb41Bz7Jsu!aPW941HxjFLz{cSxys|uHD

2T;+9wVD{D;mxIQ(LYZR5uo5vDZB4Dt| zD&@;IJH=(cLXzehW%bBfeMO!29_1Qty3v#gGp}xFtOV(3GX=c%d0Pzmu(p4t^P ze(54BfyJ*VC8`|1^nyKALOI?Byl!es*q(^8V(EM+UD*Qm%T}opMVUNujhP&Z5T8p8 z|FA)0wq_P8%kQagWEjads&zY7oXI?kL#G?_EHN7y?Sa#6)mM_iIdf)-vuIvfkNdtr zy}yc+$}IB9_c&QqDRAiu{fq(po78rkLY)K_|hJhnDs;%6SuuobTMSSspQ ziKdoac&}0tc2c7$ssr_Ef1W~O3_;@Y65_t%5J)uM&XhoXW1v9}Pk5?MH|A>9uEpzg z)K*#459-uq=5n*N7Lk)NZ{1oTu2{BCQHXdyAx0{| z+wT__WP{J^_hN52Rn~d(D>s^NKI7v`PkfShbvP8=_EAG8?TY9+7@dx31pD2Sn)X+0=O$82nA#}M1HCu%Rs@tK` ztKyk@)woyco?R4X3fk6yX@?46hT;dq694>)(Mo4<)>4dfFdNNU zPGfRnb(~eCjm2t$s;gsGCK9Pv0$NtAsxarJ=sl~Lxcf0m(Ps7?QdH0d&MtAngEHB8L>KdK zF~0sXnd+>nC5f1TDoylmiLx#|CR?nu#es&pTF!?tf2taDjS|`0hur06N|{APJQ+I+ zJpP&9`YL4+pqPA(n5FX|O;nFn;N01>Naxc#W6Ibp_Eg8mGQw3v^e$ychgn5v)uxC1zJ(?$PyW=O^r@Vq;#SMtLiIF2E~)6qoajcBvQZZ z`9?)YNf}d5b?pjmqMKAOPDP)Is;bkYd8MVlqZ~FxQr224^br!fBx?eyylRxl0M0eE zxvkThl5H4%Z-bYkar9*!CV-v*Q7QC|HqcD&8Wrl`m1tgKm0cZ6BjeLH**mMT^aE;= zu%)SBB40sPEuhhMV*NWC>MANdHQu@^^pWgRDz2C*G{0nFXuC^qio8JN;dyEHypk{B z>4nMym7}N5_%1UASRVzE0c6Sal@*BP@;c8L(?0aek6dH1c{@5sn>sprKf2;IBZN)H z%(_}x=vg}1tDYD@Ol^`n$LplMB7ImkvPzPHG)J9Ukd>A#PNLNQp-CB=s4O}vYiP6X ze7eDz$Rh=orma(cuRSTNA)KW_`*NoZ1 z=X#G!1#(zgEdk9Dpn9AhzOuGb_PJ%jtQt=b%Zf!eg8DhWfD(}7u&RRO>FT|*v5PN4 zCyMbSz5#O^8uePX48d7|%fMQFM*A;5y%LJKi0Ddea{41KzmgPFwBsUF!S>h~rz@Y{fuDURi<3W z4q#iw3*SXkiQLB)LKVwJ;UsAkvYf~hcrAaiHbxb)MGsr^tJI7vl^!l#gRj=h8Dv)A z`Y)DVt&3Nfk`XOC*U%y)ZWtv}YBgsVG7!v|YBe-RTR&xht3`!j8c)_hRmluJ=U8Xq zm02h?b8t$>aIys(%9<9K6`Yln#vOA#t!pzRLTF%VCWpSwyW$7KAYj0$<*%93=QmZ9 zra(13S-QlsBw<{-<#KG$Oob$Cf}>TZEVoV+dW^)L>2yJ=1H^`u(P$-)t6#cac`oP- zix2kpnl6jF3f9XtRti;Jd^_UOajM1Egt+?5bf#I!!6Y73=%rpWolLRGQfW3wr=_Y- zr&z&)IGQAm9jF%Hl`@-J9TuopgT&{z*y8i9bo%^pLq!}a;xDw4=wWHyrIW>>9Dg$|zeu#ASCoL| z@wB{aV$=9)8^m92@icjQ%ms0GA1-~M0k@vV@&EMDl6-pHK-2fav!?1QVwgHjaTKw;zjs4 zrN*RWC9u;QpAM%*00t-YggmQcZ%Fd|W?oKv`Gv``p1`l97c|<9B$GvggDJ_Dg3Wk6 zpY!+1(6b)c(vlIInyl<`^HuV_2V1eSZZa1ndgs!%=U_{gWJkglE3BBUmQ+#G-1K^} z?5OmuGN0ZpRiZ5m1eqqSSp@T0=47$IqD|5hW8B_GEVG&}ObyLSC8fVx4SEq_YuO|R z=JnvG!dQ9HbfynInnd+*l5|m%)=Ej=s%;X}8nQ1-WFN`eUa_aK_yaR=ETa?rto^(^ z`By6&IY}y=H*TIuyduk7S6JRAnva87{8|e^IlxoV|Z5`CP;ieS7o zdgQ64QUu?+3CbaLe2NRta$3(UOqrt32+OJPsdLr2?}coijdm8vQ)2pZfm&7T>lyk) zzW$+?wXy?(Gtz!*QN6eeJXQ94r3@**zEOlb8r|hJ^PIO}?wqLi$j9ctwk)%Ym<~pa zaOURb<~WzAhdmZLr!lWpO_{2E0H-` zy*{0?p`z9|Rc&JEGWuSrbGrO@4%}pBo94Xwnsb~nCfgb#DNKG_clCLQbDS~mTQNPW zKgVh77m~>#igTPjx`!BfUYF&x--@;OlogFPNcEmQU9|T|ROJm|aznDc8@aFe8)e-& z&iH;ML9Amcs_NJkGaZJ$ODqxnKO>*!VU}2fZKJHGkR?y{Ps!2#WX=&CUUIYq{hyJa zWJBE2AzBfMu5)IO$XSwq$v>3}7E1k;DJhhSrCh)(y%?@ZM_WuQ|9z)E{FiKG^B|qF z|LXmmo6`NQC%_n)mcC^s{O9Chb+KlZI+v!NY#i~A-Py5cS+f_O72nZf>BI*5Kdpu& z1N@wS?9Nra$CI(@aA2h;+Mh|)&-4-h;H`;`skqtLZq{JM&1cD-WZ7X2Ps5y_)!Y^1 zgMO@b$`tiv=+qhV%%T(`YXueqxMj<$I1GWmyIehGhf37SFfL_pWJT*w(Eq(x9AHOA z^zWZrS+v&JMPJ*j&%pZS9AjUz;0&tgwL|!WD(bRhn&T{U*&}B$%l-DXQSXh9QE+4o zI7hDnbanKJT5s4F6j570#+BGrUmF{v?#TFquaB*(Kfg~|IQ6+gk=y8#p?`jrG3Ib3 z>$*M@z;opOa(U1IYeK($E?ce8Vs0FtdQa6o}!j4JdJ?$OHD}Ro<25;vtr27IZ2`d4`?q@|8;X7ov-??bNYyX`iA=I8xmprhZ#RT%;x{;$k9V7h5d6xMgRv;qK{r4>C))`%=rD^ zy8omXhDyv+H{nQ?jD1yUvwEyr%;NNxi20+()Ntf=Y75tzC`b2PX>w*xZB&^>M|Go} z+<2F+O$MCBwO-nFh$>@P%{+Jgz4*DhZ!uO&ZnUR!nA z@z1IsgZ?j5drJRw>h@bZpwp(Adlr3{g%R-^rhSuy)Lp08*!S9T6UqA@IemL1d93D+ znGVPGjC3SRsK2@#_q{Flo%O>HDXZP1M*Ytu?JuF#MT%x6+oiC&>aV;%@i(LY&)k3bo6)C4{V%`2@HeAh2GXD5a{HUnXSqF& zkjB3meX9SL+ulC?m+}Aq&FKF-`d?FMHFaBmIPQP%w3XxwAa?VW=j-J~1J$c$>MvgC zCsby~zIUOYhnU^y=Oy{c((pV*>PTvm?|bpL#;R2R+`s05H@mt=6AB3n30}h0gb?BT zgm>QQ>i&T6C&HJ6LHoM8ziJDaT(=H7)P3%NL)~wtAL>2<`~~3<;R=GI|Do=de#CvL z(@9z@%pA!5VTZbReEm@OWUzr?5bh^Z`C)Lltv 
zKUJrbv{pEC#G&pBCqU2mL){M&-Xk;-PUGIQSL^U1`1cM!>mv=bNVn0JR<8RJrrGlR zz1n)BNyBMOPim65%vk$mTkqFoiIa z;3h00TuN9$XdvA8*rD#bo;=k3FyRHldj#XDL)~8`j3b;+s3KfL_zvMg!Vbb)gwF|I zX+P9GnovTxl+Z+IBm9`KneZFJCxl}+9qJxIIG1n{;R?b{gnuV&G($VISl9oT@!5Z{ zc7C)4ee}=J{VHTV=_pRk4SKEc8L@dV2TvEu^&xU&rb#?O9%) z9JYr%7IyU;Ah%})LOCi_}jy**t&Lv4y49|LxlGyEH!+0l1r z4!7Q(m$ICM2>k}T#G4I#8M7+kW~H}bRgYWh#oq~c?euVTO#H@Erf5t^ zNk4Nxy8i60<)t&Cv<>S*zp>be`yQ^rXUh`)<}C0}CW{)pljNNT24UJRAj$Kh% znaiA$%O`-xlA>{iyc4Xpl8@l=MdOv|&jtRo%NLK!AFrg{Mw8jjMEb{#GfU{vhy=!@ z%Ac=mPL@6G>;2EU2d@2`7czWu%@^Gpx!%zAMfW~1{m>WPSu4|wQ@{M8`>t7rF%8@Z z)`5G#?}J_7!{9KNVLT5`1wR9qf+ux<(Y+e{26z|vFW}7=8phH6ySg`mK5!4XF}d~SXcK3@ZvMN zy0>0z7~_WHp8yAD^LvS5oH~l%;0d{1-C0WwW9ZnfZZ~*pURSpt>;i8CcjXg5D$RIg zSyy-6=rm&|*hTx_2Z!+qv-2yux~GDdfJ?zTa5Z>4co+C2xDnh3?g5uACqMXoa2Wh~ z6`Tt8t0X^o0=ODH1>6W<)_{A!eP9=OTow7s3}ZYv6}$~x3T_2ggCBr*fq5SCgFbK% zXjGFQJQo~xo?+YsP6ZzVmx6o1)!@_>0^bIULBorj zf>(k!gAX(yub}e^>H{wYyTEneunP?1?nd%~onRF>JU~8h`IY1Ye*o?V_cW0Y{Ax4# z<{QT1RpbM|4OW4>t|A|J+|^y(4}hb=o#1)k2Ves@cmeza$Af3A#@`1%0IminUPC*< z#o$)Z2krxJ1=ANA#?QbkaM-nsC-5S$4!jn;8N45S0DKbM3GN0z06W3Ki+K0T8vKl4 zHs}T?f_2~p;LYF;@B#4CwX_#}`#RbSe(ieNyO{LgRB%4H6s!kVgExV9fj|9rz;)m{@Luo-;0E-~xk2axw}RNN)p}1-=0m%ZK%DB|rEza2+`L z+q4_J7~BaqgCBsmfrBgQKX5d-5p;up0{vk6ZOjMY-QY&>Yq!&G@LI47ycZmX!TKyX z72F3d1qXfyzJXcbU0^e~5!?yx0W;^T0Q7D!2?>3SI@S2JZsz0v`i6g2tWX z2eZNSYQ`;?1#Se3!B4;{Fz>sx8=MZ_3%(BS1m6We08joN?OuUi1xJH^&<(bNb>MB_ z&EQsW1NhQi)C)$yF7QimSPlL0FVGEcxSM*x<^M`P@Mquy;Lv|VKEWyAC!h-)Qp-FE zP6ZGCJNdvN_d+k22i^-_2JQrJ0zUxn1P5P6{or`;Q*a@8)(`0?uo%1ztOhrLw}QLD zhrmz3SHU5d8^!@}Jb1#7$Pea%e(*f-HgGAp0lXF54Z81x&vnQRm<8Ssy1{KApJq0e z-A_L7DsTgM8@L;M4EzM#4Gviez2JCo$d9QP%me+P8@vr{1vi4XfqTHW!Ss6cF_;CO z`XBT!SPWKymx1fRHt=5XKftZvPH-Ri2AJ+ePJTlFf~BAvtO4u5Tfm#az2FA$LvT0v zIrs@U@B!#V;P*g3_y~9#IPj;?4GsbKfYZS)@H}wX73e2$D!36`3cdiY z2Hybh0^bEUg6^M@AG{Rol6Y{K5Bu#w@`H20rC=?%8hjPJ7tDQ#c7yZ5ec&=My%Bi@ zv%oq33E#ke{{`Q`Tfm#a$v>yv;A(IW_*<|G`~V#0$G+S^z2Mr%$OjfaNj|U*+yE|r zihSVDz%Fn%I4nRvgHypF?a&KO23LcNz`MXoa3gpHxCdMdc7cy=g5E0`r(iL-53B+Y zfa}1;n`tjt18xQX4crHw`ZVLBiTM^B4es7fKY^J$=qK=P@Gda-8R`Yy;2!WYunTMk zhc(k*&r&b=9k2@A0ImZ!gZF|%o}*rH8MqI;225XN7*B&);IF`9aLP{P3v_`ugV%!( zfM33Ve1U_1g?xcO0*75i|ASM(pS?)E;8t)Qco4i79Q_jYf{Vd@;A$}aYQ{A<8hrg_ z+6&IvO?yENxB>hbxEtIJegb|74p|LYg;LcxDFF5oK>IL5i_ka_B1E0Ye;IM1S4^9RB;8O4ga5Xq= z5B&sQ`&-%z-UaRhH-PDD&^KTfc<1k^7u*Ndf&KQvXYe@i0q|?!PH-*w0eCAocrEk7 zo3s}k{1)v6AAOs8!G?FJ7o4$=dcj-3ec*xLlkYmpeV6uv*MV;EH((w32k>Ulc#nF) zDd0}98T}YT__%h#T z4|P8P-U{vp-vB=WPdMgK_mCSIhv0bddT=5580ZI&K90Rba2U7|YzFs$w}4&Xo8YkZ ztZ�F9vtB&s_(;3*HR&V-I`-SPJd|Z^~w`GYH?nEb!DE@`2T075HD^&EQV(0q`KW z6Fl*(L){;MS>WKC=r?dQ*aj{H9|8T~OWiw7!P~)w z;Nzel>;P{A9izz)P6Ky?ZtxTE9&pIb%#YxB@VGJL2e*T(!IN^~7nlcb1XqLmz#G8y zF#H$`-QaZ44Z6WPunoK!+zCDaUU4?{g6-fZ;Ey;fFzgoi4Ne6=2bY3_IX|!(JQchP zd>7mazBG}1;7OC9_nU@M295{s2A6^-Or}0C3;chKoe#W|W!<=Ub{>^*A?o5aoJcrP zaUMx0B2Gk{h&UBtBH}{Csfd#dry@=?Tt>PGX%XpC#AQfJNf!|<BC zq_mW5@%n?o`CZ@bIeYGN+C6$dpWbWk`riNV=YH4@^GFA$LFY1#Q|N_{ z!XQk+D0~4X;jLq6CtMFpa0{%#-O%+o``GiTFI?rLTzKpSlnVne12DaFCLP=c3-A+IfrI_bi*MR&=Ryy>8V2BO7=d@f1kAt;+z1P> zdI{yi!za+*B<_Yhe;@fLS=@O3H;{Sc8k8>)STl<5y8GTm{2$ z7mP#OGlKVQ)10R3^_*EEzAHXCWJcV*$43^*~Sc7js*LN8I zAmzgKFbq@I&|bI}X5b!JfFq_+F7!jk6U;B@fk$0Sd*MtNg$rO3E{9o|hDEp@R^f5e zDfdbC%g_hs!Vv77L3`m8n1-XTr(8GzR$u};Qp|toff*Qp!$PzddSL=ifEgHs1^63S zfp0^{cR7E2f%I@X48U7p1P;A{dc(6|7EXpmIDHoNhI66wDVy!28>u&(4?}PzjKP;+ z3OZ*qPH;Lb!&{*7H20}*qFi`om~x@_X8H?`he@~uX5ngBgm1tqtU%`~o9&`om|rjj zgRlf+@R(caFE|wD;dEGrE1}~V@_}ABU=H<#ei(SH@J@8`~ zfL(LxCm4VUI0I(jCRl)ffE9S^?WBK}a-bK!8==1NGk4Hl=z~c(9p+#Zmf@B2NSCIc 
zp$Be+LAV)4;m0ru2Yiu!f}>ywj`$M&`#sjFDCyuJ7=UgVfyui`2bbJSd2sQ4lm}Np z<2lZw&?71024EHLg02kby{}SlI0c5`GMIoVn1O3x0p?)^mZ9T^)N2Xl zLMIHsp)dl+!2}G$42;79Tmvic4d{4*;|6-*)n6k$jKBz71QYN%n1QiJ=`Xk$*5EtP z^&-b3^udvj(cf?^jKS>*`U~!cc{u3n^cTDc8tWK8=!Pj6fLmb{z6+DkzLfrg9$17? zScTt%&L1(Kpcih1LAVdbV83rL&hT28gSWvFydT!!3g~)?et9)@5A#^8C&s5cD5 zJX{Vda5r?k%yIQN{RKzB0GtRT@Bx^BYhVU$gGJc2oO;8F(3xeQ4ZSb{gYZ=tg?nHM zPW&eIhO=P_J_KuUJ#_t;en~Pvp#z5D5}1HztfaqS3>M++?~o3rpldzpp$~3`A@~lA z!EZi6d*M!)hc#G+!=GfFe!{v8-7pCKFb2bLbc%6;ahQcK!XkVfR$>3|Qtk%kA@sv3 zFbto9akvSlVFl*lu%{>&`k?Vs)<5Wm^PnHDf?>EB#$o(v%7t5C5$=aoIA|69ypiJ{ z`d|Wv;S(?pvoH-`gLznlWmtp8&)And!}vgBHRA)%gi&}IOu`h*!pYAvKJW=xgE{ER zF%O|1y3&*j7r;3D9ZbXbVIJ;-WjO46l>2j=Z3pzi5zkRByaq<$oiGU(!7MC(pK{?& zScCo6Q0^56NaJNAU&J{({L8d!v|m)J_e1~Ic}gE?uCAM z=Me|HBXAi^$oDVWUxHCM3LE{bLp&Ranet6NRNe?H(I9vwPa4pQkJS@YV(D*&`eE{j97y99N7>0{s9Hw9z zu7`P8gk@NT#unD6qeu@whCw)XApHg>!z4_?EL;tX(02^|2Csy!KiF*dK_7e!hTwBB z1~D<*Rcn?D{vfiY-OJeJ#Y>Tz^}px{D+fr;qjlLTsRMwU<}sa6VUZ0^9=gn zhcE<34We8agemwQ%)_Jpi}u1Bp`%DS&;!@Q04%}?JO~qT@^O?4XTc&|1gmfjbiT!U z4Sn#_pQT)QB8as%9&UkUxCa_< z+iXW3PkJ~J`e7VK;A)tFn_&i)VFB7)^f%lCo!gm*Cvbc~7YxD~Fbcgxs5iV4=3oex z;8IwF15c#fKXUy8{cy)n>J9h6IJAF`dc)B$4<8styLu%7v?7430UEafU@$fb+)CPT22!j_1G7U(gSIKE?-5 zhY5HS%)l?f0$d9#@D=DNvwwjeIQRm_7mkGy7=Q`58D`<2v9uG~FJxZ6XR}=b-S7z* zfa_rdZh;Bt8b`TsJS@NvtiU^y)X}56^=LPg99f~U+9G~cqL51RWJv;E+-wF z2pxatzAp5@f50Fd5a76h<6#nB3A6AzScGv{g+GVR4>^9I7w&^W*#8RB!;vrvuY_5+ z1QuZhR^jj~srN^WBlN*{U>Mr2qMzU~FbyZdJPgA!d=MIYST~^?#wXLyumr;}cs1$a zBAAA+!vef-3hjoIrqb?za9lwTd>jVgDj0#gU;-LJ<`>)pi*WQc)Ema2^JC^O^ukwQ z5FRs){)S$dg0o=`-U&Zg`;2shG)=Ua4{^xWv~iYL1&eD z2EFiO7=*#=>2J6YCgI;<7LE*&9xjJfIPVMe_rJKG3Vrazne-Q&1>^7;n1(xH9-edq z{S8CV*vmS1BmD&j%%;EKqhZR0d66;a^}99)wxge*ygrZ-g~?KXmQq zxf1ljbua{vxQ}|n&%rc2ALijSSca>iQDYnzGQRLR7=*9EDBKN`@F2{>W%tux&>kb* z0gijyP?C@VH@=j<-+7*%7yR17_7h)`~>D; z=fji>N5dL?9=bX@YERi$1TTj*cr$c;it&IxxE6-sn=l6d0aNhE<)nur zU2k7U|$(7=z1T z3a)}VScWBNUqQL>L+ExeufI)y!FymBu7z>90j8m2CH)NFhZR_Zj!)AM-(g7}eV2B_kuU`(!5o|eORxkT1F1js!2VBBE}R4-FborL5zN38EWjMBz$s5N zFOTl9ZG>Jpd==%wsW1lbgDLnl%)wv55<2E$D-PfgyMR#^8~wNe@qkIXDiM;J2XR?6B>GZaD5)+70K!2z&%4 z;0~CDlhPcIFa)bG2c4h6Loc*_kK+-JfKfOfCgHO%3;R7sdiXx9!5VZ8BL4f-8!muh zxE#h|2BzU_Fb^lKp}*lu==`q^+e^?33orXfPdYehE#m~kumTrB$8n^G9{4&8 zz&$Vm`~QIULKn=y>97FL%urwW6m)&IgZK8RFPsNMa2t%lzrqy!7tF!V7wBhrG_1if z&^5Tj_D$%6c^HBNUnD&o1ygVW%)wc(1dVl!)A9Un1iGOI`r%v{hH)5&KZa>&{}J_u z*TE{h3p!mLwy#4kTmys9`4asFhr=Ws4YP11EW(dq4Swon>U~0o?Ih@f7r+pF6vp8D zFbxN187KH9Sb>j0#}L*_=z+h00XXN!^f&xDOu)J8DHkST5vE}ku7}POJ8UOzpj>z! 
z48b6b!JAJ5Jm3-E1NfxDn%ILFVcv>T3v0k{H2;eMEe z{eQ`L!?~~sW3URBLgy*;6ZFC(^OOq*!#JD`)9@~shhK(e_zh^B%6gLLpF7=b7Jp7iisn1NTr0=yko;8&od ztHbtd=z+!-(!+~j1TKOJ_%oP+pTGh<=MSWZ^P%HRj(_NZL*67kybeZS5+>m9VFtS2 zB0aos8|h&jI!1Qb)EU7+ zfoov`+TI~OoCXVUDXhTvpyMpY{eMUg?|}jMI~akZOQeVEVFrHgPo#(6hZXoPbe!E` z`y2GYy)XbhJ4g@jhY9#&n1LU|0z3dK@YFw(-phXRUDCs$|4VxKMHqpvzy$2vNqRUD z7U0vc0{vyupF=-D5BviRz#HBpJ+$p2J-h^FU>FwQBkz+Qegqw(7>^G~5B~`RaOU4g z4{wGEn1va5a)tEbVFhl2j&s@P{hjnM00S@vBXAu|z;|E<_WzLda4f9A^FAW|Xx8~X ztPAk=f3TjxAHf)0^-qp7Sgeu`I{!sFcp`M1*I_#idf?Q(tfO$*C#*;C`!ET2!7TLr zKh`6-8dl*a&^d?b|Uz$BbhBR#wg7U2?Dg};N&^I2aGkRF~7gYdkA zq=yq>63&EKcn2)PhhY^ihfW{oYv_d=U=aQRM&W*#gy(dV9!6miJ_D=pedxS^^C5qs z$_u?P2tzOm6EF$4!YuquM|XD-dSDe!gU+!Xwi}@rJ^_R9O&EpyVG#+S1df~T@AU&MekM!^f zn1ru?iu7>k0Mf&munH?jk^Ul%$D>IPPd=`@I|TP0-`yR99#?mF3V!i~?(Q6XZb)}` z30`($cXth5HMG0iHJ;-E`rutK1WPamy`LjJOv5~M4(sl&$bUoE#mpDzg?Si&-jk>| zOv9A?x4XN$0R6BGgRlnk(B2+(qBUU&RrS(t>u&(kk33d=AFjmwEUi+GraQCNlvXq-)d ziidfag*6!T(tiQ+fdQC=5$HaLcEda@!7{8uV-)Sbf;bp}-gCRV<1h$QFbZ?93d_(q zkN7LeXAJQ$c|P$l3sbNNv#<;c(D0ECI-zkD`9l|sLoak+KtIU$Fbe%J0fR6N!!QS< zun6O@0+Y~~%shiG=p0La&Uin_!xvM&d=JYo4Qnt9om0u*Px&wm z{V)zgFbQMOdkN)2Kg_`(EW$9Xz$i3=q=PP)gkG420honhn1?Y~gh^P28CZpRXiQ+9 zKqst1H*{P>xzG)Rmol$l5+-39W?&ZPVIG!X5muowk@VB3KXgMs^uZtu!Z3`$D2&50 z%!nI0$V0be92D2~;i!cMLumGJ`Gp^7N9Wy8gx?vUuU=@a;e+v1Che`P! z7Ulb?ly^PlK^H7TFZ2dU592Tli!cU**U)a5hDBI~73iHtJ|Vt`Zs@+2_RD|6D9pkH z%)>M^rjrl!!Xk`A;|r7nz0kOh@}U=oVHzf2eg^4b5$0eS7GV`u!OcMYmS7oH#Xm^7chC+Pgn1Z&MHq)p9?{DGbu64>>sTAlr!hqu@9d+cr{T{MiIQaY#XS;{-Z9TmoZeG8?d!l?{{-2}`@xRT3YuzWv zcNYy9_=x@DPai&=0{BnyG5+@&{zlmJ_E0@Qe2V`aaeS@&7}Z~4rO)yG|0DfTs-I-} z65qd%KTY*v%h&k+A8ovge%OaUSxrC7O7FwlsNemnx8G9_aPb|>l`VunU-b>$Xg~hm zHa>-4j`yhPZ?($L;h(|}RQ=_aFX5Nsk5v5x%h&J=8MhNvKg04aJJ0*@XR3a#<$d@i zZF~s-ZTw(0{WVtl82)kmajKtc`4s+Z_z|kV+44F3H}Sgqm+;H+7pv(PS?O!|ALC{I zHs{}imUngXo&@7RTlMxY)B|LEefTHw;a>iRrVruY#p~)H!|!f0zf$-;_#1lV-xwe% zKZhU5`0B=|gdc}LMNL23YF`aM5r47jmHu%Vyr+jBulfh9^gg`Iudl1#zQk%jzJ|Y0 z^$V@^G5qnaTKDCuw>KuJv@eC9fY%*=IeY-GJ3dPI>G)Z_^1r@WzZ!leeu(O?v+CzM zjOXn5sOs%sY^L|&cehC&!oQE#tv@mR$9UcNrSSF>^y^CwKeLT5;Wy%S{Zqrgh99Z6 z?{=mAhx7g(KCF7XHNSoM!9(=@7s4Nh*R4-6d_R1bn*VK9`6>J${PC)vX89cc82l*J zf64MC{Hbly*YIxqxoZ0PR(jVF9OG@$`|zXN_z>RP#>em@+xQgTgV)tBhxfNhU&4=T z<7@bK?Q?OWZr45^UfQRtUkD#)Q+^CTv5imR+qEx;Z`ZyOUfP#X$JhSV`T)!NQp2yo z-`&gK+4QbY@pql@y8VX_|4|zs!vAj@AHx^%|D~2c#hQQeb;rWVf%?wuh$pXj^mDf3 z)%}?ppCP@>f8*>SJcKa!SmrSx>7|`z%GpSq?mTPs=e=0`=OjT-9*u2SJ-r+MGJb&S zFSF|D$3N4?hw%M?A&3;>8 z)jNcrfgi5=8?E}3DQ6b`v+P5RAGvzfyYnb}Jy6CkO+HhG)w-`x^RZ8C*4IP&2k~R1 zT~GDNXQb7xAaReM#NQ2(xRrh4&T7O-c}@rKi@963OZq28KEK4*^O>j5CqzEmPuAyC zCZB)d>-qTg`J~CG|0()>JnUmm!PoN{tk0)HKE6|1=OgWQe46pX>#kQk_~~tY06z_{ z8}B0Z4B-=$Wh^_s*Ld3(Tk|GKKAYO)<1#pJsi%3ZCSS|=WQf~L zoKL>!iM!Y8w*tP5Z`J>jzJlM4KTS=qTn{?9kgVbdss3s!e-B>L>wEzJE?&2uNAPds zr7q3-Uu)%`z!&jDRo~dIHrj7`nqj3^*5LyFRnoWW=X(3`KgY{D*(~4QI9bRzMA;8H zN7TB{koCd#*k$u_V)A{>4q%r<>T(3C$POCj3Hhw#Z)9nvp_`P^teN*_4 z+xQ&*!#2Kz-`&R7@VnZ07bm2hZM+XJ?K@NLPv!g&!b|;FzODW?ufL_gWB5Pf=c``k z{hUT0%GWY(Y2r@uw4S$;kB5DX2Va$OyW_vC@tF8 zPuv;yu7Oj#Y@I``^MH$u&x9ZB@4l6^)_iGPKuUgT@>`0(Ov*i5$xqs_xoe=^JT@h{ z)Uzo2t<3)J#e6n~_K7#on|_?^%j`eg-(6vUYV=d$0$=Y;7!M!Td22`4Q2YN<*|X)t zlU+9ZrCmesiEBmrJj9ZA$ z#v8r!vn%_+H2I{6J73MmypI&%I}iJ#DL>oay;b__+e$t?{dET2bO*ikEqZGke`d2A z>irPrCgX9`U$+j#@gwoNbs&wG`K|ML{Hbkx8Sif6jnCL@L-D7p{d-HJ4Khv%4&bG~ z-rv1l#{G1qKV%;LwlR;q#AS&a`}+RwHzjV25_jd&&aWGZ!yZ1ob5KXuz!~(EIjb7S zcbxJItOL6KN#o0SZTs=>w((_rsf{-{p>5ardix#x4xzw(I$1a3pRwlsrHRgmjZTLg zboKTVH{$pEyZzk3Itt>f;G^^C+<@w1BiyNkX2@9SRb6Qi8Ja2{MG_4&YQ zwO7Xbo7Q7L-nS{`DD{^9t&mSr 
zlaF}EXZamY8}Gp<@VfrXQjY~a^?2_Xt6%!mBT7DBBkpv{Y_?ZfZ))Tt^_2NyJfq~J z%o(X~j(kRS)w(ZL^RbU_&X*GY3vGN2AIFbS(<}GaUEH`|ir4KoefSt&j{RmksH1Fw zq<$g%GEI6Jw=(;$MfjC`Hhz1w)nC%irHwf!`J~C`Lrp&7^Y~JmdX({d@IGRj^;E7K z3=SyAnQg9rOs_lc{D^*}KVQw?JU>hQ!gx2n)p(1K<459q^`mlsB#rmub>oo7pN0RD zTE2PXL-H@r_f zUqX7_bz2(0w2jZ>lbZ7D^UO@oogAxvw6F8n##%1bBE;t4!aqK;)~&0j2mc6OcYh#Y zrl-5Da+Gx}f`5qgI-kJD@VfQI>12JzpGWq_ngQ1F(r0}slFxm_wVE%|A62~M^MqPY zIp&sG>%8=biyLn55O=>Cr##>E;YWR5|9l{XUx?R@YYhJoUN^5&_($-%d6hHM>&B~u zUrc(MYmJBU{JCbPpQ?KMmCg0j#SM`x>4UxeRZZ{1zmNB+o_>@^2-1Hcym6NPdOl`) zF7K@RDfc~7_`T$>>z^FHhS&8^$xN^7pBnxX((C%iHMCLwC2IZb7k~@JDOvC*u`l|6{|C1je2Ht?@FS`SEpq-xedTpX6gFE}+CU_WO3Z;*s|F zm{=cr_4mU<_;MQ`!@q-Xb^J+v4fZDk&f)oc2cJLslvUqGJ*LU(Epf6hn>ec0{eskY z=fB0R?x}Bu^4=KTdY(x>j+1!b1AiNzoAXfC#l|B~`L;gayu|s=)1N1T__281e2d~e z_$wsCp5vk3Ci_K=S=|^1g9H0&;&@HVW?Z1OORhuet#8yPPkxCp`twQ|zeMAu-4V)( z@kNR2t+~>UFA$SFGL)CgK^D|(2UArTADQB{pUi#O(CnjIZyh;+6A+A+E;ljt zQN%{Pu6{`~y{>*)yp*roM-=fdweeNF^p9>GadN_5g&(EXpZ=Al%)@snI$6r!$Y6t=cL5kTuh5#~+_1 zeJd~h6k^}A4qrdsk3P&Azdpyi>s0<7E8hBvHE2Z>D9={#08;>%6M;mXP#(4^_tDhS$>2>w<IWIG3Vt8yuU7j}j*rGxOTMiiuTE|VoaAfW9_eo{{uq3#>lE=p z{6Wgm`6zxj{$?q#XYH^z4j@UN#E&F@-Fp{V{4jj0`7P;-_#ql!pa1NW2IJ*CWPHYM z&40W3@+99&J}yoGF`AvKr%ToWXtT3-tAH<7eX~&*pqJ_k>bUKmHcHZk~kkL42?0 zeD*u)m5}st{Bw9+Kc(^ZvHbfYYWemT>*=LV^?e!sF+LlYbz60mwtl(MR*4Jp|Mn60 zyu_W+C+=x0E=FAJLi4`u@k(5u`?kI=-k&7yM)GU+i}L<<2>(aCu3uvK&3GA_v-I-T?{|q$;}iHJ`P`gm<~4)(JbpnNU&e>=x^>Lpf_*Jsw~o5; zDZK2Ln&q2&C241d{nmPX%+CC~zh=cW+IeTs^=E{9`d_Kfr%FC+`R3pEc%v=CPLt6~nG+U5?Z@h#!c5LMqm?zmsw8a~&2VZZUD=)Hvn2 zslonaHGZ?)|9JBMS$Q?;lj~VO^5irATK#>fGJZ53Sb0#Np667?Io!KzW<8YqJYLi5&TBz@yZobO`d;Tf<^D?& zU!?q2$CLDvi~W$)Zy}$JRsUzzQRZ0V_3e6p%D&8fU9I~yiCfYqF4yQ%$uC0OtHkj$ zOPg_P@3_uu8+|YR<~f)9?KAY}p#c8zc;&tbt1b*=KtAdWk?R!rw%^@h~PA3rkGdOfX|!;SV3ek;kF<1c;NXI=IYclZ}t=Og8W z@Mqz5=j|ANNE@HR_s8qzaSlHae~emB<@bRlydA$n_4dc>1263iv;SFuuaEoF|LWCN zb==)7m@m!LpI`j=b@=1da+Ke(h4E|ft?DWDh~uBYAF97I#QAT~_jdt*Cti2HuHfh4 zb>ra}!}VGl@4-*N>*j9&pTO(dS!5r!2w(5-tE$@mPLj`tS*`n1`pd<>WgR{v$LlG5 z@>y*iuO;&F&aQR;L>(9VOU?1G;S2af^}G9g{toU&GX7e+Pe8ulmlb*H@0G4E~IpYuz(?`No5G@df;?_*VTdzJkB5jd%Ds58|07RvXNH zuB7+iZ^4gM{e6vfRD1v*$8&nI(wpnC_z3oeEBNoW zN$J0m9{l1q`3LYR{2;ac%KPIH{Bk^}8>@fJcYR%amB6pUvrV&nWBBX)y9_>y z*B!qFGkq&B^R$9bkp2oa|N36adhR6ka5BIz5f@hDl;8b%@q6)8dwJ`*c@TfvEv>Il zB>yOW0)D8PUfEwHO@FlNmFMDF{Akk4d9ZnYQht|P#4p63pnB!}Sj8{Eb2_!so4-4h zeu^=HAHg3f>&xFhvSMT})?uwLz6;s+-P$@IX=ez(0Drt%j`==_lvAdhC|<5>jO9u> z=CzF^lYG+TvywR7zCMqC1mEgDq?8k2KeY@$Qrgp{lq2mK-e`}+$-Zpm9P_@zN}O^WNPfPHxi3uI#U1=d&o!OQsq-4g6yMg5-!O5Dp4QKoIQ}`j?*2v^ zzg3f7>XFCq>zkf>*qcvb8}+CVH)NImy3av_kHeoX74K=!E!MpE;N5uLx*EWL4zF9M zBKU!Lwo6w2=KC>jzN)ex8-@q`9V}%&+})^yB$IiUB_A(wy6ev(ehj|V`XuSA_)%@V z^AgsTHr|Ur1Fzf11o3C%yQIQB{b-(Fr2Hs;e4G4}__1w#7Jpuw@{9Nj+mv6$U(qJL za{~XKL!0zo{Ec|sI0x~Q@jcGv=kP1=t>%gN5`Kxsd+=fQO$+gx`E0!TfmPSbdw#ED-_TQEX_x0Rp2vQ_ z@w>-nyJSA>?1^ghPms86#If93c@ig=N>Z;VejmQoK3~d7;y=Bn&+#MWn0p_|R_gB_ z$2v!xTrV1*`@kyevc~V^?B=s*bbZ_^#$3FOA~I;C17g#CzNLEWQihsvS~(5kFMprJYs$G5ABZ({;Jc_AGH%sqIkK z4v{r>;oIC^`=uCZ@yKF9L1AXa|Z`urySndxxWG(r0V{atm z6eFMAFX;0zIDkIE*YjDS&nHhl@4u+eCqO=X@n=YVZtRnf`MYMRkAn;2+B*Hd!h^T} zsL%6S<-Tc*r#RdF_c-{G-f-m8H)TKGL z)UW4#b;p(TwWFMhb1 z{>H{yFW!f*;isrxxxNYEyI$tLpz0~#yo8c(>+5oyxH-i2s;6?^PU92!0ct+x@BXDf z^Z14M@2Kkp<#c|d_jO2xxW8pvuV<2vV=~VlethWjwwJi=#I>rA^h*%`8h(OWU-K4| z_$a=DZ#6%}C-FPl_$+=4zSTM=>5KT?cwK*1@dMVkuD|qy^J>PG^oQzCA93@3qHjkC zKMUXLc$RWv_!)S9v1rZD#<6rn&&Wx8(!^b|p>=)5=kZ>=uD{Dt4!%`AB)!1|4&qyl zht$)JpMXEq`I0|WWP6)9rkT|q<$M{zAO2H)JrZ~uo^6VfUitki{xp1l)ho}*3;1L4 zvwL}tH}l?~d@b#*5_bi0pI76Q>rf{*HfQ1wb=>-hTS=U5o`vu^e5>Q6KK}T%_(RRJ 
zG;zoOtaZOiK6(5kJZCnm-&*XqE5!B7wXTo!i-QZcz2u{tM;^S3^t$6NfFFg|&9ex8 zB>ot+p62s9X-5J-41cJ3mL=|G;&k(@h~L?!9#wp)jdxzd{vF?{p3+ZV{AP`pb_DSo z@rRl}G2*Jk`PKeV*8LRzxSzKkFDWO7KOBFk`BNtD-d9@pm*itim&UVz)!*J=1&B_0I%!U1b%Lt{4@CJ_`B8qG9UO$`33w7ZSt?+SGDnuYk5A?rhE_n zp*H0Q@NczAAHn|;KciRut>qKw|*R)(|O*B*X>ii_$Z!V3R&w7Pex>CB;ydohhA-+ztl5|zY>3_<2*&&pNZ2Q z=Q;dawtw&OinwkDk$Eo`|-v1&WKWUo#X5~2b<0E+8Iv>W*z#r;3O%V6n zeCzf~eKYud_(L71MdCL6s&##2e5&~8@VevF$pkInTlJfy_u^l}pU&rIe<=5fgZQm@ z-TaB-x8PZptn~KD^^LXUpTr;bTI=IP`oYD1W*nZ|H#Xzf+pRp9>pi!MCC*RWbmB%y z+_OsDml}I`zOK(#<9eQh5Z7w_q&{wZ5ILdBph+E#1-~YXBmDwjh*_W;D$?r`iPRUQ|U#9*Wh|`US5#o6kzSX)T^>O2K z_-FatY=2{a@$H_TlyU;ZjsCU%dMtt;g=ZUL<)i%GI)R^y=k%<2>%MUY{}kS*ditd2 z`n7=Hh}X5Vg5QAGjkDtm92fYT)%=y;rFifa{L!jco=XStf5LN`QR?4#ASV4-q~XrZ z=JTghwplUC^CuT^N%HAO+>^c5*A>n6HH(k`M*p5g5&s>$ZahPD@GAWAlw~~oR?B)w zedEL>d*U9{$N6V+pOv_GNYk7jjEj9s&+KR%XA$DYyx#h`jFcz)vQc<0&-i;?N_l;L zmy;!*nZ)VFv4{`gN2v8N?}bYLR`Ju?)I;`VQ}CxqJ-$)2+BDm0kJLA9#{H9VGHzGm z8rN|3zq}@K;Tw4FR-k|E{729F(TI6No?Y^7eH}^=x8;r2=S``12LCF)RlB7eFB5Ma z{)+-^{KaO7S|SksCDmC=V2q}!=4?5)XPuY ziZ{8A<#V%rjn_fy?F!=`#p~{e$MG-VFI3Z;YhAsc@o(UJ9hWk{%y&j4nbe~|oTI4k zzY6{!`HUsDSx@qjTcDEOaU=PVUgtgdF>Ue>;5}{9NAQ#I3)J%MjT4EKpTMui-_y%C z7IX0#{3rMmRd4=Yrrv(jv&>lSQ`XlC-t(4zeRXi*JPYqr)6+ja>#GMJ!w*%x^4?Cs z^t$yif}cnF!D@Q*d0V~x_&Ipp^+5(d124~On&W5xW&H@M&wutc_HDK9`#PBapZ|ju z)0ne&$efjOs^k-VyViY@nve3k8Rt#x8}K|nvhuJu-q)9MQk3%$zFyAzTdbHq<%Gy* z>vnxPG5j|CnQA%mQab-F^-SS+;dT409R6cG+Xbr*_QnZG(wFe=KkC=P8osNIcZE5h zxA8vwc)XlPn&YATy_*nztj0?}mf7cx!PnROU;N(6qtAMuCZE~FwOaS2oq7CZyl%ZO z<7eQ9sr6Lu+Z!w_A$+UzrR49%OZmF_kYRuIDE`~T8vEa{+97My*RAo4kk3Bi&Qa^3 zydRms4}3?zf63qn;C0W{3i$uRx9TV9?+X4f{B*Va`kXZ<4c|+9owslvCXUA{R$G+& z?q2-tHsu8Ilkrce>7_50HTpunmU3dmy-M70HSWenjMUd)AF~;MiG2RW@07ZXYi1;I zvM<|3+yxT%q!MSov&Gl-@+y>9BkoA~qUSuU{Ep0VE60iS78e#P{<@*Q*7KeS2jF}*H*0G}m&tMQcaj^H=qTg^}L z3H%!T-D>|R_jfY*19;u}qk#VeuRDKK@VoJ+sQH_FIuGAPZ(u#ezr|g&6W`@jFA|6N!JpU3O!6T`1><5T!Gc)wa-<-S7>U%K5O-FGnN zat!@RUr#rF2wry{^5aLe@nQTq_*Ua4^^|?tNWAR-jQzi{`kggHy`Fcnf0;M^x?nK#)t6t;&tte;pewWpTbN1boI~Sr{HBiH0#ftk*==KfBYOg zkG*?&bExFo`gy^)gWoX|*J`{ap9K4)mG~|`8@K-2%FBGGuP2`X`8-7&w^ywg`;_{i zNPk7}+wi*nNZ_~Nb?axwOg}`;UwPhLz!yoc+h0`hAK-QEjD|Tj@bz{+{F>ErSv&OYjN(0bT|1LzdR;rS_>rV<)lL~NAN!mM_%1#hzxb859X0X^5T|R0 z>x&$Rc-?sU@Qd)edWP^Zysn-x{BnFi9Z%UOjqiONr-@rnT>oBijTmWf9={5Izv>(N zy5{?Z5?3LvO5CB|?{nVC@0EA9UKb=EFMbt%*T0w7>;1kEaWns-e_j~FFTm^iH-(>% zZ?z7}@sz`dHD2mj!cW8hRIR7QL?zQx17M#}#gc^PlMYR&ha_rQMGyS-KNOA+5|-YNTg z=Y00v_|w(;D);xj_&4#b#!dP?M}6MF*U#rSDD~-cJ}(oui@5sv{Cp*@&-pw}c~#F;+hAqYp)wW6yNGRBje}CkHWX=kNWuIN8)w$kK>2ohpPRp z+*e7P{`0C=_7i!$m-M>%5tz%Gh_BC&w>KSfemJ9yKk>T$^x~^{U4I4f-S}4hQ9mxJ z&pv#8e%z$gr_cN_*q2JR`rqbG4AI0!|TS!jhFiB z^7rH4$LspPKs)x}>-~TKFAmxNN%D(PA6@@v@oVt9{x9NRZqwc>{)hM}w7t1+Qr=s0 z-oxKB!@sP0SvQ~Wy^i>Zi~P0LJxPtTH(tn*ehuMQ;D@Q+yze6Opd4Xeg|A$FIYO)N+*HDV6aR{58G2^}Nxzm*1_tub&@oybG_JAAbA*ymtKA zcMZbV$N$NnTkX`W&q?xIO}uve@o(U@a)bI+{50;PHs?p< z-sU9rzQ2#SJs-5rr#}Ap9eCY7FNQDUyVP+p_nMMEh5s0Tyy}(vi#hy;~?`Yf?tdu zsLm_amCnO^W>uq}N#f>KYTfI4<@1C3;v?aUb@#Wy;a8ecywarnbWbRONm zxlq;D8R9?O%ildaOo^9rpFXS^S8xCQHrp3#`uXU=KY%|@jZXffXRPHnwg1=i6tU5NMdD_3>z^xB@pJLVtK}%il{3cs z5cqRcuiU5h;_t`nj_V+Pahv?3_(k}wYX0`$)yKlccS-z6+kuw(OFv}sqwrr=)Ay{U z=6e}3Ph?-VjJT@7@iBda)n{gG=dQ+YeAm=s_*2#N=DjCLAH=W1 zFH^n!v3mK^juPz{_^AWkL+!NVkL#^EHqJ3lnavVc-N95D$yeson z_GLTp|1xOru}XV;=IdX3&(|FJJ?qrx=V#xu9AD4x%^zF++#)|IKT7=c&zkWsw~v>8 z^*v;>oiIdSzU<2!_5Ke&V+dGvlWoI$ri=D^D@wN4Jlc`c|m#OwWPd?-3~P z_c&Nk(s*v0TYY8zBb1WfgZ~hJg6hq8%f$!q6@07l6(7OBqwzA268O#d!5kOOc`WCt zfxXxFEO8sV^yL)sYw)eE7p0u4ncmg6oMWtVB+j*%efP)%-G_?v6Sw-TL&rskTj4!) 
zT#~p)M;$sYN8F;(?c=21%f!tebD;No9m?OWF&^eTf!Cc6-T24wy7j}4e;hxUvYO+j zyw?!MC-Ay?5yvlVlRk});?Gv|SN<+c9{*K*tMRV4AHNc>+h-eb-h0FA>gUEM@w$2M z$4mQl>BIO3@w)oO@zVe2sqIsKm*`->vKD`zoptf1A6b3eIA7c$vsT(!AfKA=fc8FG z#q``Bw(?Mq|F7^q!v(G9ftw^r>M<5C^)xz_dYE;UY^5GS@|jGWZl4&%kHZg<1U>r* zHD`Zl57k171NHtBQtEqL9$D&cqHb^Tn!zpKe#`qA}O z)@wY|#u}$aKQ`}BNI&|C8#VTT_V0s+@i*di;~B?K#$TwG!@4iqI>|qczZ0+P$2@*+ z8(+qU@w(&JSi<`p_;c0r<)ttFTgrFi7vgp6qaS}y8z08c$4^)Dw@ zuipPO{pG5+H(uP3^m+VSq}TOt8K1}N`q%gx=Lo#6f8F>UZM+}99k1)(Fn$|eH&5gE z_wm~Hn_kzxyqUkQeP#UKHuX0i)PkXx2u2H^rxufH@h*vWZzeLs8Rk~ zd^TSEfpx4k?_=11)rfA)$1M4+xKMw*74fg)b@%J4_$=P9)<=1-!uc5EiEq{4vMzY> zui+2%UPXww(HH6K5yOwbpRU$JdCro;pVKCN4u1+>H*ZV$VVd;PjxhToCtl`_F?Frg z@8;Sg)snabaUSAEN!*!AT%Y^!fdu!5H08;08o^6Gy80&Y!||>9UD9XpQr{E#+&piZ z@4wc^g?-Mk__-a_^Refxn8rR~rd()CK2`F$X1xCV>HNCQb_2fG{c-xl-nf*P((CJ@ zkGMyP)73YGUx3%0e`5H1@WUm;o_)ZLjhQa>OyOhrF4ZgdM{{QR6IE|-yr(MZOZb;G z<=4kQ&iw}bGuCeMMZT+fM5BE}=?Xwr_`^%3%34f^miV$}Nak};<@KfVv>6Lv^1wR*msCqiT!CsFz-TuvskGH8u5Wl!hJ)-!;e^8GUaqEdY zR&9^+UVRS#JYKiXmGDpE`>W|MX^g9ke+~Z?{On$yu{W=2Uw_$_VnPvc8%^3R)Imwy@mZkzN52S7XTHoY!?Kfc^1|FG$G z`=>bm&!oRV?H_Z8DdXf||5C%B&u8PWX{!(KX^f>LlYTCc&p^Na_^99q;C08JV>#<9 zURO^KUh;3{rJh;p>A-))XX9d}p0b|2XRRku@+lIhJC2k1t@r`t)0{7tS?RO*P57~@ zH?QZV9!2~P{2x^>b@+Yn{;CnT>=OO?$n{Oucf771KKugw=hbq|=kihy*_TOrPMtABz9{6YLTIj=SQqjPEVzHuHu|5E)rP{xPxN0Ypn zK4A5yk>ouYyqw>gp7NzBQob7>!C%s={Bh0l{rJZx_DOH<4J3USzXq>6e&YBHzSVfv z+mC-8f3aG>MOOXu_`lk|91 z_wg}1bMD-ytz)gRKW+Yftt9#QuGHVB&f*v2b;olNAI0nT*;V{J{Ap@E!`3`PZ z@IKYE9`yVU&Wo?%b?4C_-hP$-IE~^zA-(qehu@38R4w0pDOBcJ7C(gi`4?fWIn-&? z*W>y;!}ptfp!>&adRfmJkI?1Y`gL86xD~`P6RmtC?puwy&=Z$n|1{w01NFZ{XM8_p z#Wv>cGAXz||DRx9OwsQ*BlrmZ_P*uaXq6}VB#C>mO+H!tcm9KXO2n0j^YtmOXB=zz zPw<0OudJu8Cwbn3KUww4zu)DJuk_ z`cwKNjsF_H)w(I=$i8d=zW#UFjwiThkKGub=HF$jpdJnCpOZRL+^=opJ^0;id;mZA zn%4PCJtO!@c)H!Hqx46g--RWK%Mqt*Zx+Ay5cx=XCE{Gu+LtGB#&`Mm$Nqyj4{^3@ z51n6-xcUD-TuJw6vrLDRliIA z9{hLlqt*P)_btQ+@ayqT)i;*a`u-z=UyE;beJ<$}_$+>Fm#5HcMNb36f=_Bqe;tsWM3=_APxL*4b@{>yz zDKC!y?2JC=BPq|^dr5iq_7gXOINkUb@Z<4=B|*;`pgf1H;C=YZRj=GfcdTL^!t2g! 
z9{jvE`3KDOx_wCme;w&tjZc02@e}br8~@i+fBHezYY$&J*|&u8Z}Zuh%4V)7rsv(C zNBEbg%`uYvN~;*h>-F~~YKQQyXBz2s^Ua4JNdB$nyNqj|`WpBj^4WNq&38|IWo(~q zoCBnuaq@YTINi9V@o`N#l0J`LfN#~WQjaqJ7W|cbZq}iGZZJ0$d@cDnR&xxtsfP#u zNE;u(Kh(xY@C)&}`J2GsgKyQJQojs-E?(Ep0)9IFQ0=S|w}?1he>KVn)_z&utBCcJ3=J3*=x_XxI(Kfz@pQov(^tUU`9tPiPy_R|9!^^yi@VU>t z>h+vCOx#-Hbn8kSzYgD}=A&E}r}3}gb?1dVeha?Ub(gFgfhXCoNyUr z=S${O=f-j1{2t>I()X(uAH<)o)>nCM6U5KLpQw7}?{q{>f0F8z`=LpEnDnilm$^yf zN-@9i9M(4D;m57^G>+-|?p?;OL_WKSYc)TlU&7?`F8+&>&$UWEJ?m+oeYxj3-e3Ge z>*w{7pX|%V`3p;d66N zn7@ON_QcsQmGJfUT*XDuA?8is`#k@esV^^r_u|K^<(V^2#x;SThQC$yjWKQhr5mYt zmbh;bC!d>jYW(vF^>w_6UyeUp^~(D~Rs3^!-9FICqaaDI^IrS|_*UzJ)G3HxfDTh{v0)b z<$dxvehGey>g~U+cb4>18b5lL{{4|W{!aXYUg@pBt5n8s#*a|F@?6mP0nZ!o0o6-C zkMBKR9^&>A*Xlm9)H8tp%8mMVMDU|#>#tuD_%UsK27duwcU%K^s=nfb_#|GpK1cC!yzahR62B1dQ^$qz?YVE4#jn86R=x5& zj3RzJUc3I|ci`{pmA-L;l=?e4z(0She*Sv#L42$7>-~&hf&U*hf0>_u=)IoAh?_L0 z^|~ner0}!w_pA9d_5<>0P`<67NAkqIOx#6k9Od=gKP=;mZM?D0X8Ttg@5T?hO`pFX ze=fe&`BK_ZV4pP^Uw?mh>Q}A#TW?Ee^Znff`8;#GJ|8FhmW}xO^OLjxEuZG|lOp*H zn5WMtL_VkD>-9PI-|}hJ$Mqwd?Y=wp`J~BbIli9HKOfPyD@;CrnXk{MLOwNoJ)bxA z`DDoFio5msc-hz7g0JWEvOb?G`TXQweLgYr*@Ca<^G$s|zL)sj>_UA$dGZ;9ujg}@ zKA$-GBx3sgW*UDlUbp|qAxQ^^jvyxew!bnfuod=+`w5z8_xp zb?SahwlcDA1n|S!q>tbS;*V6*E6-;W_!{*bpnB!KhYWrTh9Stk;B zH~!i_>CH?gobI}=h`*wZui`Il8;@@_PEt=Vz6*b-dWMMmLYs18 z_-otv6#i_`P|#XR|WQ2 zEAaLEwl6NWVtVc~^to@FB)_qX^!Ej__(^!(eSxCsb@v6T_$x{8RO@R#r;vV)u~3Z1 zFXyu{K&h{+7v{4&iAxigA#T3J?faNR&$t9dW|AIF#Rt*%eRr|}=+ zTb*CU=kfg>)aPF|%h$En*uc6%{#TJ#bNr+Y=DULOwUpx_?wB^^1n@R|t8yfL1Yf0` z;e2kEV_ug_e^%L-?ZeOIv+=WstQe__ay^?RA77h#6!9Z8^{AJFAA+AkIn8=V8GYXG zb^Vm*5yZ8c*U}yzKH8=pA^dE-ZXU+)x8Pg#hvc8aNARuIIXSLmUnc4A;N0#HgIx{hx_-L**TNQu1yeR2^KLT7Ev(`QWBC8EP@z85X1n>=hWuLd#v=LOl3#7z<88nESvAU=aaj~VynJoW1SFPVzYaC|EUm|~(VEMr7^!f4&@@vhHOXR;Oe{8UP z*ncB%ptM!;Kb604(D!$Pcbr6i=Qx|qti2xw$&bjdwjazmnswPns()`ij~q3<`?%#^ zf@#+DqeJzWew<{4%J~=ge!x!oL+kWof&3xzYn@+9ujY2FQ$0(RtF;{i|Dbb3ey!~oA-^oYRzD}oUn{>_Kh5zlU4Ead zYp-+W%HKx*iNSGTJ#Y5cBKb$j?=hd6>zH-la+Px5R<7E0##~1_v>r;!e?`wDKf1Jg zyt?1Fd9pIGntl&ktm8T3e;&Q-IOenH#vd<#xcsl{d6#c4GWyZ@(-!M~fc#qHGFyJC zhHv^i@KKGO{Aqd~X$bVUa^2&-IN4r%s}}3?;>uUMFKWgy@<+W-CjV$tpmJWCarCDU zqdF$nrd-nxH=P@=+}FNDZn|<4Hjy*i+obi%70P+n`JFE5?qg+p1J`-e&t%oDK>wOIQt;%+dOW^h32>dOAe&F@sMEP&1e681m)8+TMruO+YSAL|9 zzexTD)!*YjbKrB~E9CF2@;&Am?N@X2%-eqX2g*OC@r+)*N`2DZ7eIw+X_G|TT zqWtzc{hKcT#5(<(EB}N#{aYm8^uJdBR?vQ}*CXrXoBdmDel_RaOs$X7^0x`;IDAC2 zO=WK2yldzks!WUCJk$Q9J}+@?_0LxgQhgoruaUp0dB&{EeyQA5Dib*&(5{)2%$q3E z&GETN_4T=~_Uo+`@;9iSYU>v7_?N$0eqYtqwQYgV`wuK=4K06k&{rS6$N4itzN7Yc z2>OBdlP1bH?XM2{W*e;clV&RSxaz63ew#1O-KBLe3$*(pZW_#x^_WSQ`@1MTC$?fe>eNU;rTHD)+Uu%08$j_^Mt?gYZzgTB` zR?B~}j^FgGK0hqK+V*<;Uw*Crn|Y)iKda|m^Qr%O`a16GEgSX@9Q#o9mQcB>($$+Fv36(mLhW$v?MF`GL>tduwX=<~SW8 zf1>;z>nC%4_rH429F^YjK3TbIm1{RbWevsq1E!wUOLSh#KV8ow*I&?`F?Egi&m42S zE>Jz@c&T;$SSo*m{A$-RF;()6KQ#fmQOOb^ORns-%W*`eD{( zpQ#_-di)d3+Le87wwd*~$+fGVr0UsJf2S!oy+(UxduGc|*YOw1pIXCjQ~42E=bSI! zd);zUpubgKx0rrM{;c~kw`{uKZOWyUt2N)Y%b!}u?~s3f9lullWcmB5PhDdixKF!) 
z_G?|wmdc-Ar~cKH-y&Fl;C@BZ%liEnl^+rGwT~;me=^&w$K?M-eq`HoyZcyK-&yZt zPE>t+-s*q8x@&vF=XcE`)8&6pzV=XeU91DlaWGf@<8}N+^83uLU4Di9tw zNq)8KiYdQBey99e$ICkTx5)1sY(Mz=_Z9sr!SM)OpO?v>BY(@FAGkiRm496w zzrRlK>GG>xzs-0JlfOv*rp95Sa_=iwYaFJ_f2)o^SN`jD{6+HrD!xs;m8*4p43dA0{F8#?Y3i`nP3_8Eu3W9{?T~+Q9lullg?0P| z@-L8oU$7s1fXf_LOXaVXe_{`RoOd#s@ffdl&mKSGb70>&v*&pH)IVoUJ^lZx^^c{Q!&olQGd-d!`!x{cPMbnSDs^@QWYLDk4`LERR zSIB=+e$Dw`{vYL6TYs7M2fnJmGaGV|33}djeXH{R@nq$kAJ?9TXUdPt z-!WKE;QhY&@+0!M3;Kcg$(P9ApnlXUze@f(`PI%7vz?K@>AqB*{tc49ca8SF^NjNI zgZ-;=9q3SQew}tY<^NoMt?gJK|GpaiFx#eRjf1)w{>yW?I?bYWQ<9Es*B)?Ys3*--xUu%4p${#Gh)^W00 z{w{UOH@&9cZ>v+jP5yQ@d~+PM%kMA0$NXcC1MBthH02IeZj#D$oj3e3^MR6P%fC{7 zwfV#N3*}F%;hTOelb@2`<9Ijy2wcxiJ?oUaPPtn1!@$4mbA$40txHD8Kd+8IQT|Et zt6f)2`_tu5k-vkUcZ~z@9~ghG{Bz{j+MkQ$pCP~2@w|fiYt7T^c-#@jxj~dJ1JU#k#eLwgewfi|){?&E-neu1IuQd+y{1W+>$gj4Y-f^k* z&bjh`p>2x%<&+-V(c|;aD|LURPCwe@zgwpt?egE0U+Z}3kpE}-aYv89YiRQl&+P9` z`R~ZDb-h_2|2_Gey51~PZo9eF?_Zd9*2+Iaeyx7?e?#jX`ID6G>Sy5m947xp`PGga zQ~!ARGwS%$y{Wg_fr*+*V`QO&_$YGjID`Qo8oymI+vc3CNOH|+8HR?0{Sw(-x zngW%&0{4R=|Iqz6m9I4q43ht|I`xm1e_I`Yviz&$*O~`r%AZ}Q{CxRW%CC0ZnB%)m z>zFz6@6q$fPC7a&+vu-FZZj7sGY+d&&lY#C-*}v!cV+m~=f-b(Q}eR?TI19v|9tre z1k3aJM$?al>c>p^UOyhuYPm{3OwO#!HYj(No=1Kb$eB5s`Oh2&5v^N>-^Ke$HwAK) z;~?<457Ymp>fdF`*XrMR)i+zd*X~h)`mA=REBAb*-TeZ&8tsl=rOyT5U3(olS$?bh zTG#cN@?&+%&zC=}PWdJB53EyumHe@F%17SP`Cg~|Ao&yPlpihstUBc<%b!}O{7m`t zqn~dq?X8_yIc6H(*t>Gv&X=E5`ESVX@-Ok0UH!v0%V#&44!o@c%PWi7%fFRSA(mH(mq z-vrwa|Jv)I>ECMk*WOcm|1`a${@3x_LLZtZS}OmoI{s?;&ivZto8Hsk8Z{xE3?%yWC&9{-yFymVZHy^8SZ` zP5rCo&zFBi58uC$?rr}+b$$DJ?ecB%kCvb5QT_*A{cD&1i2Pdnr$hcy`HjKy!QcOs z|7ab5f&8Dy@39W%yX#E3@e2J7du*rxst`TgZT-D7@P(lx(y%KzPh^&9sImiON-^X7*I@|Vi5wS7zF ze_qF5Eq}-_tj~dm{ofh#w*N!D-&DtMlRr&e30{sZ#w?Xi9Hy0))F{?K35IsfIK zCBN4CVS)V1<=+?_pYXL^$KO)<@5{fZhd;N=UoF4m!8+T&MsI`5-ziw$|JZ^z{%!JS z$=@ZcK-cwYhp=hDj9uV8uZd71L7<-a8V*q|RcKbrm}*rva~f01&%{%8Hhgeh1#9>b%$+FK!iFZmzZ?Y-5ry~w|HPv|!qH#;|^ zJ7>nPO*v;#o%O%`R{6E&#}4_8^7{n)>tEiy?e3KS@o%+%?0&TN?8idoK2bg2=y4tn z?>Y~c$?yM%^&7VhwsU;pYn)f5*k^H=fd?e*cA}sehsT33dEs@^6uUSC8^P_6ER|Un{@g zA8Y&lKi2&{`L*W3Ve)UNXWn_zi=8R*sDzpQmeo$|xv-(08sc=b=%C9xgt(AX<{95av{-5Y`lk#hw zAH(F&k-t~4{lMpL$IHK0eyw%xH2F*An>n%T{5zxj{G2WSMfrn+zW?!HfB(y0UdLZ1 zf2I6d^Y~i%Yvk8Dzx#ix*9lM5z7H}?{sHp04fZcEzl@hZRDP}TpCmdpDllx{E+1Mx%LiO)I^tNAqLVnHTU;a1c*BZas^6!+ts7L#ccJ*(e z`~&{9e&ZuO{9ktY%j7SWU#tIXYpb6#pm^VK0W-Qo^R%a+4A3dVg1I4S;$t}AFFlJ zCsA)3rF#8Xq}(5i>o>mC5XdzwZK(8fh5UGJpd`}&&qYv;;umESj5 zf8g_Zi{$r}e_qfJ{JriK@+ZhYp@)Br*FQ7P>*UXpe@>S_s(HXw4M#S&hLg<$v_Yeq zz3-bCv|jV=CUR}6!>r3%U-$11L>dofOuFj4xKgKSuD||GSG_Zo?`;GgMxQPH5=%Gz zT_Am`^vNdcjdxewCsyhu;MD9Fvr{ z`#W0t1WO;NG83hblRni{FuJ+*n|+#lAMQ>1etoOd{yh068+mhtNqNWhI_dXG-^S=K zNY}Pr6&~Mg+BM@dQ0o`td*vSKE;mg2gOzf3_bg|&cRaq?-;t|)KNI%G@TlgYm-ne` z?-MbG^{oaFVv{Am0UoA4)Y!k=EpH~o!honqSe`uqAp!TnRUze`o$ z&^K!Lcl9RxCMQ(0yczd4{3boGJd8eC`e3!|ou?Q^pD4Xmx?2g0X56MpA0U0W(REyW zOUH#7bMr9e)@i$kQ!euF13lf8>#uc-*}i5ykGwA3J1*M2ePaBjw0DUqp?asO-m!st zSIeKF@>?69hbh;gdatR}dwtJ(&H23QVa760(&q2Uwzc$37Tjm`*p3zZxB z5570*ROJqB?sr4@(B=W14eCuMGOBs-RqnAWmh96!cvN$%jyxp}^)}vYcV{T%%v1T` zco}`Z^f^`ZMbc+U-$fa1Z@-f@5+=V)`h8XMtE<#MO!*G2hfMu9>3QT&2YCJNH0MRe z^!uvFG0h{xr}gprs}cA6Ys$@6xiuV0bTK|VN=y+fC z#s0TTA1b|C|4l#5y3F`qKkwDiuf6|e#<}S;9iJ-a_4A7A<*M~_h5ECmNi~Vd;M&!-d?rwH0!c)Z_&^He7)-SRqN->NXU6i<%0Xw91nA)FP2_wJO^q0VtlWk zcT{h$T0bYM+<+4OoL0SDwSKm_nm<&|?&oOf*GR9`&!uLaCEx4kHr3m!*3ZZ~U7job z{4oBd$8(wbGxqH-_H(uLanh^x)0|&TTBsR6xP9ih?NmP|NcZ}GY4v_o>;Dp!dr;-P zgk1RmZDZKRZ*p%<=RTF&T4Q9c z6P5XPo%9E#R~t`r-J7ZQjGxx?$a@;$?lot7^GJz$n03nN-XW*=d%CaLwa%}sslwN3 zQFW|$jXc7?5S#rpP5F0}f5haw=fE-MS~I$NXhYwSb`Up%6Zo) 
zb2Ux{i*e!aujne%zkl+3h^DL_MqekrpY#?_@&9M^{`$ju1Elx0^x0ZJRq~P2exBYJ?kO#9ne^-Y&<>N_f+n{i*K`o>ECk)B5~mHLj+%aX(W^@hgM z)VoOK7b_PDlsEb^>5oab*Ac6wKUgIn(T=;XN`9d9c~$gb(&tps+ojKvZm$m~OTR`s zOEIsvrk~TL&ye0>l<>LD{RE@WmcCrNJzfi>uaq8q{V`1WCFHNu^T08BMDx(;U2FQo z8&3*6XaB3*&<}dnXWH-IsO!6QdtBS3w^q?dOCKOTX4;?VZHBiU6Q!H_%{~q4)1;4; zZXaKr(nm_SkH`7scQp0zF#TL4eVp>PzD)WA>AP0KqRFq8KDA0d(iCzosiF^*KBJ00 zO!_rd^mgg9q_+mz9j)h+rO%PR(&#$hZ!~M-8yXI6ZtXN{;*2}0dFWNq(OPAm+NXJ_ z6s?Vw=vo__V(t33K=lm$P=6nVdE+Ki&y5Y5H*Sa=+B~?^9n(BC6CKq&{Hk6@Hctw# z?bAG5HLE_g)KwoZK+U|hPW6pj6a0HC-n@0BPP9bV!7|!EYR&kyZLaN8-aM~7%=6LG z=Ty-rN}ng)-p|v>2hV#`Pp9-3m7i3pTr~Q8=>z`N^L#M#&LZip(#^aRoO8^N=9n9; z^;19j-aK;XK4iW)k3^caKb5nO_kq%9RMCe?zeIZQJT(1mmp-+MK3Vz{>0Uog=kzf7 z>Cz`v(PvAaAl-YP%uDHhcZP+FQbAHSu->>KOP-lU6Jq({0ysjP97#v`lPy*n|vx!+OVYio1G_aA25*Q&iwm9vk_CUszq^v%c`nEW8=?@0IF zSI0B@28aiGVF&fw`b_D)KlxN?kFKve@(tiu>Z?$_4d0xY7o(a5$AJg+Cyg47+Rc?~X z)jA)SX}vX0zIQ(UY0s+XquGzMRnJq(O)%|yYl-u<7soUn8@Nwma*LF6{vCXu#hbsh zIVUs^&^k}IFTHisD&=}BcQo~J&r9>%S(@hjplYu?H7RdAXKH=as&Zz17};L^)nkPI z4ck)VBmDy-RGZJWh`TBpzRSPM(KYjWD>Ad6rm6gL>NQoEhtWHwuas`~LFHle`O+p|HGoDRb>HJrHLruBvweAVc zt-8I+c$)IVlpC&`InSJF-N$#k^hwf#=as2&6K~OdwZ5hzef5fqhH*uuaLf2x?Xwe3s8OC9KUO&zazaeAAI2+ohF5| zb?|BfbVB*dkTL$SdarkeZLRV9l=I1&PtNog4!TKpq*uah_hjXVEAPGE8rTohrB9W9 zgz4Y+yZb#``h8XO1=3emsb`7w0so;t;lsR&%yzDjK2G{HQ@{FIZ96-(orgEK9&T@^ z>Gwc=fygYCxBE9t`cqZ(cJqZJpYi>5-}1M|oR5>G50?H-(+{0T=6-KwJdHn7{w4A+ z!M`-%oB4ab{N?h$XZ-H-XIyjZWnJ?ZhrM^)FEj0Z{>9fJtEIPA(IeYx{G^XG^{2b{ z!$9dHr4Kgx*}iW2Gfesf>D}*ddh@Ohmx=1n;obpXx$kW9la;^ZOXkhE&sKhp)vnPO zNPp1M&H1-P`cu-q>y>xCNNWU-Xgnfty)flhE7xzG{*H=vQ{`dw$N+8s`Y&F82TJc( zMIR=;x1~>1J?+vR>93l~FY21VwEAX!-aUVfZXOrDtlIK?%oByB&prRb*yeFp^~yv$ z-5Vk|Hr%B7(A$31KST95nfiH{@m(VQ66xOO4XpEVg7>D5PDk=)yw;Mp_TN!@$DnJT zZ&z+$i`MU|_i9tN`~1as9^5Dmni$tQGLsM!nm{oB4W^(Ue=Oa@VNbZ@ySgHz3H4X*{{> zIpx}R(0u4@@c%w*_k5s!F&~(lkmGv3E->RiS>?v6+})Mm9Hn03FFI$C3< z+nPP*P{w=ij#_`H{Lf64DjR&S%oNjWTQkjS1T??1pfKx+Wh&P@yutr`+UVv1iRM=S zO=5Z8{>T4s>uuXf*B{kuj!{QPq88@IRxT{2-gf!3k_fueYw%>jk~))Euax)*dnKPL$so+2EhY^3ODjOuZ(k_|xT2 zs^iaWBM7{+4rlB zkJ;CQ@GmwEd+l7vmUyoUy;YVupW2mcbvJzBzSU&u1FGoLrT3FwS(}*ZO+B-v_m&=f z9clWxK)NH{JlFB3*JoXapI2@v{#M54VX~{JuiE=OroG56etT}EkT^pBW}5Y!+VkH3 zIb!?ZF|GB&RS~V@Io3uup3?Qad)}F-dPl1K>84(-pTDP*WxSrQP`%ekKR};bi>%k) z)$z=0-R`xb=9jM5x{E^%pXoiJ;a|L)TX`Ab)jLejc_`>?jtjivhevmSe?^VicM!c#)3(yI{%a`~0iq)!T zvGU&g3*K?ueP6^JCryLu`)!->+w1t|x@Fd7rhTu!MfJkl-rD`0tNLE7)89p#@KNh1c1~ z>a7j#hH3g$!v747Y1ZuDnWp>}Uz`W#N_VR03#EUm_M)Z&9;Tk9($`22-d{H3Zq{Wc zKU2>m=MM;u`{8;W)?UmFu_VhK=TSSLI>0W483((!VhONq3~1W3}r1hi{i# zCEfI=S~;UdcK7{S^Wz|Vd!8O4eT}t$%y=)-y6PS2kD0pr=&I5^hTUt$Bbq14_V>T) zS$@-J{`Zca+}!VU&68#v=1QNW?KSUok_0Ti`@@G&dnkBwUBdf*iZCscTaGp_|NyLQd6LPO@a9#|#pN5^M!|uNuoDUkbk~B?xO@)qjXNR51?p0yu z3iqRsb6d!LBjnsq6Wfh)*M*#u-1kGO_tlW|aLCPuotwjc`Tu>cHyBBGigUTYd41c> zhF{kHNyzyq>`2cU8NJ?ld$`TX6Gr>#`4Gqp_zT1>wCF>ZFX|KT|NJ2Z+DHJZ)Tn^-mDP% zLtp2vM)%sj&XPv=fxgc2#=F9|_jQ&vU!$5-W$PY$InIwl?&l58gCY0r24{`w#%JNt zb0W^IkzKq@-#wC=qr#*A6?U#Qt(syx={*0gd#!8s`KJ->x#v~iuOseHUFR;>AF8JQ z$_{c5a>j|y#yhSFg)aO03HlA`P~yJ6&hNtR%qHi(@NtVaZ`j!2&S`3RBjW1$&t3P| zO%3Z@^Zd4G>?fNyJRNmkXlnStd!Fm%zS-39Y_C0}U)S6Hq^Y6U`wBhJ_Hl3Ayy2xj zuAXNaPf-0oX?E}G>%80S-k`SnnjScz%gz7KYgJ#)37vBA9qRGCu=`|h=Tmds{l3Bd zM{noq$asB6hwExh_E%TxJMO5tjn0~=J9jfjjX$zk!>@atCFQl=hv@awX6`MG&QpEd z4Sk&V`k3bn8!Ii%nWA?oN^Fi_^?Q?U;$(h4PiOt&Gi<}?wLomMk7zOSLxj*p^xSJcCd&2JD z8=S|&W)@i*Hq%IF!!)msZRdsDhr`aJA@`oJ^K8gWO4o;VFIv^_bE*5Ih&lE$5$Ec# z`*_5e9d;jzIN1hwL&W(-gZs~j^T!5vMa20*Qr-CIIN_o}aSxc8Xwd77X+ZOg&p@vhFB4);6Goq^=!Z^}`{kM?mEhT0x$G)L9^ zM(4?}JFC%|+2F40{J+%vI)vK0nk>@kafQ9YX!h)O*S3|Aa!?;vZ{M%tmMn&12&o 
zyM?wt;4ppoKXhg2y@t>~!r#yz;){&=Tf})c(jEI>pW7JP`uJh`jnUH9SLoX)6w*KW>JQTMvtoVTLxe+D_f?B%{S z$hooitgu4)K5jPV6#KYW#+(oOxF781T;J%vwwtr8(S3S1=Z&U!Oz6gb?q@OQk$!F| z=6u-C{d3Ie+`|21%z0%C_ZKne#Vy^x@9uoOrTfh8&O=+d`Q4r8w{q{@-T6^}_vYQ5 zzixe`YWU^0?&E`rfP4F3XTbpX+QH7p1Kf{ycV2GUMVUvob8~w*%eQlH z*~6K=y}Mzs^U(J0KL%PkTerNanA0=i z4@RBqG{Z*Cxc{V=b4SFzxtH_Hh&!W~UU9mw^m6WY-6g%8hh6uPUe3qv_5PXF`xN&~ zC+&A*_(LIgd4uzuIRf4dxgRu`2}VC)#*!mP#1DZ19`FO}&dn=>5#h*=&t_{D`@XTh;>znyQ zZF;rE543)3aBkE3t-*P^!C$)^aIX8is3x@qQRi2toQ~!-QRn%v`+C&5vf=LGhaNng7ct`u{!5j@3HQ&&eXp$PuW|Pqde!4^lU1fx-PU+@RpFl}C)PNhbbZ{~4B`dZ z(67RwC&PVS2%BYHqpmr`IF?j_-OI6CAU8o;Pu6EUU5i zzV7k5)#lF)f#<<&bE{tI@8iz#_Mty#?i||2jMZ@WyP=8V&b+0Y8p9cj_jFZV^S? zM_uQWh*55J^*TkbStPt@Zs@$>n)E_$_>9qI^OsTQhhcZM>#PX7Z%3U68r-E(XGz59 zuj;;0)cKif^v6v_dKs$V98-FJ)OdfQqFpX^Z;Lp$=&nn|+y{Ii;w)Be5of7h$wr)O zBYK}#LZMr>b`Olh3JV`y7cr z@{>Ki&dqtx|E+-E!{=cUz5)O5ZOKDRC=SN`VIlXa0Hf;Gg~TW-*)x zE6JfR`+nu;|F2wW_a{}oe+v!65G?D=!8e)s#FaiE^X@WKC= z_0?+Uzty>&<(zhe?%3#2`FV=<^LMPDudsf;&iZ+d^>acWlMB{A&HDLTe)fKcUcra> zxydV~?>}0lJ4t$M&d=jY{HT{@ zXg;rL9=&`x-tQjh`S6(Eaz&rBh5vJas%7Up0c7GM#UU>Qc=rhOQLahQNf zn1X4TfmxV?d02o&Sb}93eTVj848~ysCSeMuVFqSl4(4G27GVjNVf04?J9u{B`mS7o1-=}>TgK?OENtl9Z zn1NZCgLznhMOcDm7}Z-0=CKWo!8lC7Buv3H%)l(n!8|O$A}ql&jQ*4MVGPD$0w!S! zreOwVVGibD0Ty8imSOY*+J`Y1hY6U3DVT;Cn1wl*hXq)KC0K^h4{0C9U>qi35~g4p zW?&ZPU>+7=5td*XM)kR5Z~wy>jKc&>!W2xy49vnD%)!W2xy z49vnD%)as% z7Up0c7GM#UU>Qcsv=3u24ihj5Q!ouPFbi`q4-2peORx;1pU^&x!8lC7Buv3H%)l(n z!8|O$A}ql&jDAY{Fb3l=0h2HV(=Y?GFbDIn0E@5$%P{&M+J`Y1hY6U3DVT;Cn1wl* zhXq)KC0K@0eNNe%|6mNpVFD&$3Z`KOW?>HIVF4Cl36^2>bJ~Y77>5a%gejPY8JLAR zn1=;ege6#p(RH*BV=xXAFbPvI4Kpwcb1)AJun0@A45RC5AI4xDCSVe#U>as%7Up0c z7GM#UU>QdB5e{$v!x)Ui1WdvdOv4P!!W_)Q0xZH3EW_wV+J`Y1hY6U3DVT;Cn1wl* zhXq)KC0K@0$A87V4UEA!Ou!^e!8FXkEX=_?EWjcx!7_}7XdlL49425AreGRoU>4?J z9u{B`mS7o1!?X`$Fb)$i2~#i)GcXHtFb@l`2urXGqYbnVV=xXAFbPvI4Kpwcb1)AJ zun0@A45Ja+hcOt337CW_n1&gcg*ljq1z3b7ScXxT_F)XhVFD&$3Z`KOW?>HIVF4Cl z36^0rO8YPd<1hh}Fa^^v1G6v(^RNJmumsC6+KcvK48~ysCSeMuVFqSl4(4G27GVjN zVYD~x!x)Ui1WdvdOv4P!!W_)Q0xZH3EW_w#v=3u24ihj5Q!ouPFbi`q4-2peORx;1 zeP|!XU>qi35~g4pW?&ZPU>+7=5td*XMjL4##$X&KU=pTa8fIV?=3pKcU=fyJ8Ah9E zAI4xDCSVe#U>as%7Up0c7GM#UU>Qa?r+pZMahQPm#s}{@oq}nYfmxV?d02o&Sb}93 zZKiz~gK?OENtl9Zn1NZCgLznhMOcDm80|~@Fb3l=0h2HV(=Y?GFbDIn0E@5$%P`uH z_F)XhVFD&$3Z`KOW?>HIVF4Cl36^1W3)+V<7>5a%gejPY8JLARn1=;ege6#p(Jg5o z#$X&KU=pTa8fIV?=3pKcU=fyJ8Ai9FeHeprn1D%`f@zq6S(t-)Sb#-Xf@K)(Px~+i z<1hh}Fa^^v1G6v(^RNJmumsC6x;5>?7>vUNOu`gQ!wk&A9L&Q4EW#2j!{|1&4`VP6 z6EF!=Fby*>3v)0J3$O@FuneQy(mssAI84AKOu;nFz%0zcJS@N>EWt924xoJ)gK?OE zNtl9Zn1NZCgLznhMOcDm7;T|_7=v+`fJvBwX_$don1gv(fJIn>WfHIVF4Cl36^1Wd)kLF7>5a%gejPY8JLARn1=;ege6#p(SfuNV=xXAFbPvI z4Kpwcb1)AJun0@A45K^HK8(RQOu!^e!8FXkEX=_?EWjcx!7_~QNc%7b<1hh}Fa^^v z1G6v(^RNJmumsC6x)bfg7>vUNOu`gQ!wk&A9L&Q4EW#2j!)Pn*!x)Ui1WdvdOv4P! 
z!W_)Q0xZH3EW_x|v=3u24ihj5Q!ouPFbi`q4-2peORx;1yU;$2!8lC7Buv3H%)l(n z!8|O$A}ql&jP6SNFb3l=0h2HV(=Y?GFbDIn0E@5$%P=~K_F)XhVFD&$3Z`KOW?>HI zVF4Cl36^1WH`<3W7>5a%gejPY8JLARn1=;ege6#p(HQN+7>vUNOu`gQ!wk&A9L&Q4 zEW#2j!|3j`4`VP66EF!=Fby*>3v)0J3$O@FunePvX&=U59425AreGRoU>4?J9u{B` zmS7o1_n>_kgK?OENtl9Zn1NZCgLznhMOcDm7#%|UFb3l=0h2HV(=Y?GFbDIn0E@5$ z%P_hp?ZX(1!vsvi6imYm%)%VZ!vZYA5-h`L8|}jwjKc&>!W2xy49vnD%)G zhcOt337CW_n1&gcg*ljq1z3b7SccKB(LRj9I84AKOu;nFz%0zcJS@N>EWt92?nnDD z2IDXRlQ0F-FaxtN2lKE1i?9UCFglF(VGPD$0w!S!reOwVVGibD0Ty8imSJ>%+J`Y1 zhY6U3DVT;Cn1wl*hXq)KC0K^hIPJq2jKc&>!W2xy49vnD%)IKT<1hh} zFa^^v1G6v(^RNJmumsC6dLZq?7>vUNOu`gQ!wk&A9L&Q4EW#2j!{|Y@4`VP66EF!= zFby*>3v)0J3$O@FuneOkXdlL49425AreGRoU>4?J9u{B`mS7o1zd`#j2IDXRlQ0F- zFaxtN2lKE1i?9UCFnTcU!x)Ui1WdvdOv4P!!W_)Q0xZH3EW_v_v=3u24ihj5Q!ouP zFbi`q4-2peORx;1BWWMTU>qi35~g4pW?&ZPU>+7=5td*XMn};;jKMfez$8q;G|a#( z%)vY?z#=TcGK?Nd`!ELMFaeV=1=BDCvoHtqumFp&1j{gb812ItjKc&>!W2xy49vnD z%)3v)0J z3$O@FuneOK+J`Y1hY6U3DVT;Cn1wl*hXq)KC0K^h!)YJJU>qi35~g4pW?&ZPU>+7= z5td*XM#s`VjKMfez$8q;G|a#(%)vY?z#=TcGK?NU`!ELMFaeV=1=BDCvoHtqumFp& z1j{gbB<;f(jKc&>!W2xy49vnD%)3v)0J3$O@FuneO|(>{#BI84AKOu;nFz%0zcJS@N>EWt92 z9z**u2IDXRlQ0F-FaxtN2lKE1i?9UCFnTQQ!x)Ui1WdvdOv4P!!W_)Q0xZH3EW_wH z+J`Y1hY6U3DVT;Cn1wl*hXq)KC0K^h<7gkoU>qi35~g4pW?&ZPU>+7=5td*XMvtd` z7=v+`fJvBwX_$don1gv(fJIn>Wf(ny_F)XhVFD&$3Z`KOW?>HIVF4Cl36^1WJnh36 zjKc&>!W2xy49vnD%)4?J9u{B`mS7o1C(u5O!8lC7 zBuv3H%)l(n!8|O$A}ql&jGj#UFb3l=0h2HV(=Y?GFbDIn0E@5$%P@Kh?ZX(1!vsvi z6imYm%)%VZ!vZYA5-h{$sk9GcFb)$i2~#i)GcXHtFb@l`2urXGqZ4T##$X&KU=pTa z8fIV?=3pKcU=fyJ8AeZ|eHeprn1D%`f@zq6S(t-)Sb#-Xf@K&zo%Uf2#$f^`VG5>U z24-On=3xOAVF{LD^bFdEF&Kvln1m^qh8dWJIhcn9ScD~5hS5p14`VP66EF!=Fby*> z3v)0J3$O@FunePT(mssAI84AKOu;nFz%0zcJS@N>EWt92o<;jG2IDXRlQ0F-FaxtN z2lKE1i?9UCFnTuaL+?YbU;Xc^5!l2C%sSKmW{cb51Mt_d0AGY}!1v)g7(L6cXIr>4 zoWb|0T*LRE42OpwcG!WfgAYIE#64QSzW086@7wydq5BRWy5G?KS_dD0&g9myXHN6= zz4qV331o-uJ$&!|O?LmG@vjB4p1#+9@jW)Nxt`r%zU`#)1s#C`4H7Hg2NIB-2K`t0 z9*~}Tr0e@adg`uI`Cg5ldeo_WA4gBUms9!PkDhw(uI~rwsc+Wxy(m5PKF%b*52L5v z=(JXu7aE)UsW&^5`2LlidS9ob%J=Cs=&vXDTTuDtlTmGAB9sc+*{`yP@8eH%;Vds}+u^(`wS zzxa(P4Ni+Quc}`8)|u{F%=fWGoPTZXR{Uq?Zx4nw{*~_)s(jyz{#KW#SH2&pqJI~C zG2a{H&-VK39q5(sBU-MX&EsSJGxgj0Q18%havtP+h$??O@igfnXKUvY&d+gPE&3}L zHS&4%%J&&nzF+4}^vd@qRqEeD=bNd2uvP!T(oH|@dXN;%o+lV-xhmgOBeC7L`DtYsDMIoozseGSQ zWdeFedbRzoKXT*mU+aAQ3;G&Me-FL#eROokRIYZrk3$W+_vq{CUpz13(!Jwha^=YJ z^M`u*FP>lf>2I=C>;IWvzIz_1v^(9?w|1uTeM^=7qCeE*wR;`y>Td;lj{^Gr==-35 zgudL;TlMR$rvJUp@%vvn)5fCjj9%F<=c6BhUfGXx(9cG%^k*^pmFS1jAM^DRrrjse zN2712ZkYW(h54{D4*Q^=XXzu*&$o1Q`_9yJfu);erO`Vq{X3rC;wE?UxtOXIT2bsb`H<&vyFaDAS*{mOcvoQ%gS+eVwKM09}9V&g);LpAVo% zEd6El-j*KL0cQHsWa(|_{Ve?y^!}E9HTnQce;9qBrN4pRYU$0|9MhjcmcB3gU`sz9 zz0K0o=tC|2PV`}xUO*pi>8sI4So#26j!i#DTKZw=qb>bH^s$zH8+yB?{|Hk81+S2#dCDXL~jHOROf7a6Hqc64ef1p2S={ss6Vd{V0(#N5{VCgf^U$pc` z&|k9jchUcB={xI>F`IT@w)7N0FapCDKSq9zm4A->J63)*`MFkpBl&l&{0>|D+dI$7f8EntocFB!spRKd`HRTEZ{=?# zf1j0KNPe}If1UgSEAQx!+MDC^pH_ZPPj7Y}wDQN0|G>&$Mt-4{zmxojR{m-7k6HQG z$*-~U8_6%S^4sbQK287sW#td>^k!$Vl~0oY$jV<%euNKbEZKDP4TCjX+9pGCfG<$p_lnU((s`A@9;R{COB)1T#5{_CFJ;(Ti5 z&mq6U%4f*`$I3rMex;TFEBVi?e4}1ynEtG?^80yui}SgaKZpD~R{mP@>#Y2v)587vP%XZG)9Rz|*yExAG^F-)QA8Bmb$DpF`dm)#k0+E3Z5LKz^N- zf0ca5%5Nm^yrTuG9+ma~PI`fF`X9FPM|gU(6S4B=l5eo`H<9mcMqsb4q@*U*+S^3+@kFfH;A-{!{f0z77E8k~l)6W)X zODn&>r#CyJt^8#2TUq%#$&a=2&ynwM<$LYok59Xm-`~?)oUN_=H_4B)@^_Kn#>)Sd z{CF$hYgd1Jx3%(zczUxl!ODM|`~WL|KlzDPemVITE5F4ce|slc`M9UIINMqIbI4D& z^7>Qn-u~L&%D+H|Pqp$tCcmSVf0Fz(EB`L} zovi$}F~2{TSos4yy~Sy@^5>ABZso5fzq6Hpl>7`U|5x(6Sb2AMzdtjr{2rd(;_PbW zPa=Pfl~0o&WaaNB-)ZF+li$tCzfXRamEUr(e>}vj{9&Hn?98_EQ^@ab<$pwej+K9i 
[GIT binary patch body: base85-encoded binary data omitted, not recoverable as text]
zXV@}e>*K*6#v_m(u7rSg@;{buUYB@ed>jW`FUCQ_w%~d@!Y}Ch8m>Qw8;a|J;++lu zIIe$%Um5N-gx?jvSRQ)qfuB>{=kkv~j77RQ;UjUvE;t1I>)=iy933aFIj|y<)d6_h zIC1}#;7u|89S?KQIM_wxu}0UgD?f|wVgdL+BW}MJ@1nN=%ZM?^V)%FC#ODHL6t2MD zIPn|fgpF?ku#$H1pZIMQ{}ud8lGQa%nt$SvA168_VI2K_h#L_nJTZ=p0>a;d-w1df z#ifDY0kS43ejfaGimNQ&MZJP>uQ=D6#UGzn2Yfx?Y9O*@9ELv;UIM&k94v0|0pha5 z{R(#$5d+||BJAiC{DKUIrliU zj&r@x$Cvcp;@(pzCjRZC)SX;}rKqBIKut5x|~;{IPIxX%w!DiN6B;{fPV% z*lC0diMO8;ED~Sb^e&teuBdox%6Iq%xM{fl6zQH)+E-kJsJnO*LeGT?e+j-n5S|79 zTlknFg~TYWOV%<@_;-Y_Dcu2tpNJFJ1mQM{bLlF={pq-JCJCt=CtlD)?-=CNmuv%@g8wI697S9z_@xn_ z0$5V`uf-#)Eb!sNx5K}KG*jWqBK_ZR)e$xt0NxS0cn)!nzJSXRM{XzZd=$jM2EfNF zyar)+%AhK)b0Ay^&QVYJMS%MhE%^>Vgm4txJn)_qV@HI;;-qVg@O8Fz6y^BRD!v##G4f26KsCGQRSDW$^)x*iO^8NznKPmc73;jY2GAX$F_{|Mn9;JSf- z9Nd$Ve^7GVWm&}U#r1pO?}h7$2p3bHnkBw&z{25P!}Vn)7>W3275`D3vQ$z00br*Q-yg1~_*~RX zg!@9)aD-7bFN!k}t}}{n1+b;KP7UYiJ>cJg_iy-(;D*3G1-uFJn*^ROaP4S5!h_%{;d&eV zQb;ohcoO&@;JOQ31B8us<2npV*%;=D4;29Bq12XPN&NJZY3Z6gViXwgt@ZS-3v`YRqVDEy@Q4DyV zhJO-q-4N~zf0^Q3{2})v`b+62$-f0WCDQZ<|6TEH#&sl|qe9@ztMCb2x54#VxLUww zL5Cu7@+l7d3a&2&DKRbp&pY6`1#B$B6W}`FdOBQl@EX+yJ{j(WWUWE?O++?>zZ|^x z#5)u5uL6G?xTC*uJqB@~!QTs)4fr?Uk4tBOUlEoBycOZnDZS$x94Gu5t_#LVKNaDr zia&{PNu*s4mj&*iGIW`?7ICfNA4l9Y zr3Do7GWbpb9|`B+tWRJM5LhM`3AzM4J!Cj~68`&OYNPA?hoA3{?El%lu;oWfDIMqpjIx7bkpFKM>q}A zzJqXc$tVN&$RZBfJDyWAGOOe>wOs zBmE$R9rZ=rW~I%C@B=tUzrmFT?_D@Y_u;RDdj{#Mz)uI)S-j69kE{q!19lV6(J#Qp zL2f^|*AaIf?mfj#LELpXM`eH?f^#$F5VKmCyA zYhcB}yBOj3;f^Su?4_uysnqSloMD*FfA@$ao8W6YzY6yz0Ph15*m%3xU6) z{9U>%aIL^s7WiQ0eID1pAiQ1jR|>y?>v{;s!hah4o8e~^PgUTzkj_y`@o#l;NzjhN z-v;+0@UOws0BOHOxHYgZ5PlQk5snv>4A{|eh z1?~aX4*5<7ehacXBm6zmOhz7#!V&)-{FiZk7Ch(SR|ii$xI5r41mCDU0?oj`LuuC| zTm!Olg69i_Pr@0E2i_Rh+aU9G_!khd5&kN;1c+-7e-fOd>acffBB zo=xzl!0!x~8$4|#`*+An18fJ%a}w!SBkpM@4_IwDM=yXkFT!)dGXp%c;ZFlk9`I&E z+-O|40{#rJK5&t69pH>EAiNsskHGzcG{3?*YJjvGfISKSdAL?^jv^rIH#kQRz3|Oe?|itE2;TsY z(Qfd}1@d`fi-}u20Sko12#g=8N@rl1o&OJT++1| zu*=9J6|geMZy5Yu@Oy**D*Qf(uO&H@{_nqcqTK#`%8;2l^pO(s9Ca<3Fu+GSw`S*P2^4ktmU4&9L56?Tn*5b@Nand9 z7WH%v^IiWL4Yg4CFJZ)|Mi380xg8b9!h@4vcQf%KghO&;p~B&9-63jFnDAr?iB~>N z{z}5HU_k5m?P0cctHY^@H`Czou=uZHqU88ryhAyzU3XNv1Lb$n;k4p}L(*Z;=lD0d zfea`#_{G=@3sny9?;0PF+4(LMUKNt=;d>3q#P7y#j{jYE;0I)OJBLA%!*gAuJ|^eh z9`3gU%yE8tqS1H!Rb9soGX6POfN}UxchCm(bnpuq6PR!E%goo-u&ZIw%JJVUOT3Kn zKSg~Cx&u}qlXDXrKk3C2*Lx9*@lH;+PLz{Nk(vIXKIPoOB+xM3_ao>u9e*}=I0Mus z_(i{rNr=PSrXs)1F|NNv{sr!#2ig7l2NQh9zseo32H|!Gbn98-o!#L~P+0Jb_7@F; z<1coTdb)KFN6n`bU++2(&_Ur9GcezUZa@Gs`Sog2{)g_c6$mvipl+pvy916O(=#(F zq|3MCRmwMej>IH9(8nG81HBadVl04(pTiHiLoT2S(q|U}4zItI{5iy*FJJukWo0Vr z^NTwa2eNz%ucG|wZg2@&Ao;V7QU3RO9#aIdn4e;@xnmvSP6uS!9@r{vpx9EVA? z<1hOo3djHf~|;#1uq9AtLLi3zgf z@B1q8yg@A5`83S8P*vt@&-Ix7S?Hki+{QqnxI~zeaH!-lGfxAgkxi-yoj- zKJhf-&xHxPq~A|| z%QxwZv|H_SwA+Xv6~>y_z;pQ)x=8-q!pouow>m5+@Wx_O+3~k>iv*zV%C{~Qi;sU| z1@ecfpEdhrLcqx>;PqcM(9t@4xI1hOvUX{2IQ2ZXj(BJgi+N8T;&Wv5Rjx0nVhBs_U<%Ad8Ka*7K-jEdy=ySar6(0t)lJ|=!) 
z7jYYB{`fiNti4P*X170nC;#x$#EVGIWz;t(e}bDtfvnxT2;(^X4>vIZ;c*V=*c6s) zQYDtl#w}mr(($M8#&vr!DRg)$jc={J8j66!S7<_SdOrA({2LEbAL~!AttNk&`P9ei zbAL>_oSc+ie|-YaOMLgYkNQWyJ$Gx6PEM?wxPkHpznJ&zAf7)B_4!qHcq^RvLAQVc z>MA@_cIrQ}8Tr>TI^;)eTsZk1DiO!J+i3D1J3|Fg%z$>UB%av4%mQi~{9=FT zed4iMneS7=UqQj0{&&g}uOj^W9>mwbMcmreLRb`b{0o03ZqLzpRCtGP?8SWDeA3Z8 zJujt75x4eaIyN*M|IB`rWA$o37VaFrRO6*EiV3-<@p&Vd)hM_ zg$~8ZKRlN9DK9zOB8gYlqW=`(7toMeT0w#L3M!tHGgafvC`DF)ogH4vEn_lGK9>ArJJ4=c|DO&a-nSfa+p?kF$`qminqe3PgTuFtUkZ6m-sTb7z;WW{9^qol=xQ-9<}Fd5tP5B z$fNc4B*sxL-=uEQ5rpglx;&ovX)WrO6h5^C<-Fit0s>imxYUFA(~{pn{2gZzzf+a` zy@g+soQslUc6&$tNDHky*|@!0KFSGoFS3E`xhpFB{N|N6OBvevhSv_HdrbX*QR@bE zwQgYJt5@^I|9rK{O!--A&~7%)>8ttFjVi?L{-)?ed$!j63u**3{|(}WG*2-*6xzu0 z&a21rb`yUj<^}G4MYFfHdnBhm_FUZgit;l(NB+9fr}RnUy|oUIPk8N=w8ID9 zII6+`;%{m`X!SNt5#s)OquZNv_hqHqBmvp@wrykLJGIU*65}mL6Qt*1jX!e{4$dq& zIR(7&d`Y!?cXVIuxyxLZc9^%AqX7PpF{0TCbcuB=b2Z?e^Tpy)}CZ6NPSkl zK>dHmxX{sq67jc7Kgj;OWq-3<#6s4G!+PFr%usGD^W8R@jIgP3o$+W*07mzsY~y!I99gK7}aHrXea>{C#96Vxla7lH!sBiVVh>}=0tkLJ|B zz1FX+Txl>tb9$!IOPAFo?0!+|KSp-2@#Nmsfv*elX^J$L$;IO z;WtwA`bRhe%gjq|M?Ti%lLixl8}H+^FAQvNP)yix2!%IWD| zpanISJ|Fx+{uX73Tm2b(miRK+IZXU7X*_l^75S~-9NmZflcQNKv%|hi#BZuSM|BFQ zB<3B?p2OWHBM8+mpyfS@uUtsn?ql*VsQ-4a-K)HZcv{V8tz6k~pgkw%3sp5=Kz5`t zXC_!Z%!I_f^EKARm9wWb|z=y?ef|Ix>^d#$zpvrzM( zLE_*36y;Y>%6#p)ONs%QvrjhNFOxr}BJpn8kdKg@Z6)F_Z@Dv+e_!qMLh&~kNc@EM zv&>ebX0Tkh+(&vq)?YW=Oxzz2Pdi6_HWr}%sNMmk+D!bS?pHVAe`%k3qxwf{$G*u) zeJ*+Xwh`}B&hX5Xe^>p^PVK*s)c(7*=Pz|9|IYcWZ?motrZ#%fT5HB!3D~(`!!bfY^Qc9O!BkVr2N=e+6R{b4PHVy z_m@zP<-30d$06msaYzyz*l_vAgi;?HcO@A~IR`agt1S6nzo2sMVZLiJGNL&=ODopE zJNz`|pLHKA0$ICMN&R3p^@FD8OWP=az3PwIGY<^n^lYf{kIC7Hg+zz9*1ELeS1=C^ z*5!`m5AwBjl1!Q7f4&YarJi}T4rci#y+}JG`HA+d6-dL`r31ucbiYjhyjysGL$|ZM zeZ?O+lzN^%O}k;+E1;E{CvHDX`TfMdryu1P+ejS4s(`8{Aih)UQK7(A$?Pnhr$8c&{^Lw$w|$Ff#n&&l5TjjV4HpRRT0uHygT8|Lfp>m9F1{0r^l z4-o%Zo$K)X*&E}@AAFnzyy?XMK=ZXw&DZR?s4$4-x*x`J)e!$bsy{nbe+md+)t3DA z3zFZS-&a$yT$Qw*mQ(znXr6mR^IVg&I4$KYb03oewGsc$eUx+j1KP8k@bNeZZ;Z2%ftydB2_&f745plThb3te^UIFY(UWKN=_gDSr{)_#5*z{)&}|PtyL5 zwGWl>fVg}sXdGhg-Z1S`FO8%=R{y)9gLeFWzrTAA@#@*hA1Zy;sNG(p`J>7CaXszO zrXKBP_?qR!H`OF=?RoKRw9lH_l)p*xlVT$5^qG*B{8k^jXQe*=`GnE$lHWg%@vGK9 zbA3qpXC%MGDauLp4ee=myO)UgtrEoZihmRaHcp=`s^_N9ujpv3FBWUy-Aq7z&VRvt z!xS0Tf_QD6N9r%UEDRE#oEWXk?bo`TYkwUj>P9*KIfTpIbidS}Mvy%uES7lX?JSo& zXY1&|7sT_2QO^bHujkEUd3U*km!O5gFV>CLkiSPF;->Svsi{v#Jug!ug~6T~03!|IoBslBrMSa1~enWOUn_B=*CLpgcle-bU*QiL>x|>6DNe`~-tIIk%*bjYG0cBz|A# z=wRvAT9E5XrxT$`8y7-T7 zC;$0nkKV_Xc)%V1;l9LkiGNKr^%>?qlnSzO=0HqL9Dl6l^_j$9Ka%{%-A7$P)^5zk z4RZX)wQghe?Mx!-xl89F%x>G~5}$N}c3UX(NljNiiEq(0-8k>BdWHe5P={Bt$0JZ7Kbx#hHfUH1V+kl8=~Y0BA~kMhkX zr#FzlpY|8ftOYbW3;8ps9k6z+1mdg3bdU)IOBm-BC@AvWfo$rnRUl>XG*>$dFryodu8^5K~zPW#{tYlu=xn_6X_a2fzC5-qfoujk%aCbY(?>~$3 zvE3F>SNuACt`{b5>vg&DAUOQ*j>PSLwJA^ejWv%md_i0CUr>K!a-wk1*U1S!$c?

=sbNbi}(=rT$j;qA*}~{PWcw+Vhec&$aPLMBqK6b96`5ZyL{E$tdTv&M}&twBv|R z)i}z%zvbxV_Qa2CU&i|1w8?mXt9bia_wllx)ANS*Q|x)%5<+~E=0VnezJvbF@h>b! zdxkUHkZP#^L3zFL`I!#HcWE9JD*jtpsLyK6TP@!TY2vT{_uitMty=%7A~|7)h}X?U z`&;`k3LQkCkM6tGtA1KHPNnl~59i&eK#t!(PxY+&%>wE-E#CvT$ltIO^R?$XcTeK8 zwT^1{@io*_C%;co+AXd0AE@=B%IX)wgx|pf==jf1Ais@gX5S$meDoT;X1CHhm$+#i z^|5+;`D5~T@$O4rowxi=`)<}>?83n>m+y(PkG9WKuChKHP(8tI3TRjc^84fJf?LU- z=^XVjJumm7KIsRrTumh>!$$I-(aS!@zf0#dI%z+sp7?)9JL&YC6izvz!Ux094!`v| z^Ia&sRBH0KEKmM2YBw5vLcDP(`G*M~+JbWY^Pg8SAaU~Vw5J?<9;cycaQO1F#O=QK ze}i)T^Fj?Ua55gL7?pXL#{ym`bE zc){1{c`7&WuZ@SJmr?(ze^7rkUjcR1IOoIZkLJ4%^FB9TJ?Qm!KZNnU6(tWztUzZd z=JFkdmpo0)3p9?_dU-f@*i?9PJx<8&YXzrq5o%6JQ?Nf|1A6ko^T)4Jj_JQFHTlzOUToz}9HV+NnDU>MoU>1oe^nLADJa}^ z(@y{6I;UatyDZ-h!|e0P zZt{1|MmwNd22}C~;xB6+VRBwaL+#|Z)%gMIkLE)ehwrS$e9;UB)Isg)S87*{KSNX2 zH-9~F>R`%A_95jo!MMcHYJ7Ob$xo*C)8u5nP5U3YP5rBpJ>*mdjoZERrO)d*8tApR zUxiWrJMN`W&;ZHF7)E{k^L2eNaCQ1*(>MgTFQ9@8RS!>5p9takw^9D7MwD;u;o=X- zUrGJDJ*VX_5bvz#)Y{cq7znw1J8J#M?63wO3UT<-jm)=*^8M&b;s-U~L2&{)5*mN~ z*`AO5>w8hoC5=D-Ov!vts~^O4GN5lYUn}LU+YEe6edwk033YTn!Soq~m#Llp)%6~b z+2@A#<2Gqt`K9uGt$+Ob|D$!?HQs*Z)9+FL!%JDO?0LMb^VJuRvbzmnm`={i;@;&*{eFqe){lvOO$$vxmFia5QYiF|Q zKBm-tv~g^ujnwmi_H9vZ0&2R8cv_8zt$*p>i})Gs=h}T4w1Iea$-#6rpq%ee|Eg!H zzm>~9AFlo+)Ba0W@!!?{&eLiiN(*1UmU1q7=U6J^MmRaW)&4&x{_YqUJN$kH-gj%? zs%SmUKkwwe(d77ds~wJ&oO1)1Z!YzNrhitgOZexn+y~8^oB+==7^Z$Em zkl#NiS-dLo(Rp}(Ye}Dtd*Z*p#Z_;ss@{f*e-ikeKC#-ru;=vJGRlwDb87YBNH>n&TgKUGrvGkY=Ldh7W-+PH4}ZsG}b->u%>X%&Bc%O!of zNFN)gT^>e#`e|Hb&&zr|;LdK_`_pb_&o!Ny?>zOVcHjS6L;gt`Pg?sqDwh2BG%m8| zI`bUjb0@G|roU^4(&LMc4I8uG$_yb7LTP8eJwKy!0w3y}fZ1o(Ta=$)^J0^KAv^IR z-n^!MapF<0Q6D?kS_?PS*`Z@y+QHhZVjAby)%PE)-Fp`S$M3K2rGWwtKd*H~tIy$O zi2LW1<{l!xQ0p_cj(Vjn^;}b$dRqNFjET9E-(TmJjlW@Q;yu@p-=6DPe-e-Wi@2T7 z%&zn8gY^Eal`E^xL7mb(&g|UiE9$vR_1Ww^CzA48dGB}3I3s?YC(o}N+@LCP|2$sP z#JumTv@c-tub_gt^0rpLX8mA?x0&x5&F_rA^$*1RYCcs*`3`-7_yyHZtLKxoZnN#yH{GR&Xqglys&vpI9)PIZio6yY#G${%5y{37MjR%ryKl;Aj_qXS$ zlFqFa>QDLh+&%L$4bpG zcScL*dsy-l>V3ToYOjpHL?+7dzdx`<=lcBq<@NKF-?-qT`Ch?9$>r;xr^**j{D$`X zEZ@)6UM*I8<=Rt6pH(KmKd%094Dp5Dd~-)N)~hbw`%_EOvRp|LKdR>tY{WP{|4u@@ zHnR;mJy-eOrrq#Z1@tE_9sfA3&zBW{m5G#7Xg}rHd^sr^D9686^~COD$Kk}2X#KyH zP-AKy@!ioc|d>sN!)L5FC3$uZ(pLEY~nAi z{raa<61Vo`kHzGlt#dUt@0b=w`TqN8%@UITjK)_TB!Bc6me)T=Jq8`Cl|`(9*Q^iu zo4Y?51f2_hLqe8mU(8>ZJ6D4F=F#}j+SQjPQqQznSKJ{vMGFzXp!HCOmIT;x{#}GyRi(Mmgua^`a+s62J2l<(SQz6``D!Q;FkvVL)9wQqS!= z7i9hQcNoB1TB#Cv_;Y$+{?^%M+sr6swrV@YY2jWe{-$>^=+U%m7w}n@)Kz{%I{W?X7Z|_1m zIDQjQ)lS4`>by{C$vN9jcGLX2p73|~(a!$+;GH!-$)=JQNc$b^!6k>W{2G&nwJw z`RhevACSMPH=kO!ig6w}q8>Y5jK5k3PICU{=dZx$4O zSL@sLwZ3h0tNvhc{0;S7qq+w4!wlm7Jnqy|+Gp+S)YG2FUI)m3@mb>5?p+y2JNWNA zADT>lfBh{d9!#f?e}3tn`kk89C?CV!fG+Dfosg0Eufnh9r5yi#lj5Hf-+PYt-O3w2 zi+Jasi6ePHZEq6)vn1u)_@trU&&sdo9oAn7RgZ4*ydf&mQlZNsqYdyr) zH+C-2bK$*rcDOxpKi`7dM@Xx7AWZsT_!HRkW$mkW6P^PDhWOg0+v-QEYhGZ_SHyP8 z>6n^!u<>vd8XR}Ou4o(*D)~FLp1kYWqx@Zyvi>BkM7*W=^XmH#vo+5}H3+C-cGi>Euo&VcX)rTeO{_|@1)v2%Xhizq5mC#qhC|b49#nd z|J3))cj-;$i{WcPf8~td&MmY)?0*O8)f_DEb2_hU@*n6t#tNOsFuYzS@}Ex4egVV9 zfEsE1kVEg4pqd5b&fmHFRbS^53JA}MisJCwJE(sd;eUn3um7yGlyh5lF!|@SAJJOp zy{$i*g>@Jw|0k{2Sv_2a2hHLBdEkWC$X`bF8PzqQAC{B<_#eEFS?Pszu1`CJ>N`SV z;%`)mcuT!!gkfPot1#ek`M#@lczYHurX}uwC#_t1%1I|XoBdx!L7bd5^Qnl*xjCQw zZT0+iQNGjB@H+lPRmfjS?bW`MEU*9FgICeOI{tR8$X`ct-bg_C3-$gBhQR^-J(B$X zc%~IPCMU;#5BK;e;=8mjX8ON@by2s^yUE+X{y^t3W^4V~?0iuD(a2KNv#9h*d5`kz zmSw%N`Y-|UP9Og~Qd>kgytDc{TgSMNFaCP?gWAt-6`5~V)Ne=grqZ5YsvWR){;TA) zTfeWVkM&=vXENUpy>-h=Ixn$m5&55?_>j->0+G}6)P3TZwgeP=iTFje10}`Z8VWf6 
zZW^yzzQ0}|-o6m!SiLH6o_M8##O=Pkr~6pB3Cmki^50lV{yX=HoBW(3i2M7eq*#dnEw515#O?$`Pz6R>2Jgv{Yt!=^370!_DrJhQ4AEG3Gs52VwjT_fSqZ?ME11 zRQn_T`)gY>QvZEw56vdywlm+m($nhU?Z?<>{JY9`3H3+y4k*d<#LL?}Lin)G#B*x? zZv9s}jbm4Iq8u#C1azq(^%=K{b}&7&>Uq55?U$WO$a4ALznyrJ`OeHkIo6NdnMeMP zT9>foAy}1a!7b{OfYr&r;4MjVrAFjMzyz3BRE})=mzoPyE^ZNdTN*FLYP@9i z>TUH?&DBp~88e^~P|TIfUvEr(gz}GWq`o7aSZyn>>I?BnU zeG03G1=KEW)wvFPP7C3FIelK$Ifp9Bw?RANy)|Dp{f}$k-+v!)ODxN^Vk@5uYbRgZ zO5EQcIj-lcfu661%6F;yS%2TQLl5fHL*EygE&ekYKsx=etDmxVb>An%59XvjZJro* zp7u$0!^7X8{Aj%wX?8wyqCr~VbU|L>c9Ve$nlS9 z=hxyr0+;XK)v2eI>w}rpbE3Z2G(hr~s~x+h@td_*zi3~6r?`!0q5Sl!S2j+Y@+9r%?`I_# zNPMBUPW2ilR!;tG?faYj`O}D3z0UhGG9eY3*O>a8RQqi0%*YkwfAt^oTf336EOGyP zDXCOm|NCi?TA!b(cGd3th1!(ke`jy8>f0o(lUTi~8AUx)>U@aZ-@F=U?$kKb+W$Y$ z?%6Xg(+6G=G(_>?4ZBmH`r2=@`|JLH$JujUaoVA}gtfy2+~IzImuofkETQq6$@xd; z121TPmrioNIY2o{biNnOdq9_W6W^ST^~c6z3ouZ0`F`x3qiEHO{Qkat&nv{&7p5Fr z_v>DW_+70#nVrAbLH)O!BY$J*b5!HWA=ySPVWn}Pf0mYBcP}1FyGs+-JEL(? zsQA`!Y#%zbbE|{&pX0UZ9@-ccFh#`{{pAtnGdB`|BQqqKR*hWxeVueM+1l zo<-vvv-3aNM@a6q!(ZMee_riVTf4DoJ zQa#D0aiBe4ohp#u|IWbrQ`D#FH!PRU`%0_5ZK(F&+U+VjUp>g%Hyoky^Sc^9+kHQP z^#Qx5QZewBOr$=iw4Y`9_Sr{$=6^ta$|=!XGsvGr>(XJuH=?6=^&~2q{HEte7|%L< zSW)6u-%cc;oGw}qG5eI!IkW;ghh})AE|hb45ap~@zQ0$Yob<~ceO^+wCjSBbSrN9~ z1A2f4(CJg+b;_~&S^oL>?bEU@+l{OGu7y31`%nR#oFUUGC%@#cJWhT5{rdI;$?woc})pQ8fhbkX-)Y`jta4*8S) z#(Zr(Wa1U#{`ZHAsQuZc`Gwi@y3S*SYCqbZyR_=B%T0RJJ~_0n@lz7!YtIpy8I&to zNOV0+SmIDE6vWxTzIPtxfY$AcXx%=8(v;M?qW_(llcy=CV1DLn&tq;p2u{BLp7NOq z#BXU`&h#v#{u-?1g4Uzv2_HY6_-uV2wTAGtX=vvh+DEP}eAHvw+dCRh z`tN;Y_>^*1e9v;(eC>2&=37SZi?xvaYbPn*^dCD>QWsgyg|GvZS z5tLI*>+oUH|I`5DPinl;U-;ZwlruSocGx036gSlEtJdGedN`NSA(ylc;D68X_6W-P zL;LBL?_7<~qck5eeD}A!U$ym~xY_5|$DAwJSf2Wy);b}Ymw@Unp`5KccVYGagX-k} zTl;Z?#Q(~C@=wID_87Q%DJfdi`i|{ zP|B&T{U%$7sf~3lm+wuTo3(oQO-u`Rwoa`aP)j!VEd3bA|_x?gV`2SPWdouMIe2w+N#^D$8vt8=A zj_pz}$&b!0~O{RbnR@km7ARZK*EhL)#&teb znVzF@;KbF3ylp6_h%(tViE^rI{ATr`{dnT5zGr!Fh~EtxoE-oCyZ7`y!alu^&`A7M z#xmb>dXENAen8PNl#_oj<(vK8e}_8x{{M_@P`O5{T;n9?P#F0e=sUorg#Yv!aew`1 z<|g8mbsov;;k1jg+fTHQ)!Q$gXZ`7^{Shqt1a$T&`8%qAH@kgukoY{WpDnEV;D7I^ zjrOf(d*4wn^Ct84zmHn(E6TsAeHreDrueSmd@4NxMHg8bM80V-_9NV z(}(sssr5j+k6R8B&z6h!K{FoEb90$*C9gdktn*9ltB~K?tHe6TeoN(TD>)lgu2fnl zF+Cr2r2Ng^_j}809-2_|(2A1NAvyKDU54ed{`Bh9DA%^!Mh)TNl%A{(B;KwNFuMCG#~osda9ouj;=&?{hRRU8eDd)$_B*nXmtS z{(}gW$?FNaeD2WAtOrhs@o=d@r7*oy~4PtA3`| zJjL|<8~dB_wf}|xqMRS@QI0(qlk!lXm%Z=dB^84Q@s-t!?vHDHZ zb1ph8XMg{^!Aw2ZZ|b?W`?3+wspI#*U-4!e=Ij5TRIB&M@4ruau?q1wqiLT=m8(WY z%IUq72DW+A%)g00NW}4m-LE1mY5#Rv&$o6jc}vRo|BrIe%an6+59M3?xwZ`X{r#*% zgDAg%_g+J&*0211|25xJPC}jcXrpqyyPfOOW3_I8<|UvXb#8E-&JEf+YR6pzPXk6y_d;<)SG`*eT(+5ruS$}|E^7l z`}5z=22xH|J*QT0C*>kuOwTpC?|>T4q5d5|qy9~WFApW2rvq`@*Vws5k=oCW zBjVo=`dssxt=iW!dp5aDIkB2|*zGD6aAjR)6Pzf96s>%J<)Y*_?s*aZPo~50jiF z`VLGAjjLmYU(k6=e}8K2yOi($fAsD|#NX6;d#D%CRJ~6+RpTqmcPl1z&JOIQ|L#G3W-8yv&nf4=&RyGmT$_{kaf8mg+PM1XHI#Eu=k!d^g?e8; zDl^-8s2$Lls+51XHOp)AJAX+0Wp6zsnY9m3QjXc@uc746rS&UoKYO8Hwy?4cyk4cK zXEC*hHm>NZ`p|6&^(mr6+hqs;{CV_f%K7>u+RfIra@3=J5@?;l?2~9V`IpC#-^yD~ z^MxBZiCcYmQ{x|he10UM>{g6=8h_N2@sBsUe@6brUi)9QE%ovLpX-XwLuS%?o$1*; z8TltvqMp`%jypwsw(5D9>{hZ5^Yzb5WNblvn14=e8tpb)>meB=XS2q2x&CIpw$8t= zDD!=K7V&Jbi=(z$pY*>+@p;<#?ek|i^Yy>CJYt9Yzq~9V^YvafrH6#9)A-H*ABxCT zlrvS|C$Fmn!+&9UPnM#d_FVj&nEd{CHNSj?<%&5-{?{cZmDY#-b8EF!4-2RszAgTb zGcsTQJBP)tQ@;Pc?XFJbudML{ro90rRQsGw^AW4h*}IYdl=mKb*%-H8l3d%G2rQhr|T@0*=V=zL~UeJ^^A%5{2y%B$}# z+VfsFg1Gw69^~zzr{wzqs^iBK}Q!|6;z*!{ibk zm7j9%=$u%L@WGnz%)d%K^9mo|kNiD#AFbWEbwzq=Tx8?tPJa@Q(RXI7J$Ypi@w>}t zH~YVUyuFC~-%F{c^~R%a!41?|`jpN>eWrQeW30cLa{PU(La30=p8o$>JuKBfCE^K=R^vew5{&xTts9nvX 
zb~TLbA=BTXKF1f+&NeRkxEFE%obwN=4@16ZzBc|T(2e=}|67!)0qxLJ?V&w)Ust1i z|GVP3G(NAQ@wwgKTuaDbK<8ELIelOAzUn_vADhSB&^YrW)i=8@5eKMe^9$s+e&Gk* z-xIoDCV%qR%-4U9^2zO#Q*{mHgvp+-={*O(yL2I6MVMX&O{_}^8?+>89V*FRdWyS<4Y7jAa`Oz#EM z(>YR;|FqWs^K7FWdme9XmHtl>x95FH{`l?FeLnTvrRO@0%9R84-97K;im=}16&}-t z{CU-$m_DC&ChmW~ZYerYC#Sc!E>~n0@#^Z=N=g2LW|R}F{c$VTrNOk1KhOF?=LUD{ z+~9MP({>c)`{&ALt6%$2{hB?G4mowjl-h3yw)=T~O+Ky@5vb`YNC3GJF z{dSLXCL|zkN8F24`zoRC&*t>=Kvq#>B#?w=1qpLj3EEmg}jgU zoQ{W~-1&jj>k|k1={AJCpmp!odQYmZ^dF#cvj4x*C3MdDPt{L!>jB-;x#eQAgXxpG zH}%}5bA~0w|7>OInckZxw$ysm-8$6Y))jj$BL7jX|C{{v<#fN6u>RY)sIl5Z|NGwS zr%=w}Ym~o3`gC|eIsQ3>MnkB7E3GS9zMWE1j(^@}Xcgiuy>d0*O}wf0c`>~W=$kZ@ z|Eku>8w*dgiTs1qkC+|y;=ysxm;b+b=P;0Scwe0ZET;ZzgVu-N)%vitV=MN=-#(ny zd5OP2q&dx0vM2 z`k49V_s(tJOF_JUbKXa5_ts;b&FOiiCiS;`e?|lA@cT2#Z+hP8OMJ1uCjc`BbYLlQ z|NG<7i-TmeM$bKm&kAJM)CUO zzo_wf1=aI+{~`Z7dXEyr{(x$1Ab*8qlv7Ula;+!(`dr#f{>|DSxBjBkI?Dgs>lgmi z`(NLOv7TG|P*~>x4`}>@<|3f-s<-}kFBfV&uvg;&v;X?t)aTSz)?2I3OSMl_RQp86 zze)Scjr2dT1C{T6FvM4H500h$&%N_4^EI#c|9?68RoY>@w~n-DHJ|t5-Z*or^sk(a z_P6ojr^P8}@&e+v&Xc4QasT_Vp?QdB=|=t$D%Z-@)W3k<`!+l8(DM@F9vl#cc>#U( z3ib5Y`HSYGoSQmFfo1uC3fCb%UG0O}ISCfB+;g$p`+isLA>?nnoB5i}i|c#oyETro zdQ~0^FYdWGr2Q9ke*txWj(RTBJkjcN<{RYyxCHIcMEp%^(mrFg|7i8_rzOOP>ODup zr+!C#iuTv6Tx%~7_y0%!ORazU?^#Si$K>>n9>DSzFn#(_P7>|=8@^NLJ(lVGLv`_w z7)kzany*=V9zBfmOKBb0@SW3%U%$b8(d-B0`g5mGAB|(p{>>K>&vxfgJlTrlR;Kuce%gPf(8K+fZ_ve@Wcxf60vG_ur?UyMlHo z3(dF^EBb(FtbcC-3fF@kdZ?{S5_LH@7);eD*7LVvD#SsBgC zFdYbJN=nLETABL1FFf%)>i?rRuKPBW@_+N@FJ*Loa-ZH`z%VbMoRO6Oqw0BI)x$#C zm#mta`C7eAx{dt)JU8>##Qp#KXpxxsVU3GgD&O7c2wXj{RFQTF6aMX4;{N}zf7XKK zU9WkTjgzlUCI5{dD97x-C=e+ij z4=QrTW5$PDagHKBIaM&Qadz;`_*6k`LzCU zUH|K3c0N=h{&Ib^k#gp(q@0?PbN3o?|NCISHKg5Ed;3atbCG|s_O(w+&S!a8UjN+7 z=?;|R?_-Wvzxk^A&2Ex&t|jFs(|U{D_bOq;hxefV?ZjUT^)o*EuR2YALh_Rz!_k1A zu1GolcWGAZoJJNsch;`{_BQ!_ySTSs`%@}%(UcR){6OJI1y0Q$;{VZ zr)~T+?Qk?X?PiFSgb#6$7tk zX7Y#Brv9%e^2L{^zyE*PAM1R}VV!TW^?`xEQ%;P&^Je3X#2RP%|KIu(CMM4Q{`!At zYTD<7&Q)5yy@%Ob;3=4Uz_J#uZoJWWG=6e4W*k-8ZObj-r%f|G!qc2IW-M ze8I*uJwGMBSNjpD_5n4{6uf@hVo2C9@pUx9n zzp&zW%K1#+JF@3@D8>WM{*(0{baBbgnv?wg_wmZw*t-a95`dCd-KuCcsDiV$xoJu81g`BSxTV){Ruk@&IdgoTlbo43W{(IbS^&_4-hR?6H zx2NBwe1ARjfab-0H81WcJ&$Og^f~Q&+WJNc?Ta)Sm4nEJnw`up!e47f-6jkK>}@{gdQcIAD!6U%Gu-aQ!H;V0G3gvt*26Oq4p zaq?UH-26$}Ayq2QPwja@a~oo5B_i-L%%U9syu^Shl;gj5pEE1@{r~sb-kA7gy_ag^ zu8CbKr;)e+wd6d@<-gbS#^02)So@C+r0fA~069CH(KsK?4hao)eC^MK(v;($!%4lK z`IgZ<1<3=7M#mSQoJdv>bXGe;R@X^znzTwgK~D> zAwQ0n1T;wHnxXNV)rTda@z>{)uTf9`|BPZaPnqn^Q*vSd!j*TU>Xo&Vy`!klaGiTW z^A*s}muXM`f2K0!r5ykNmvaH)4G%YKkPS9pW~|!Z}z7?mGwQD zDU#p*8{%m+pR%+Kb?z&%-e0zM<|#cd!}Pq^xMebKfV0m5^W|l6r7o!Yru_~i52&v8$4Ba%gUv_U%%%Jc-oD|7nissUd4b9KzB&0% z>boclB|l4h-mh+JdB3baWY#!#<7(p8zeMk*oP(NQV^|wdNiewb`s2NQI{&cK+Xrm* z9^W(SqW70sZX@?q~eJ%_ne%t##)y?9}H(5W{+eVV}02^3NpXuIrVt*(~{vqlY zGKqif7vw*+h2^#Wu7sYWsybI-?PN~%zy5bQhI~NzsWeWAko>3i5%>S!ImLA5+w>^& zoh|;os<$~+Z|%MeQTz71+JQpiuTz-z*?yUFFuV^aCHCvBO%@b*?)bOcr{1Uc*6Ts8 zqmp?kr=$M2aRcFytA(lOB<)|@IH76{@<(f5!|dNecJu!q=Y-btb5x+7RzKg+IRXD% zZl45{KQ%eqE6evMoy#hzajDtI6(dwS$MvRaURg-{;3nttcHS5NeX-iwAMyX^<-5e> zUtfy#ZGizw+ymd`J3{-0R)112r~Uo!04IV1j(@ZE zZP5${)Liwd+!h!Lls;MDh4j(9%zrudmSfAzcznk%i&JVw!dS(4}A)O;?rgKCsC2hca%=d5gccy=@&D6i4 z#xvFqJjg=+_FAv7`?whmj?>>i_qDYM^_;KuPg_s^d|>?h5x;#-IjO#3{j_>j0PAM) z)rY?{e!i^s*7O;Zn|kKyMEn27+ZnRI4&~n-N%<#bhiCMD<;AMx$FMMx!vAH9g-d zNd9#rDc{zMYA&gn(@{=QGE^ThrCkN7qf@qQZ5Si84)4Dm|mcppXSkJxQ#0|_aVRk|G7`RM*h~HQ~!mM|N2ViyGif$*gDBt?Th*IqpaE|z2xn)ub;_$ zZ|i>q&@2bkLjT*6Q15%%eEjvtoa>8Gd*y!*{Hs#bf2P(+%+62fxoD_;H$3J6^(w=1 zy{iAsw05BWS;{&1KJ8Fh{2}4ty&?xhg%67D8WS5H9unRss!wc4cu#mC;q_WJ3hxmS 
z6Va>BpxB6*mW^uk9}pGMvTL{g5sZsJCcOL5u1-ML{(Xjnv3=tfvE?I%c8`dT?K2>% zMu}QOfVFHL7yLJ-;sI0R24IRE799bldJc#fSgU!17U3a-Vq>~TMk7ho3z1EotRDTF z4gnryRJ>ex^KRj>F%c2rQ3HBJgm>#YD57|=@D|~bC{X0!$XZSRbIz_qJlJRCJtHbfQg-n(lIl-dItGB^9{hQjp-BBt53*~K~d2N#rAZm%HzIA z4aTh+TD(MfIJkzAtzL^k-D}lq8Q!#ZpQbH~g-14R(4s_m)8Zw=!-sSY?~i&A9^187 zt;iOo!-u2lpw@Kn2bKDT_w3rIKjg;EF;d!<3a{QL7FOwrniSPN0@iC9`M9{^7KdA3 z+)LPfNVkVp^`g4AsMRh0+z=Dg8mk>nl|HD_#13sikH-l_B-#G`gK z3hy>JGCF*4^h5RHkpnY}ug8EFqW&Y!vS92J)jg(`GpKt8P^te-ZF?G=$s_xqi1Bi@ zC~nV1$wx%Qsli^vziOu!;qIE5!HarmA*aQoaXlg)mh{nx=zkgO-y*D9KN=Ak`8ZJx zX{mq9JhI-uO!%m*=>Ct3iR|-GSoy$Y-3LUW(p!~yq_)J%@X#1gCtijUu0r~DQvHiJ zLGi~WaAM+15y$B9MgPn64;xE7^(|scMR)BJQ?EgbL5+)-s1*}4Af{2*sII*tV#)^> z_0R;@s)ZcUnnv`G=pI{qP;9*xrNRM~>K`#^5Jc2!(KSG7p%Pdt+$S=+f4vsv!W(ug zUaQu_Y6m`N0b_DaHIV;`0&#@ImMHd6LcfS%;oS#BMt6+~1erqr$}#r;d08JmN&l~N zuE{R8vAY}pZ7V+%^8eg$HUHNQ_ploNua)gzoqWm2uF-fj9{)&rkI%mz0<_(=;@0;6 zv80D~{hvzu->)10sssOHT~+Ewo3ww`fHFhSlnsb+4ImzuK`?my3F1ZnWr%+hV+KYZPKigu58H*sie_5hVHd2si#K6*V|ABBoDw z4A3IoSh9buk{Itk>@G`%*B%_z-HpC&#N&p|K|(L)VHLsH>yb1pX?U-pL&KvZVg?Ox zgZtQF;X{hWFUU&odMHnl8zDzTM#l~_^&XiU{A;Wh2M#(wH;`!&Tejzbm>0Un^r+XZ zR+E)(&{O>F@dU6Ho*Vp6& zPcF2Y?e&dnt95X#-iM@Hn(efE4KNhs(mAV|W z^kcjq-vU-_LthNey!LeZWAeo^DElKlU`@MAg>O>N>1$fgHCk(x<$4cmNZw9w9L*RS zG9Eh6s&2^LWThu!+)5wEe+>l1t}@tcxMr-ZE}p62mT?BG4i^mw1SeMx>WR+#+S7PJ zy*gB9^47{BEgSdjVoeU464m4>xvs{>REBD$har(qRe!{v(7&lp(#O=ZkXZYcs*G37 zZq0aNR&6r5APa@@2TLW$OFgp+57xSMFq<;&5~s0&c!j%9%xs`E@AibgO5(LdOKPBG z?lCx;3CR#*abgq&#_#!;jLf24F(U2M%68B&CG$18`07qF=|=wzTyxc(JNrRwLGxf> z4@L%axbINwS3V{clJZjcK2ld^n5MloF3CmXjc1R_SdN ze;Z6MJ)A`;KbA=&Js75LuD#i>w9i+%)z*djVEQ7DXHgh^Fq6i}qhTsb?f!bbs}G66 zMDo*FG)A7wB$1yCQ#dCA3gixTNnTXDeeQg>y-|4vB0}YSt6ZMP+TBzKagzN6-kyaf zD$l!fbB$I9b68!gcA(9vbh_<+yVhXYt9M4@-eL^7DSNt(*6te zzS8(sYgpxWfJ^gwLiFr{hTyau?1@&g=3auDw+DT5eXY{zcNdcsr|hWmsi>~_R2v%K zPP^HtT`JF&%XWohIuE!GYHi53ivwP?0)VW8u~6nanBCTg^S5oc1y94Ej|?t={DVK1 z<~{Y<-yfzu|M;c7Z~BjKmeT$R^LKkzc+*Dp*}7Rxfi)iP=ma*UR&CNcIa4Vg>pYFi zi4s|%_N3qPZyw0GI2KElFefWtESH~NftjrmJcZ)l>9~jDo#WUuGL$D_+cDof+>RFR zCe|YJ@}L#DJ$|D*aTIQMR@8Uyx1?^j74^Zol>uwCb`f(H=>~VKG${aTzp#`s+v$pe zrvYt;HAqzYhhc53tincX6h3iE=7Mh>lDF!uHQiG}brF6ET9K@tV3QINF#K-W*rM{0 zXKWOt@WEx$y;_>p2cSrb&~b~7aHVfla4Wsl=EmAeRSBa9s%y?$^K^V35X)??xlrAn8m6v=YoeVG|a!T^Pha}Fi&SU3th`R%(!K@fo(vjE8I|e2t}f$6zvzgnCrmvt%zTaDuGT z4jk7mckC42O1b9W&)sOvxN*<3n|!Cao*t_0f`;7>Zyplu(@Az%M$-f4H` z^nD7xHSog0Tj)cHx?-!wOh>$KEVq=Ou#i*-;<4k}cdy2XBj=vZDxO@`k2rw3fUel31sDe_3z3#Lx3||8h4G?^EDE)w^9{6E4X#CfX{O zSH*Bvf$#?(*6v2N=`MR_E0$M|>aFKn3q73D{F>WYc~_I2c_-7EsUNfo{byrd=DfRC z)TPc#w0iNzU563SG$pse6L%Eh+Z3ATMQ!najh2XHkB8s>31a0fuFC2gNv zX*YZ2r6-nG+ACUXh4HqY%NJ*!>*Au4D;7;)!44b}J}OtKDu_4GjHY3s?E_dWv`HrRv^ zy#famHF_|^s(*(MFvPehZ)1IA!=SsQBII!J;R}q@BKt_}Z`GfxKv^VbjChu3?dtT~ zICi^R#sgSGb|YpH(1oWk0^O<`g+RZa^ThMGjhe!bGsYs$7%{Igl?M>EyoFip$UTS$ zd_-R`u1`^}_T4JZ_@%%i#K)=?o!qQ$-HB_?YgK%N3LNkX4gW-9r@J+g`Z8kHo>H3C zGabK86o;3i0@kK}w|z+|M|x$*%(Bn0&1BnadeewQ>`In z!JH&L!vo4zA%K-2Q3Q(+Y0FOwCrXADoU!KNr>&EN>quCcWe?y<=HIRl?Ru+Ja9fKT z5FhbXbMk-nJ5W!6A6={)HK%LUDKLiit#W3p68zjdaLTL_7W9!=Ey%5kYx;^?ArkU zql|DCoRKb^Mf}_oz6dGaMb^n4BQkH!qoIAHL@b58j;mhKMG8xKwjx#wqZ%E8IK%h- z#v3&GVdpJ)bPU|Cqpz;~PF!PLez}M^1&L@5768#N49hpLi@?d82iMTNV;;N%<}wMz z0?+AKX1k-FH25a1#>3&VgAP4Tn;2&mF(*HJhz)wo$U*VP=9~4cx;Q0`ph6%K$e!*s zCFi>En(_3S@=4y&s)%ojlU`8kaoV@7z_IgRfWonKu-cP*4w6Y+tilDZW&CX15Nw;s z(yWTnP@x@%*0jKe%UQ)p0*BNO@MQxNa_rq7TKT#~2wFlV&;91%`-clsIV$Nw>Cgk8 z6aVNO=aCj7MxO^IOlaLcYgxA8JY!9K*23wpB-?1r+x=Gh^Zm_Evkq19N#!!Ncu^&S zu+E@3p-PoA@Ys_WQA)NkTR9G!j1h0q1X?(?3N<_C&3K(=bX1~e 
zQX8U&l$r*PQCJG8o`0oMv;7>xl2##jM?CuSyu4tOS?IgUAl1L}(ITU+rK;AHad4-3 z4m+x932O>6Kv)w3K~T#TBQ1|P$%JD)VAhe4sIu8=yna(H64;4LokqP@f3AVRRAngc zZr0!zODjoP99E817EeG&qWtzlW(UxkX>`W#>}`{#cvWK}7gq2z#Leg_*_JLvwUkq* zW1cmq$_U-r<3z;xbUl>rF1I2^cCsODM7HC|BQ@$a7*2*pWt*)bK~##`MDGErwlMu9!CIfZ=oX9v`yiGerznHEfr zf-YET>q_=puMFJ@G=WDeD~Gj;6FX4O*8y#Ym#RLu&B$)61u=em-!Ol~N7fmr#t`N( zFh#wkq;SEY_}-K?7>SfUIUhDn`Cs)hL}Fcw8} zf4$p&&iwnlq)CmC?)usG#ApVu@1&!*#ut9e;KT|AC-hYRXEjYS598=wEXL>@3q!gY zS1-zbR5BBldDu%qUqi@*ufT+?Mk?yBcy@23dE8th4>TX;x4wZu&LEi2a+#Q~)Ug}lquiva{f991wf--RKBmM$w z1m=EcQMMBbg0XmT-8D&Uj9XgTl>$<#Bv7}uSGZyHgt1{o&D*Psxu7hXb}ueNq@)W{ zqT{$DL(+Q#UNo@6S;<8FsN}Aa>!uB+C^GJ|ntMcw4Be5mbU{S2_JWTBJ8r zv-(y>Hv;7&k}mq=d1tQYw=z@#`ISd0x2TM^Z+%vPxN}j}`xJ++?6+ zolW)c#@iwPZX>A{V_$>p_q2Qm=U@Rz=`JU6V=tCSGSPyp$*53NDADsOu>-gXn6hwV zVoSq#;0e3QsWujVDi4pMH#TiJ%`Eucpxd^y3y*%ucioPxF38Xc3p4T8y1WXFY;U@x zT3EE28T%S9I4*&Mn%s6FmdGS%Lm3W(o<4?%l|&vNqLUQn0Yc+U0@THr1duYk$)q%a zGAbJ$p%{Z+b{RYZFS85nT+oGfrH6BL#T$LpLueEXKH_Sto;xZBTxGo>D4yd9s@Fymjqkarsw*89py)b--a zrwHpx-Pj{{FVbAIetw5~$4Y^K24PZtt{Q>3ViBXJM<7+mbNXa4VLJw<ToY2PP%P9hd@#P%CXA#=1_%$VVl=~~6ktI@nGp&#A5Z&A5l+ML+cW;{#Cc#pb zQzi@|ABPlHBpCY%mRP4_NnDpN&7OCT#OWq0jV{LK zgPrpd?k=cX71abF5JfIjizjd!k?8!sl8SMegF0F{x*!Apvvs*TVm#(bKvimbXVz^X zGsz-kQ5h|Xd{w{dtE(g}Q9rqyxfhLqAQ{0{j+@(9jOdWw%3hSb%4e`I)!4rEvGi}n zPl0d6?eG-7RW9`6yJqT2$qBTR-kZv7`M46}#gw&!q);}Mog@S23Pe*qB5|Hk(49}* z?PJ_&jCa05JZCW~!_h3~>v)Fmu;ec4x1Y69#$JD7oq6p66qg;$@98#pkp^9uP7N7` zwS?|fFxagzc_h|n{FCPvltXE+x>Z?2_HaqHB&m@qF2`aKuD^Z+Rzd12oL!u>Wz}p; zelkLwHIH6<9$d{fJlW6GUqjc#W38;aAs0#UpKVUT(&5O26gm&_Q5j2LW@_Vojdh_3 zPlT=~em;O{S&k^``M4em#nHy9hAJ({wo)^QyE|^hsNJKomJS#TWl8{LumFvZOO*~e z`*UT(WV(`!s)ie@@j^`ia4{VLEb0)U#@pD zaVJIMfPE18R$xJkuC8r2R+BF6`L#zpxWj(wUFk*Vr z{hUi$b2y1F44(7CiHJ=tNPtSkPi?;b=$k|O6j%J#N&hl~y}=t07CS#k_`^EV%R!Vn zQFt!)QHT)G-UXs6ahWT&5&j@H9!gw3Qd_7A^AwV8`B11(A`0jb1$G^I(}N@z)5G9? z+)id_h~yQ-R7r`!U^ix@Ls3N5PN*VeaJHrpS^$ypkTK^YKhmeiV#zWiqN3lbG+tR@ zV06)hRGt>Mm30GUA8BfhLcJssFslV{HD^Q(wvhcgR-Su()NP}dx~`$97O;dorj81I zB9=Im#V(pHWrJA1^iipdY=yX~3&T;sLY}kfU5ufaBK+iPC5mz1aXb|)NGNF2tNAb_ zNPY3NtqWhIlrb-0ET9D&WM5Q+u3WRbb>o~JaTO7hLldrTNZ+zi-;iQo*7}d&P8}%P09LkO1MX8O z>4wzwuzR=4w(_X&Y@?By7+S7Hf#5w=L$_R-Uu&E{U+>mYl*K7aVBNO?YB7zhC#ZxK zWc>wI3L@N2V7HCNJ1I+5<2+cKR@fl+$vDg@DaSa??U$rR%O)9&|D;mPNQ!kE3w0D{ z8HhfS(WCbRADtGmCl_Fw!{p@lnxH@^mZdOa_K3`kgTp?F#7E(fiuuxrFrF%A728cN z2@Le42sR;RTr7c+6Up%nF9Qk}#y)b{?p0VjNN^4USz=q}Jc($Y^4ZcUt(^=WoZUl& zIb&Ufui#kv`%#a}2Q0QwV;pq>bdaGp9=BVvw6!Ijs7r1xGVZ6ig?MIQ+(=x*pfc?p zanCRkP7j$4r7D5GM5Vrh5q2?^Xm}yYQ%3SE79SPusjK2AV>1?F7_x2?>{vW2@$CU= zJF?8tS$tdL;HG6MmpvfSZH;y7R(W4;ko)8E z?EOq}F*2ZPKg+kcF^_Q(+B2Cu8uUlRD!y4{`gBH2Y9J(ayGKty>CavJz4Q=Rt-&CF z{W|ljl+`ac4R?_kU0;r!1A)ZovYp$EY{b#qjs*UVu^$)tgfInc&;w9*x|2p};Jlx( z9NOhXNYv<`>PVoB4-q-Ybz#j0T$kIZ3q#SL{Fu4e*-SbJVHKZp_%Zd$+Rxan3oP4Y zN*?>Xyau0UnPY#2)jyU}8jGjY!P6!;k7&nyYpK18H#9$&+F?>+4ajB1#<%6K$CNiqT4Jp{e8piCk8P^~! 
zn0YdJrN`a959SjdJU}W-8H%)+#kqjF4!cp8iqWxd=yXCK8q=vw>}%KRHr72uf#OfZ zwD}%#jP-pgJfZMh(5%3nxKSym7!t6WBKIlu8spO9K`~-hDaFKAP7FvYrMk`-ef;hi z>5xl2u)<@9pb}Gaol?b|!R}zndp3eVG3-TShQ~1sbGa(_D&a*{UOEb#$62#-T2^m;Qn}h3q?D zabt6+61fwU?P$nt6uAPh2cJ-5B4WZckR^flR*sn1Eb)C;5191?kmFKmv~=+O4$YNB z{Kux}a0PU^)X`Hhb*K~I^YY^YHWfr!e-%=$gwzk&d(Te$Ah(i40uRyM>Fn+U`X5sU z5k#^o2e)D~hzzn@jAby%>VeIWk7wrfi8)xKf+v2a#MCvOi4o1zkft)^bx}c9*jUAW zJZGKUk(uo@%SxUq`ooka`Fx{^0<|h8ygX0`3H_QRT@afrByNtfc(9K^^#YC2u`{)eTJw~sswyyPJbCKr$Bv$=R8Ch)zS(lQ);wg?ITE#Z zz9at+Ez~YvL^DsrIbhj^F4|QAJ)yRFB8v-Lr6f~qrP^0MQ!q+E{sCuRKpm^LwyN3` zHHj22cRuC(>;p5l`jjm)0a7)(_31JQ4IZ79V4q?|7 ztBVOdoLiL6w%cwjw+pm}klPAO?qX8AgoZ*He_TqJ;8$tubQ!j2APKBUsQZvK`f1De^)e#B0r{Yak}LyULG zP$9-|{Ny;DwIhE?>Rw#82 zx`I4$zS@MiBI{+m++4{%CjEEqp)3b=U7ay;&hUjBqjg$?;ud#x%ve-518#Z}hQhzwy+3c;O ztQ=UFGIRSk3nD3XwG3!K$qmB}V~~qP-t2y^VQa-jVZ^owNO6K8#;lAhmcw_Cjw^E#Qt^dfEH8Y&h@=W22j zi7twnU6sS8wYjm1P?ht|b~WWxF3q)Z0dJqLbgQikb$i{5FSxO;gwkAu%r#tmCci2U zt+v~+-?zd!$H)5p2U?p=UE^R}nUfY|r<*}mMy+~x2X=f3AF7t4B_ZlZHi^A=(6Bqh zcgD2!RCYh}t(z?=eUX-AXsqk1G#{ryon_=dV2Xr=zS4fw5|2WIJ5YC_!)yoLGfxH@ zx7Wn@r>#}{RlSE+dYw}U$Es;{FG-~e@#KfqC4nxjvOzU$ncl49i=lOs)Z>x zl+ZJg(Nx!%#AGK1#>h9J<`U0_n;b`0IDHq7`f6Kk)EN59bHN!;vb90?nsM}+btufxP%xVZG@1WV{csEv@nXbFxLjj*CqCeOHmo#9!7KQ`c(TMU*ptvs@`2( z8l5#LYZ~))ng4t8tm>ls^ov(QaV3q@pv7A>1+g`1Lcub|9ho@|1I&ELf0;P&4F0fQ zG%qLoevHM=dpK<)U70vd;U{qIL`Fx6PXN_u6I(#*6aVaZfe41d<1~cKb^i9wdsu+FBH+Wmm(m zByNM4BT6N+obq~BzDv%S>0C-v+EHYb!er!nSAPNmH4@&))Qhff?Xtx6_; z&yHyKF1ANT=)RSY)Y`}NBh{5GCe$M78M_AFA88Ylu5!edU`l;T7wy|^-ESjPv9V#&#Hdwd5T5VR5 zgK;%&cg(^tuFl61r!1d~Ef|-a4Wsro(2v&iLx*WbjxDvxgke|L){LEq)c0@!hH!@S zc3}gROL~n@)+YqEFAA}%9I28z^Fn`JSAejJ@r-<193?)h#8+JUB)Y2lY+c7facWDe z5mmm(rTsefrO}dVPhNO6Xi)$(p_yb_JEX^yLnQesxH7&B@54jlfVcxwiD{PhgMOgm z8*K$AH%KDKfXCT`-LoipS|3!|VJ9FohbMJxQ@C!tZ+xZ*k{$S>WfK(=xYBR-7h$p} zm1^tNF1A;-(eJ^r3QMA<>@X{$R=DZ#5fG zi(Sv+Ah}sdt?oo4;^KOQo=-y`_e66m$9v=$&vrCy>om$%OD<~_FNg|8_BRzwn5(pW zVBJEz#2(jM;c=g*)~zhH{oDe^v+;!rmJ?HQ072$H%Jv@S-4SjW*(SMr@$p3>bvv4) zyMdYR_PU{Hsj>Gn!UY2#hpCL-FmlLR`Zd{r86U?c)r=!Ww-w!LnfrrtQuIy;{n7lx zGJYx<%L%zgoK&MI%WbOZ&6A~NQbSyhBlMSfP^noMy;`Y7s@w7IZ+K?>}j@Gwrnx7pL;`7 z5mMwj_8lZcGChF^#ao^b-eSU+;=E;&v^c(@(%d++nz7d=Nm}+knj|IG`!!WE z%r>MXPw=2^BOxiPyp@r}#-gW>CyceJZ=Ez7mB1%Sns?5_Q9T9p#B#o3i^p#kbk%m| z?0j-#cnZr>Mh7CftL>6Ydx?u^F}>^SXgE9`Ye%b;pv6D4*=a)mwb5xVp4yl#pHO*J zMG9Z9SJ&WtZ`Tx5499`Mm9tk@Pue`t@n=axE^UeO7uU7Y!ERkCQ@V9i;$%_u+g!aV zjTqdtUG8R=>&GW+Rs?5t6Vtg*!D3X&N}}0guuJAjokqP@f3AV}epfT1(vN+qPjM^i z$@n71%#xL33O+USGVj*m6c9R6)hm36-zih@F=~P3$%gY}FdNP^&5JUXtaaf;?uvi~ z7QJC-p0zX5!0uevq3|aoUEA1K^3+0T8(X zOpLh*NS^cxu|z5E(?fJB2!jO}^c>Q_U;CnMqwm6S@dX_tYsQEMZ=q z=T<@Bru1aSO=&7ua`u&KDoGxPej=6`8ZrU%})>xf~7MPG!x`n7RNn zdn}8Y6+BeKtg-<-YX>PC2Wv{!e~2nhU8??w*i^hS49*7;s}`$Gv-0CljuN-S$E7q) z8-l>z$x1svxe>|UDU+kas!KBrxmG2*n0a){=9kil)u_ z!%x$A#p}XO!#I^nI?GnG4p%qrb18Dz)U2_{m`#!)u%pO=axjS2sh+&F8s3Dpi>rFt zXmCPqgxQSJ?$SZmjd&>z<%Fn9apdJfbfpPdU&ZAQK9aCg9ju%|{s>eY?q1Si5J;0P z*9jG4BLz4}X`PQZZ@+6mc339t*8`>ysMJgoAK>B95)YVCL{X+xw?XK|exZ$)_Jsg(}zLn(hzRQniI zNt^~P^htabw81dyAWd-84O7+IXu-d@n33Fo(m?bEs$cYqu1ne{**T7ndq-Iz4}7%X zt|UyNwq8euUigcbAGLK3aE=->@>r}M4USy7 z$LgXozJLr0#!$T&Y#y5%Ua{Uv%U5rKuaIBg__uQUmf9=>y#8^>$*{_}Cfmfta(j)= zbE9zQrlK&)#~Tj0J@IZbDZ1ljY+B{R+Dh>9vs3+rZ_FDn6(J z5US#n>dFQVhu)z1K!L|vztrn53M&c(w`@D+FTzdocb7Np&$6l%&juBXjF{fk{R4`W z3yh1}9hwbD3qwzF7P(p5J&oM-ZJm3_<$j4IRyOE0W$Wc`j=6y-Dze#ud{_PT5>Dvq zlnj1hAfL!hEKWo!2_!koRNXpBY%uCLM4FBkQVK0f>^_`DLvkM(y{sh&S$Q}vBMoY9 zvi;gY*;yQV5a+0NP`GZ;+^i{)=yC;C`qc|s988&)kx#KX;XI;{JPk?3-ly6|FNt-0 zFzknt@Mz2_CzO4j56n^-cGg2C!Z*X|W!s%al9i~gaBb69MXXa56kYH2+UF$*dI?tw 
z9hE7Yz^3)b4>lQz491Wpfs9K0%xkytuDJya-KDQ|F!ZalmKhFPG^s>p5S#lqT-8Ue zW5`<%iMSy*y*_kXsIsKO!%dYYtazCU(aeZYto)kKNRW5vNf{*Jw!;;9Qf`;5ga;q) zAC_8ONHB1|ZAKpP=7uDxZtidK*&mo)wrIcXwtB0&3SVy1xVky_*BiacbB+GG$Rp}@ zgzbPlfa<}<`NgxXey=O#(8Wx!UhP%tz4LwT;vCcA2amkaohSMqJQyk=U=Fc6}>gP9K|NBFl3kixw8MMAP>lCYKiwPOU zHts_~2G}*mQyEDILO>2_wrf>L=k1oU3EQtU$)@p~H5Ny8U$NiadNTX9kW~B&y=NOp zlifDCQpVDU^OEpOvdB*;%}L#zN>e4@XiB#0ja;dl3mLysNp+r8siMe!bWT&ZUV^d= zl3m2a1mql{@;^orFx3W8E*87PRQXdP$aqT zY-0A93_ct%1`ZgLyj+XYuYoZvoZoEK`i*vL@rl(&;zT>BlEXu~#?33A+!f?x7-f@X z{$jm_D$P)6hgbSUIYM;rL-a-hv%->qt@tDPPIu>iZ6Wv3hO|p4bz|DzGB;}dLETO z5RQ$JO%4Pspyl$Z#w^$ZBuVYj>3U2)KHBV<#EldgQ_E7M!q)2*sg9^^4+HcDu!OGk z^w(;=FAo0!F>RrIO2JbgX2$Wl1vSLw#+K7_d(qOLNTILFbL7eNrR{q&eUm(Cmenb0A5EF5Sxl8r5z+v zAxO(IN3SCcT2#_yD$yK5cW87HQ$dW?X-rI{^xC>K^St91A@MTj9EhSb;kw%)0+3zVB5_YKct~H8%*oiZ618( zR%)fOlg|imN~SUMYjPV^1d8?xQ8ePFTiQkmF7H4l~~-$M_N53 zPJ?;5rl6fhI<*bxXjABg)+q9Pqc!k(u^t2sd%)A&4b1xSzDUSD=T?Hz=5Mpjm@N@& z3)+Y5b&k#J)t*qEX?KZCuHd&3#ZCvRAb@*VpQh46G`>k95z=cX#J%&g*iUv+{y1Mk zz{8NY~^uV z2njA^5-Ewfj0UZGis2H9Fr@BAwdo9&ixU13`h-w{6?syd4dkV~j?4pGzv06p>z%0F zIaxBesG||v10)+rNp?MWxi*}|Cp-6gG#o!Y+c`1?P_}cdh(Hwgw9$;?K+>~AZ*l@D z1j1>Z2VEd0Jp)86n^;x_@(wG*vSFP@$q~uT z!**uCnz-Ng*$&yv(UMTL49?^$U%bbrhX~QBola z&GvKkF1(F55vCRjo`TjdaNjJ)3W<~rRlNDX3V<`+M=DK5RK@7Xk)-Poi zXPb%<9I|%pjt!{V^g~s^Ev%f!z!=?PwwZ_%9K;^B2vJc(yx>l?c zDyErehQa$T_{d=@pP!* zGAPfy)lcOj$-MqgCB?b*1XD?UWW4EOw{r%Yn8FjQ`xtg!%m9{9eN5%xl8z+CEUbMo z`^1Mdmal9?)Ure+2cnu#mx!9GHH6vlPSU5i?Q!9@PlDS%W^)cix$O=Tn9Wpokg&*J zPMl9WNLYRrxr0PTMlXvK=A$vc_Zc%hWT*zigKRmN;pHP^EG#6IuNAL=R?ZJ81|6zD z5#(YMq(dfOK1}`fZu>dp9$M4c!sLh6b2_P(OCY&?a_9n!p^!dT7kqz35u3#!j$vJfzmw6x z1hg3w9_1xTP*6eU8CXl!K)o#mg3yI27}=BJR6ESjo^)fIZyf2i6x#Q$B8ByS&$|oq zwi~E)F}I_YE?mwq41Cb@IQ#l(nS{TD28T6T?`~d6u)$Ss)k#zt*|rJ@RjY$N|uaEFR~!`|4)QX0zwi6sIM=^~Ly zty%4=S`d)Rv^DD@5)f7-k)@ERE?vrB4J%G`GN_W!s5Tp)Twl{fw+dS9Qrkq22?k#k zHj#4{!g8@JBr1%i#(fks#gRxQn>2PmRui!dDl1}BkPQe8vm-KqPY2k55_~bdkbBL- zvngte**&Hrj!YbSA16bOMaY01YR1BM23`sofDAiV-JCdg8h)7H@=qyu4>;j_DY@@MF770Uhjv!Gd(A)!|d=n9uSMAr8 zE6L&s#F$B2>T|^Q9baaMGP3??EB*TG7i$utCGrPEB9l8)CrC&2x&Ea}x85L|3FG#oP9V9Tbs>hrPFS-AjYBj@~Y*NNE>Z57R~mBMy*oscH3QD zpbAT3t&k^ql8fGoKaLGxmXP$M9d zx9o!|5ZaE0_sh5XYmpVYzFoc3ebKoRnj_n%UuY%V#QwfalWQVZ2Qa;u)63`CM zrp>QV-~bGq7MF#C)%25ay%&G-Dy3D9Wz;pv(LE}u9=&$W<0_x5O{H#fPsHW&LQ-_k zZ?>T6)W^BlRdD$DatlW44)DdfMys=lGLCE24vzXtr`ztgYwcz>?-7w+{$JVVjr3Pj z^QoL{N)@jh&o_7LIO_E`kEMhCSOVpSY`?KIUlIn118l9XM6EG?NNJ%}-$I3pOOP=e ztqSg97wU3;9P~q!voL&{T^i zmgl7MuSNB-HbQ$OD`+wIcVTiO%lVz0xxXpA$VH0t+bcQlfV`dSYxVodwBiyL(0Z=jT^n@2 z7UzOx?*awDIz+vC0rb(-4kwQ))RK zbDYj-D3g435r6nR@j*Qv(#S<_Wm5KPX+z)3<{-0q1HaAPUV*B)sXYL^iQgYFp(I## z;SbPsW@R7dL`Rgd*!OF0YU4^GpGDhId*{@9Qr~7hWh0Hqz+0}37qLm?i|H3Piq&JS zcwtBoQ+zkrMuC1TtIYnKye3|j=%7UunP42BB8Jnzw~UuDv7+>#vf|3oA5@dtK^?%4 z@{Fq3Pkjn`f3y|UG<=Jmf>uYJu8{e9^=@C8e@wR z5rK%CU6MHam`?7Hl$;R_99w6^YePv=uKPj!e%RiF;x!J92=^{CF{-d=u|p6fqHJoW z=+Yme3Y$o$Ru$$MpQKbAc39I7MPxNFDhXKQmf=xY5$EA}0z@_QDz%a@2EWGh3DAMt z;bHNN>318g3;HOWST1N?F_1It;)Mszr^55cf} z>`ZN=);uMHSHa!;$x}~1cJx%GavH13v{^2nhjFdxE~sEBK%{}%#f#F+8N{gRP3B#M z!Nt)yzT~17;!U1WOdmYS#09&eY9gK5n1#vDJX>}TSw~~@ODWmPY~^?zp7KhCB#p?E zN)s(1);gzYR$o|1?xFOi=&2Oje)cj;^6D_70;O=SgST?6UxMaUJjG+;p@c)hxe*zj zU|xV>v$770ytq2PeiP2@`1ry3Z6tLO7{UpgpgCnHslGE3+u(P56(HRKsySq2Rz}A~ zmpfWp<6=il$?AkDSzS$|q&ZB><`-XNOx0k0+Uyml3ox6rKB4&#Nbqo zJL!4VPB#0>D=k1~bM>RAc~G3SWD1C!su7dCn5uB4%4ZZ`E)cNBah35S;4RrodLDLB z2G2CPlFA8Vdto(ghL?`27ZHgB_P6Fzey2>;3W+F3IR<`4PWXW-h@y{NVQ>VCsUr~iWUuX&r`JPiQ<*@3c 
z8X%NmC>aZm97GZej_4-{ilu`IcVYZO*E~D|legSZwL>oMt(fHj))U-Edi7v-K&XChd$!KxqNaz8BuO9m+HeHA@^Orpn6TI;A}Pq$j$NR0E8k)A88Tyh?n zMYO!~v^CjG|3Xrc6dCd6(>52m0?~OgPP*Bc0%?v!?1&tW(?^_XbWi|26s_u3SWa3w z?R0-s)C=J{MGO33r8(4t^u5tRnI<`Wsz%JaOAgqcp;IX=2B)4%%Bb(Zeo`IkxgVo6 zf#O{0B}b&HN1Cv}j$R^9Bzb)CIe^kB6_d+JB->LKGhd9v3j)Iv6B1jTM3ORWVwSO| zyhMD1BI5&vTSf4cpD10qj&PN#(q@J^nSO*1PkCs|4H{92As(0xW_VIYm|QX0;_fsc zzYdS02zy#3Fa>8Xe6kQydA_f77ml82>T|m4W?#{~QF9i$2t<)88>W`J12=&bPltGj zj-yIqXLsf>EV9MB4Jq zup)ycdMk3CK|@B$oftH*cQ=hY$AQ=f6ANnx_qW$x!b?Ydsnu512ii{36;XmJsvxfF zXbtDyvjF-eo6vx8e~<*50Gf!kh(fW0|xH=Q+ zo-V2ry11vDlzu!crB5aOxup3MtdM(kx->ssU$`VoZJ^jcT{N3jmG64M({ggf&da2< z3d%RfW~HBkvkrJe8`k!?!`o_ zfHRi7%u>k5nt6~=7 z=Rs?rWls=_Jhqa_XT9F+AYY;crW8-;iJX)!!z*MtK6sMI{{%D$$2yXuT09M*ZP%Ic zOXZ!bim**CG;Je(Qifb24I>LSIp$c?`L;WW+aXTr^Z7eZgmHRzdi!34G^65!HIw(!i}2mv$y ztzK(htTshsBR)-~IeAtb&g2F99dZW#Q$t?>MbU1gxOgvngu&J_OxIZ{oRdo@eX(x8 z7mhEGGe=Ug>x4XMr%t9Pg_y%5PVL*IS=yl_)-uB*g&UaU<*}7&$`Yj7NkIihHsbS$ z1hFvra>g7WWyo2Yr-_HBHLj~KZ!~j*xju41DV(_hOEjU$H$9G(p8u7yoP-4 z7usELn2cQLhZ2vw_*XR)l~FRPBNij0>ViHBbuu#StJCS5!$$X5Y=| zSv)1;DY`R6x1#96J~^fHC!A{6o<+X=W}|kgJcqsHMCM5jf}N)o}-JF!RDESaA$=CoiSZv*a}*wIfxP!E~pW$x88us6z-EHXo@B*a$ML^ znv!{wV+m=KpA&Hh${jO%LsVM;M}X|4GaICDP%nCCmrW1`qeL;Yh>(b;Q>m}EoeHXf zqB}!S7ma6@dJjdkXHSBHw%5RH@kl=fiPW#?VpvVyZYTY6G2Iq4fcAbS^%zy^FvTN-YD+1;DW&d&3VKt_z(xg`kvKmZ z2hPPFO{ROAdvp<$t&uu~W!_oi)v7bxFKsA7lnU0;fz4yJzQLUN@lP=tT$Ei>I|+24 zE|k~cs`8^bM<87U8hTNL70xzm``9~*NUgRa7oWE=_AIb4CyWtPkuVZF#sqd_!3q^IG9bvBY9ZL&_(} z80IaMC!I!gdHh2PKRGv~-1G3co`;@I1QfWgGj`h0V-V&fEO)_|0oN|&DJBY1QT^$> zC$yJCsX~h(1q&;ZUswBs>GB06yA9|p&$8BO<3u64UWaB2#T3iPuA>U{cpG*>{jhN4 zhjF8^xHO=HoQB@`sG%}tM(*5Xc}h%pxdJ!PNk~e`dN`b>W^ya*fLt4mfW04weAHB| zEyPr=8I@>;o~z)l$JP?{m)OFwu^@Bl+Uk&>Wc0DdQ?&fqj6Ate!bUs@G!g3qSRBq= zwmyg#c1%?BOE#2{d0eUQ_0~5zu6Kq6zedK-gT0%C@ZNxJDzr6*T@*p~ueb*#-?rC> z0b@R%zKx2>h2T0RVW-=`?WKV_uJ*!G>03&3KAqBb>5wV{+^RNR(|NN^3!jZ;t;WsT z-nqoVZz7r5u~n2kIzA3#__CCqima1hi#`m@QB_p-lJM~?RXxVC#`7k($LhfrbgSK2 zTMJ)RX(7w9euMR`!#3T=^_>SaPr4#R(^iCl>f+#PW3ACe!tq9%cXXv+y`ZjS;W1_Y zUIe2Jp1$ydhzPgk^a|qP5k8g};hmv7GTx}Pxz^q=^;i9J)#HsNGzIcZu1X6Ke6;*q z?A6)lL>)9&IER&3sN}!|E1xw*3o7SZwdQE&6O~7>r4aHKLZEAKyd*-FUp}nq{yIAq zO9Fa5T>Vfy4CP`~7Palw8VEVeU?EwDnPJb5fHWb2KGB*0)M!lhSX|p~!3^XtFSK5U zTfQN!Q}VKs^4-x21R3AGy4~+aBK0Z!?r6P(jPGu4_q&l|eG0!jR$aj_qWC)B*zR}3 zfKcdnLqZA~DrnAfC4dU53Zf4D^Oe+k4x%buVqKlrU}Y1MM^PrOlDNhsD=nr~m&kk0 zIVPDlh(n9tIYtpKsp^mwvogx_h6imx$9YCgsTV_|W}IOM4pO{2W!fNRPjHge<`^Ta zG+oeQXV#VOd*A>WX=&gp8q)x&5*#};K&tO4A0S;cWM>9Q_dVqUq|2P_%mC@Wr+t8w z&p!C{P8@FPj+CQ_@D6h!RZC`vMo9HN^co_qa?EW2CtOBkix&oS2~-CHR|-+hI@d|@ zi7v3epu$J`GYV657Ii=>G_vm*A+m_9poa6B#!vVA|{QE$b*Z z36FjEXtHYLGphb%{8>^3NrxG(DR7o$?N1sp5sFRJjtmq^L%;DU%)D`d+&A*Wf?qaxfur(^sdiBo2*$;`N>a%8dcj6rcVAe|WO z#L3%3*nk+dAp69``mU3;G>~b6mxf)z6QnMhb0S$##pXnmOde_VNyJbRNsOR2=+u6M zs5(!Y6dzAiP&gIK8D9`g@iD1I63hLOIu9ASKTf6R{+QHPOx<}_i@`VXjhv=aI-bh5COU$-coG+H(6t*< zc+!MNH;du$`0g~r@xcotRdOtj1G4AuQRVqNSiX*zD2OQ0Nod^(j_$qakP0s3MYj_~ z#o{MiZ3@rLCnk$01QamLMDp^4$Qwulp2qd3`Ag({EtWTe(gf&IPVhf&b^4t*ldt=W9Bgq#z7q>DgT4Q{c5l0=Z~47j02; zNYNnlNvnjJ0!l^8(rveOyzX?&=9H2op_IJL=m^Z`b_cCgfqB}Ir$nVfcWGJ!snAnT zx}7Ij4m79<)PeSRMQ@2T7FjU0v2%-}#+*2a$}VPnGtRBW^mD6VilfKV0D@)G2UW7; zeCXN2M8RJ&+}LNGLOlBThV+4wNQ;PAE0vzeCRoMeMO?&T8-SP6 zN~KqC!uh+>ZmFG)_#ysS+vo~;+J!$?m<@3d4yB-@@meZsVJCfFW> z7A1A`GI(@)Qtb(jUalJhC!^1_YfN-xvp6u1Z9vjM>hhu0lp&_<)ReKZJK&mpUN1R; zNEoM5p9)cNlQ1k>`HqU*(wed`i>td3APRBKN5WRMg8NPg2C+f0V}h!S_-APQ1}1KH z2XoGorC!~V0wQ6lJo#k>^LvKfEp3-%XKj~8xQ7J8Gsarfl9#y z`YY*BxqwCeJBB7G96X5(=r+C|7c*N?PDt~~glsx^l?+}dR?5m|TNdK!?ct+SgQOCr 
zu$Ub+7PFEBagJ*ps1wEe#uT!5ge}Y7iF(*%cxMgUYsmN)&y*mMZ6*_YL7T_s0m%3e zjAn-0WO~lyIPqXG#u+Npr3l_wDyKJcETV>iI;#U2Vlo|;#1kswn8aP<$Aov5O15o% z_3*vHkekeLkS?RS!(6KFZMVj@v;7DwDdCpX(;dNSc|IOoQW`pGrE!6zJ6^(iRZN0O z+}6{wJWl})cH(z(9Fm4xEVrFbv9acIb*`43oTiwi z^^neqolcQI1YDkxMys>gufT1yUPVy*TD60WiOQQ62h0H@#T2va2Ii2}$$mw)v1wh4 z$y%%xbGl6$8{*lko9EA?%vbK=iR6K$CS&|6cY86`Y7)V^o2tl?z86WfI`QwX6L~y|q?eQYqGC{F2FC;YLnLKxvd>>u6{pJuJLoALt4?NbcpA z=C&i&6_-#Arc>_nv7QEh#vk_9C5s8=m~tg&#iMi$Kyu>es@*kJQz~a>QsvfOdF&$e zd=q1FOmPs_S0)WUn%e)D9XBWqlpgc*<*lqj_&_(p$R}z^PZNjVJd2+CO*|jkh zkD6Zu2I;dLQZi_XrhioYRYwTM-W_^Og411w-0lr%O7Fr3G%)BX@M@W}HKo!zN~H_0rJZUkzwLCmvkD&GA=q> z5JWkz`j|8YDjm;GQlRtQ>?8#`q0Ej_pe!#@^WUTwg35@Ja}WZj7unyOT#%20qy@MR zHk}l+CleMsmA64vZ!0u!gHHt^5%x$3A+o@7JoUr!3U1Sq`$2S*80WdlVr#9wRq1bb zn)SsKXL~v;L>X~T`Y&kWRg#F6zUY#%kOriFK%22}|HA#WKr@VqNCKCgTS30xEjTz_ zFS*z)ao{>>1iz32*Q7$?y=RvMk2*bZd;qn-)TD@tx_qT38&J0HW6I?#o-ZNQMQO@D zfqcPE$!bvBL2ApnI+`0r`+|+MbhDkrBFO`iyKpKOk)MH+I!b^qpE1c1pX9HwDlPE{ z+LN?HfgH-l&Bb~rmP2U;gvEjxXeFl(r;A z@q7bmoMEE$`9-J8`b=QkXU%o?u8u_Lr|jw-8a;`x66MLZ{jUlV__Oc!YQ_f&rcAhDj`-D*06DFN-nk?6XYPSsq7l*(%e&4nu~Gm z9X|Cycxna#@fFnaQ#|fIjZ02tv^sIq(^Doyr7xRoo*RolpE7YgV@>(FIJu%G11o77 zY!O~=iw3K!sCq5qkUAm|1PNZ`+ZdPZmlh+btr6h-$ns>2chA-@K~ulcsdnq|By=_6 zjMS#f)lJ9Hmnuh_&2~-I@sQ*I)0D(OmaABkVbCd8u**E9a&r&NIZPN>{RpVE9#!$J zS@;PR3kgH-5`z^}D*$p5yDJnKJcz6ZShi*yqLRIDmmY8JeVx9VtJ*kIQra$ORnY~K$#vhgUAL6+4 zR=?V4!Iw|DkvMB73X;jzcA-#XjOK#Mlk!0ePn=M{irwj|N&wha*0(C-cG2cuOWf{8Zu}p{W z`lKI~F&>=$P?k$8#X}4$76tRynD%r+{b5@S9{A~oS?vU>A{$~JyTx?8R#&kikS;i_ ze<5vaxA>^%0{V%2Ef>;P*;W@wiwo8^8SOEc`izn%tyiJfM5jG3mYzI8!^0TEpsVJ1 z-VxL3s@B-I=>UA2v>j6g4o4>^(-=@pY+!msZo(2!P)wkj(;{O6g_lVV0UcvSy06$Y z*P=moxXsCFko7!Gl!<wK@cfyIR4ggYuQenBv)-z~&+k&FE&>Ze23)*i z^h~gXio?DM+cNhqUaEoKLX;hl`O`=O_dUZxV^8i<{%L6%228v+JF`^-R|IA+Tt5J3ZaFukC4A_a%A>52r)*`O7CyB!7i?i^4f zz;h5A0tCakNCx=`)t*)xsDaiIWEsJ3(U`$O!EWK`w9s3L4~xgT>C2fXRfW3c7{LV_ zyr|1>4H~<2HZbgQ7`8AHD}iFhmIjTm6Ok#R+imED8dZMaRi#^Btv0K|+|MHdSH(w*ExAd{&&Lp7A9i=-3t*AJS!F2# ztvDHc6moS=B{A9=s#I7F)L1x#QsZjoX6svsu5RI|b^6$LcM(}nW-EuRJN^DhV7(mB zxk?QN?>>lfmDrop$f&e{WD z4|6-?W{#*{IF>H@>V$c6c4BIAlhYCDCQlr;0=QkoM|Bng@H5#8&^gK^ja^2C{c!s< z1q8?9HLj9@#Lp4jD7_#pn?9;mD_ZwClFuzEMV-fZ%q&p2QgkGOt+v!r8y#GwEuEAf zwpXJ@{o8k%B(F+z?htV^?X*oQk*s{xliOzUGwW&cwK2xLm$MvcX{$Qj^tiWb7Cm++ zI%1N`-Z*iKp&{zTo%X07a)dk@k!feYktlM$FN3K+>B#pgQYXQTa@@*!Hhs>z&V@Mp zkUg3&G)v;VB~Z(TW+i#SP934p#HBdu(&kL_)!i|cn@+Xe!vCpArX6XWKzH(5HH7<* zQQ_+3GKF3Eg76RfCeC8ZuJghOLQZnmNw1X7N;jLK{pX%1S+DWGlvs>{{-g+@!O z;HQEnVGYB2+%AcmV44QbI^s^jcti4BL4vzhV`Fnec@I09l=HLF7;1^h{jh^eR}ov? 
z8cgh%1?rgFhccysH#!xTiklY4Q98h#oHuUCGdFSeoSsI?=r~rM%#z)5Sk+QcjjAoP z^ZFW4RGaD`(ru1gGp5hqNmQWnHY(%FgJo`Ajm790HRCDi9ik@_V!$TAb#!9SqBy2aQ!MFGhUM#F(`;&n730X4K17+973~%l z@u~2(09Bm?a*4|UMNkwrL&GF)eWa|t9-Uz=k~32=ACJ|vC^?iMWk_ux zxGxOY;}AlWJyLBC84+@EP#z8i(FMOBY#s4fP>~RZmc36?|ie3Dg@dOV_W1M7zcTWSKIAoz1kXuu=#Ni zRyt_LtTxy~#*;zgP#; zY#6l7wLU(5n|nEZB$pI57EaGS-mys6lagRCAtm4dT4*A)q%whtP^v%K@Nu8iVW&E* zP>8s-I?pDEI1mfcG;2alG=|28tPLOhA{sHL;KM%9uHot9S|bIT4HFFvPEAFNsK$g> z2FxYFm!K76rP$0yeM2Hjlmg*^va$qT8(NDHmcxVvK!<>!3>#!3UQ#iYSt(Ckn^&0) zOA~H2vnbu?A#TVJk5BJC!UHmP|DK)?LgOAls$=W zxhRB@P(1W-DrMZg+Xut$vGB}_FP(CigqXkme4N;IQy_(4YJJkPT657HUE*yK4|u_P zHaUJX6tLLOZu@I1N0QTFGehlZBh*J!dqVZ?qdKjM7}Q52vX+M4IbrsC>`bj0ZMxQ6 z>!Z4i=DV^S;*1@+SQWns#$**lm!#SKF-OOZ1Dc!@tZe7JXcIzgvd{F}DxD@0xFpC3n{n81fjS6TL5k@mz*AUlrp1Ke^=8WJMUPBO zZze5fE6&0}WJM~@^szn`Mlu~&c~wl>1=%XUL4 zisU?m347>05tN**9sH;YW_I~Lg1!M5thfZV{nI0bnuys3V!h0y^4LNZ-&hoIh^5vj z3Nkde;4{j5LjT5$488{bw-65s&SLsCu+YH@`m{l2Nm^0G){R<)T%HV+1ny`*# zTa)W>)_UPj=nJ8ZeX6q7=ylpAVbJ6TS9e`UW`*{pNq@D7s3KUbA$9{fB}9ABKP*X_ zDvj3pHf~~P$oJVfKQvmM&3*+0*Q*=Ii&gE&t0@o9>4(izl}@+aZ`ayQJYK17RV$s% zZoP7`(d}OjUb5l@Ls=HPyk& z#2AcxPLwRkV3tA?Jn!()s>FM59W?~tn^Cn-g;UT!JRB$ts#)G%&yT}j?87+ZF6YyR&x@)c^4A1P78c3*m#>wU)R~%{k7UFULk)Tls`Az zzb@49n*3E#O&EObmCZ(LbL*8`3kP3$@PL4GGnM*UwO_^2x(e3X>sPydK&>@-O`TQX zVJ+mb@8ap1Cm(;jvL8J^d1~>o$18KM+VB2#!2N6PRrC7aXV2o<1F^qK`)3|Md-mvK zia(<9hz-V+gs(BY2D_Ff=~LEe9vzkeA54({;%j4h1yPk zci3Na>-lNjwXLt>Z{U9mwS)dn`=S@8P!ex1?fo#GZzXKEkLb*XDPS}{FWK+sqdj}X zb&tvKi&Mf>7XR<;@67HmNqh47JkKQG8V1SkA^z+3zBk@VKKh=Y*m3r~VcHw!S=+w+ z@38NsQ_HOm^Y1hYLiixr;w33OC{9nP?f5gz! z#A7+*^EAMZKdi~l?MuKX57 zlYH`}ih}4IL_u~I8ec+|HnVEU%%$Ea`|KyDo`u$~%#@o)!yv@wly5FBavuJ*w#q%#$ zzrSSW+YGk(ZjBeu;9G9G=btz8j^?+$cwYbh0{Z<$Gw*AD$ImmL^XVT9LS8)cV+N}| z`YQarYvv8+?>BwVvu`znas4gx_v_W~FPeFa`Fl7Oxw?={bh_Tl`CG%51`-~A6wk3fv?qv!u!4~=^13O&#G zEIt1})sy=FyZ7(Dea4T_^KBu@{CAx2579%Zepw>?EA7Mh-?N0z(RZCA{FAAjb;AG8 z3w%Rcg#Q5HmkIxCYX1u3ME_O7Kb-o#PWV?4zV{dP_(&6s-=OD@nWZDt-u(stJmdFM z@H^B_o#>pQd1U-9J^$N&cxE2ZJTiWdo_~_Y=RVAzAJOwyX#KC#^Ni0>zwb!T?;-pnXgqJzcrw1r>{l5w#`jV?ze3}=m*`=9 zA3eWC>-h-bFQIlG5^~?Ac zJ^z(7{#OZqj>h3Q?K{TLrQlmB_|+8rMhd>0=9l}uMs&WB=)6w&0*&V_!at0@_fZOd zkMIQ={|AKs2KI%V`|{szqMz}F6nrTKUr)g=r{LF8@S7?4ofP~beOI5pcZQz-c8r_- z-hCEW7N)V!at1qy-WBk(SMKdFQRrH5dJrW-!S`E#{Fe()MjRG8vGq%_#N8spHI)< zCH(6Nzfbs2626Q0OoQm(P53tuzK8H1Cww2_zfSl9;eSr}5rfP3&eHfFH@JK+<4e@e z0zH40@YfQ4j__5&JB0sF!nX+je!?#kexLAbgnuQ?<8{K{NcauHUqb8uCgFDozeV_; z5`LTTUBvh95MCnu9^p3$zfbs26aIkk!?eyH68<#dj|l%Vw#i9=^* zZV~=c!fzA)c7)#{{Ot+9OSt9D^7B2yWzX3E?i2oU3*vtt5dO}DKO}q);g1M^SHfrR z`gweJ!gmq=9)#~E{5=WZL->0UzL)U5gzqE#eF&c={Cx>uApHFZKSKEX6MmfV4EaBG(|2V?0 z6aG5FZxH_Rgx@6m69~UW_$Ly6oA6H}{0`w2!tWA(j_`YguM&Qr@EYL{2wx-oA>nnx z9}#|@@R?ur>;D4by9jR(zMJr82;W0^lkmNSZxFtZ@D|~-gtrM_AiP8P5yD?j_;JF! zgf9`^Bm6AkeZtQXev$BX!k;6&L--cqTZCUC{4(L6O!yVT{|n()3I7zruMz&KgkLB8 z(+Iyo_+`Rx68;&4-y-}o3BOJFXAypf@XsdvF5#a;_&vfum+Twj@V$h8IpO;Vzee~h;a^So0^$Fi@FRqO z4dKTL|60PA2>&|5&l3I(gr6h)I^pYtecMwzeV`}ApADr-$VEv!oQF3yM%u~;r9st0mAPS{)2=+ zApD02e@OT(!XFX-!-UWLnqU7vLijGif0Xdug#Q@fdkFst!uJw>oA7;v{}kb~g#R?* z3xxj+;YSGn--I70{AUSYBK+qFKTG)kA^aTScL-l6{O1Yp5dH?jw+MeD;g<>jCBm-| z{>y}4CHyYo*9iX=!mktltAyVm{7r=4B>dM2zeV_O5PqBR-z5AF;r9r?OZaaQevk0q zA^bk!zf1T7!hetOhlIbG@JEFIKH)RJ?$`f)!gmq=2ZZk?{0|A=L--#NzL)T~5WbJ_ zKPG&Z@INJdf$#@}A0hnD2tQ8vpA)`B_+JoymhitK{2bwbP53(D|CjI%;SUMlBK&U% zzfAbw5`Km7za#uA;eSu~HNyXa@au&CBjGm)|0lw468?zrTZI2J;kOC@7sBrl{%?fe zCH&tBzeo6g5PqLf{5=W3LHK(Sev|O`A^aBM?@Rb? 
z!rzbZJA}VK;dcrDK*H}4{y~J_Cww2_4+#HY!XFa;A%s67{1t@H{H9<3uOxgI;jbcm zH{o-H?;(6Y;d==`K=?kw=Lw%B{2<{AgdZaO2;qkbKTi0E6TU?FM-YCN@Q)(RFX9@o#!WRgy5PpR4D&fZoUnP8r@EYM~2|rKxIl?ax zzE1c$;T^)CA$*JQX9>Sdc$4regl`ajmGBnf*9h+rex2~w6Mlp6F5x!`?-72B@J+&R z6Mm8KJA^++_+7%c2){@8CBp9${>g+tApE}&{*drbA^Z{HpGx@5Z~68A>4fhh{4(LY z3I7bj_YnS>gzqK%vkBiv_~#HlOZevzzCidD!jBOC`Gg-Q{J#>uMEDmFewOerB>WuV zUqtvi;a^O6hwv{Ue2efeBm6SqUrzWH!oPy>tAu|g;nxWND#EW5evR-Ognu>RHwph5 z!fz4&wS?a${Obt6L-^MdewXlXBK#iV-%R*@!oQX92ZVnc;SUM_cETSKeuMCt-}dYO zI|$!J_;(V%oAB=%(vZxa46|9Qgi5dH?j?-KqCgx@3l7YV;l_!|j-K=>~a{*drrCj1fM zcL|^Q9l!p6mGE7JzlreOg#Q}hdkFtc!uJw>kMMnj|2E;Xg#QlV3xxkJ;YSGnJ;IL@ z{$|3L2)|GGS;GH-@Nr$U{}JIG!rwyp7U6$F_+`TXl<+HrKOp=n;eST>HNyXr z@au&CHQ_f1{~N+@68^V@-y;0)2)|AE-xGd^@P8ouF5&-3_&vh^iSYY`KO+1A;r~qd zL&E=s@JEFIE8#Q0>(~Fk5x$G?eNiB;{Mx9`11(gNBHvzpC$YSgf9^O zLc)&_{vyJU6aLnOFA=_r@Uw)!nDBFizl890!rzAQ4&g5)e2eh6CHyksZ%6nQ!rz|o ztAy_+{2Jl!K=^gSUq<*1!e37KO~T)a@LPnx3*om3-$VEv!rztfyM(_R;r9rCcf#)z z{+@(CApE@ue@OUy6aI+sy@b#Fo?rj(L-;Pj-`Sn zgi9ot{cnNr4XJluOj>$;U&V?37;dpL->Bew+Npn{4(K( z2){!3VZyHx{^5jQBm5%>zfSl^5`Kg5k0Sgg;U7)-Ey6#B@Y{qRA^Z;EM+v`6_~V4% zBm5ZQ_X#f({($f&2!BZUlY~Da{5at=Z}#hdk?>uFpCEiU;im}SL-=XJ_Y(dz;rj?* zB7BzcR};QK_!+{F5dIp%j}!i}gf9{PafF{G{B?w%BmCnDUnl$%3GWbIA$*JQD&dz2 zuMvKQ@HN7(5`LcWYlL4Q{5s+5gx?_i8NzQ8{w(3Q2yYU8oA3?7?-1T1{4U{b!tW8@ zCHy|&J;EOl-Y5Ja;hTg%BK#uZGr#ZG|8s=zBK#8Jy9xgk!uJsVX@u`3{L=~FNBCzD zK1=v#623tAXAypc@XsdvIN_f|_!8ltOZZvBKcDb(gnt3y>x6$H;T^)ii101KznJjL zg#S0fuMmEf@T-J>3E|fW|5C!Q6aHm{-yr)incN6|Cgzq8zTM6Gw z__qON9Rh;b#f|O~TI+evj~V z!hf6a4≦_!i;6NBCvJf1mIxg#Q8IR|)?^!mknjM}%J|{4IpvApDOBze)I?5PpmB zKO_7$;eSr}9m4;D@VkWnCE@o7|0}}p6aLqPKOp=e;SUM_Tf!d^{&$4W{Gng}e^2-> z!vB%*-Gu)W;d==GXTtXq{{INyNBF-IK1=w&5xzk9e-M6z@R=9b+w#oJal)TR_!8mI zC;TkoFC_dN;V&Y5o$y_RcL;wm;ah~il<>=hzb)Zc2!DIRuM)nS@N0y>1L4;Re@DV^ z5dJd4Zxa4;!fz4&PK4hk{9OpYL--!T?-KrQgx@3l-3h->_)urcN6|Kgzq8z>j>XV_}3G@kMM6Ge3tO*gf9^Ojf5W| z{F@0sPWZPFzC`%95`LEOZzKF1;ona9I^j16?-2eSgl`f4orGT|{JRLhLil$RewFa= zA^aNQHwnK^`1cZigYfSo{3hYwPxvjue~|Fog#QrXcL={l_+7$(nDBdq{|Mpt3I9>T z9}xaygg+$w#|eK#_)ic%^QV6O|0LnN2)|AEZo+?x@I8e8G~s&*{~5yf5&pjkpC$Zf z311-m=LkPS_#MKJ6aEImmk9p_!p{=^i-eye{EdXK6aLGDcL={r_!i;6LilCE-$eKo z!henMtAzhL;nxWNO~S7eevj}Qg#Q-dHwphe!fz4&X2Ne1{`-XAA^bk!cM1Ok!tW9O zhlJlJ{ErBKK=@k-e@OTr6aI+sKOubPfnWcBO873q|BUe6g#RVsdkFt4!uJyX*M#pQ z{QnX@OZY>=7YP3w!jBOCw}c-j{O<{0BK#i+KTG&O5`K>Ge)low+R0i z!Y>p4{|LWA_`ecgwGQGPJ}NI{?3FSA^cqkKTh}_!j}ktSHjN{{%(YyBmCV7Unl%M z2=5U7o`i1^{@#RNCVVg9R|tO}!mkqkeuQ5m{QU{PPWT59euMB2B>X1f`v|{9_y-ey zn{dwOe24H?(DQc*|4_p35&lZT?-Twi!XFU+VT3;_ z6Mm5JJ%k@3d@tb(gzqE#FyXU=eeuVI|gdZjR9N~`< zzE1e#gm(xp6TU_G6NFzT{5auP2wx=pD&Z#xzee~;!mks4itrnRKSlUW!k;Gm7U4^T z-zI#S@H>Q`A^a}kuOa*%;jbn9KH(or_yfW}j_`+szmD)ngnvBYGk@XN|0fc@i|})V z?T6DGgQ={#TRxJ{;Zu zyG%a8qSCJ&kXF_TxB{O=|Yn|zhY zXPW$RlSfScgvn={JZ18z$)7a&Jd;0V@|ek2n|!g!pEh~iqkm^@_icTHYp^7l<1 zHu=9zKGWn`lSfQGVDi}}A2fN?&$?Hr$)8xN1dBo&rn0&U$&op_|<`6f@AJZkclCcnVsDU;7J`8t!&HF?_P7nyv$ z$uBl}#^m!%o;CR;CeN9?(d3&=mu z4wJ7p`5h+DnEXzYXH9;$$#W+Ei^(^ce7VVepN{VTohF}P@_SA0H~D=gpJej;O&&1$ z116tr^1qrqXz~Y5KE>n@nLK3jhfQ8(@<&V_Hu*}E&oud?CXblB+vKxN{+P+5CSPUp zc_x3{<(n*3Rlr%c{s@^vPE z-sEYMzhLt9CV$c78I!+c@~p|zCeNAtWs`3*`70*(eKxxPziRRcCV$Q3ev_{^`6QFS zVe){<`%FIBq!GI_}4Z<)NxU6ao?`Fkdh zn*4o}&og<}zRu*knLKUs-A%sUx8On!pNeP535 z|If!H2EZxhfN+Z`L9eq+2l1Q51RZ$lTR`E43mdUezM7{On$1#!zQma z`An0aZt{r9XPJDq$$w+=sL6k8@_8ozoylV+Kf~mUO@5}y<0e1b!`yuQ&N} zlmFi2Nt6G<*88)#NdgUt{vcCcoC?ag)bQzSQJ@GI_$} zZ6;rC^6N~VH2L)=Uup6iOrA3NjV51b@|#SaHu+MMuQz$S$ulOu#pGF&-)i!l$!|0H zCX?T8a^F{@`+vgZ6HNYRllx76hsh_I{7#bxOn#TiC!2h^$%7`p$K+E?-f8lX$?r9J 
zmC5fjdD!GDOg_`(512e+^1qpUw#gqddDP?&n|z+hA2E5%;+T>50e7(t^F?q)1YfYXt`Lia^ znfy7EZ!-DwCie}G?*A{Ce1geeG`Zj8FPVIj$zL{kz~sFqpKS7fm^^6mS4=*|8yx-&rlfPr~r9?CdD`RyCSPyzL6c`pzR~1a zlYe0HoXI~n`6iR+Oz!*7=>GqS$tRfnQxF(KhosuOn#Kf()=WAddYkC;4R^52?#xykEHo;3OIOuo|OXP7)?^0Q37&g5sC zJZ&^2pKJ0dCcnt!A(LNh@+yJ`Q;{$nf#9?Uu^Q2$>SzpVDhCVUu5!x$y-cbwcvvZRZGU7`|!TLszs?eRBpl(a71pz+u^X>fD3U*uE*cO zLAefZj{|Zo-U0jN8vI@CldJJWocq=V`~4LAaYnAhJL0rlj*Dd{P z19Bgpiv4mgJ`(%n9()we{YUeULpURM#?fXE-W1;VK-F8}actEH~h49Fptt&v8($!zbW?T#Kh;zg&ZVfqil{{w2;0YyNQ< zXXHx!E1Z_gaScw%W%xv#luPkRI3btd88|K%;gfMpF2twcs9b<&;)p!ll`V{#$B7)Rv- zJP$|Y;VYak!C`p_H{y^yh%d!KxgTGK19BgpkNt8lz8w4H9{fj~`%?3dV>lys<127l z?!rwtC3oTlI4O7Fg*YL%<3%_wx8W;sOm4wf;i%k%7vqTBh?{X(Zon-#B-i65I4IZQ zt8qZC#jV&c*WhcgPp-z-;@lUSe;mgdxf1^gr{!|ohEsAGz78klQhYs5$R+p&9G8pm zjW{M3;+t?(F2GB1L>`Vg-;BfZ5N^jIc@W=%gK|H<6$j)#ybSy0UVIz&$vyaXocmn! zj}tf}cjG_fwA_U|a7ymPci^Plf$zi#xgFny<8m9m8^`1p{1+URoA7cRksI+nI4n2d zP8^c!@x3@G*WvqcK(56ruwSmh_hX-2jUT|d&ouuyi8FE~{wq$)<+ux{oRmxP zgE%3V;D>NrF2WDvm|Tb-!BM#Yuf!2~_>azy;;=k~yKzV!#E;>i+>if`19Bf;h5d3b zejNMc9{dE(eX9A#DV&kJ@sl_$cj2dSO76s~aZ>KUYj8qt$4}$9+=idQF}Ve=#ZkEl zufq|!5kHH=as%$cA-Ntuhl6q*ejW$pTKod`%Qg5#?31hUOE@>A`NwITkt^}bI4zgs zUYwH4@IP=;F2%3lgj|AO#c{a^zlLLSA$}c4gKJ9&f}!xekAT19C0?5c}mC{1Nua)%ata%W3{` z4rk;_{0UCW<#-6EIs_&+!(_v5c|K<>leV87gpzr{Ye2XDr?k2U|;H_kaDcjIw5EqCDpoRT~7RyZkl z;H_~&ZpY(sTyDeL;F#Qkx5ZJp2~WTgxe;%N!*T;I#38vJe+LKUI=np&$hCL}?3Zis zcd<{d#uIVwBh5ed-ji z5{}5jmpSi>!}1U=!6A7N?}meNKi(Y&@Lo9gq2?b4a7OONd*ig+ zg-dZt?!^1xq}+k`#R<6`?}y`Z8{Qwssaau0NDVvV;9p>$T#bK;bAy_H9L5>B68{RP<#JquQ*s$T5hvwR zd=gH`C3pso%SHHP9Fq(2DL5(@;F&lg4>vlWio@~{uEimF5TAyFaz8#D2jo6H3;X3> z{A=u!d+={?Zb0*oBRC^>#?fY#f!F zFfTX{MC3+%4i3u=xE_b(di;AFll`V{#$B7)Rv-JP$|Y;Y*w^!C`p_H{y^yh%d!K zxgTGK19BgpkNt8lz8w4H9{fj~`?uyF$8bjO##i99+=ZKPO76r9a8mBT3vohj$BS@W zZo^mNnB0P|!cn;iFUAqM5jW$o+<;qfNUq0Aa8RzpSL1+Oi(9c@uEEz}pInWv#ku!2 z|2U2_awYx~PRr%E4X5NXd>u~8rTBWBkW26lI4&3A8*xl7#5dunT!5G2h&(*c`DPrJ zhj2R%$%FV79F+Uxf}l(r{yl(fm3oPz5^%a z4tyt0$nE$p9GBbh-8d$<;J@Ig+=Q3oh}?+p!C|=pcjAy-kMG4nxenil19B~1f&Fp~ zz90MKYWx7sy{q}hNt}@@@n3OTF2`LsC70p9;iO!OAH)f{1V4o1auI$Q$K*o%2#(4H zcqNX=!xuY0io@~{?#3Z`5I=^4azFk%4#<6Y752-$_;Ku$d+-xD_m1Wtr*KB@#!uq3 z+=ZXQDY+A`#!0yYufYkq9Y2lZavOdI$K)2g7DweKybed?M*J)e%MG{(hva(v91hBL z_<06mL+qDp@JHAuSL2UyE~EL!Ih>Iz@h3Pfm*XLvlFRU?I4PIn z&u~I6!Jp%}T!g>CF}V-h?Ca@LcDwa9AF~!#E@l;{V{F+>gJ;0l5!9=sXn-qQSIUx9N*?#APATJFLHI3;)Dt#DHAz+2;l+>XcNxZH-f!7;f7Z;PXH z6P|!0awFajhvf!bh(mHc{tgbxb$ELmkZbV{*e}=M?_!@^jVI#Vzc%y!KlbB{T#0wY zX}KI1;gnp4cfv`z6z_}^atSWRak&WZf@5+a{vM9X1$YvU$is7-cg10O2$$fHJcxJ0 zLAf9AjstQZ-UIvPUc4vv$vt>4oZGt+mH1$smdkNDPRV8X5S)}t@u4^&m*5HszaZ2vQ z$Ka&gfse%rxgAf#ak&j2hhuUJ{uz$SO}Gk2+lIUAlKsQ z*e}=MUtphHjem)AeVTt9#u>R1{|cw&a$JK`av44mC*@Ln5>Ch^cm|HkMfhYKlMC@F zI4T$5nK&X3H#nb)!}1WW#UXhRpN4~SKRz7?v2e~ z$G^uxxeosU2jp6OF80ec_&n^BtMU0bw_fv)qc|g1;tOzEF2@ZxC70m~aZ)bDb8tc~ z!Ec#W*52 z;$|F{8*mE_$@O>%4$5`-Y8;SjaVz%AHTW9rldJKyIQN?7AIEVllcpxlpd#R0hw zFT;Mh7vF|`au2>8=U&zP;{?vg-T2QqEqCD#oRT~79XKg>;5%_bZpU}wxZH;C#xc1C z{{=_oCcGR+ku^MF#Lwce+<<#A(!A+aa=CKui=RS%BoE?$;-K7*-^2mA4{yMJxflNn`{W+{7S6q_`NtWYk-PERI4yVKew>m! 
z@jEyvci?w%LT<Y;4$DJ$7>DFR{2v^Y`|;N}Aot;KuwU-Q-(sKKgE!;cOPYV|+sZj3 zcjIw5EqCDpoRT~7RyZkl;H_~&ZpY(sTyDeL;F#Qkx5ZJp2~WTgxe;%N!*T;I#38vJ ze+LKUI=np&$hCL}?3Ziscd<{d#uIVwMa@6<-ji5{}5je{kLvhvgw$fixYA?-Vev+HoQNM$u0Q%I4U>c$v7f6 z;sbD4Zop+YB-i5~;GkTG55xhv79WKDat;0=_Q}=wM>zMq<{t-fMy|vMV71NI3V}oS=cZ4;$LH*+=G9EbI)r2aRg`NZv0!Emb-8rPRX74cQ`3`;4^SS zZpUZhxZH-%!ZEo8pN*q(6XqAn1|o7JJ_m>823(Iray|Y%4$5`-4>%y#;&ZWIuEFPF zpInX4$GLTye;mabxe{N1({ed(z$v*5Ux<@(DV~E9atWS`<8l$c2*>0?d@+v71$Z8g z$iwG2UxLH(5N^aFc@STUgK|H<3a61mkgZLI4l>70mI3V}oW!NwG;@hxK?!mX?+|!zW zoWL2m8~+)n6cgqP!p+=%bNVYvZ! z;*eaA@5Mp64&R3ZaxGqg{c;VyAN%BL`~c3a(fs2i&d8PcuQ)B2<1U<%%kbZDQZB_0 z;)Gm+AHs3D2tSNtav^>MN96*%5=Z3Wvz;HsVR;C5EcQtrTOa6)d!Pvf}UhM&PPxdpGqQMn1P z!x6a=Ka0b11Ma~gxgI}4;U@*w^v4$A%b zO&pN>@CNLcd-1=pPwv5Q;oOs&f1JS?xf{QY({dN?$0@lJzk`!<2YweP~QM zeH@cp@V{|XZo*j{ksI*<4$BRA5QpS?yb%ZGI{X0+$hG)G?3ZisN7yG< zoRKT>Cpaya;~|`q%kZZ-DVO5Ua6&G@pX0b(gulQsxe$MeqjCY>gd_6sna*F~usno^ zaY!D-|G`1IAAgMlav%N%`{iEzE%wPhcr(sDq4~$Yt(`M+Hy(%6au+VZDY+AGg_Cjz z-Wn(5c03-(#=}TO5^}@B|!@8}W8HEH~gn9FpttcW_Xy!`tJ4T#I+Wez^vJ z7yIODJQ3#}-^}~}*pD-ECEgLI<#JqvQ*s&J2`A-JyfaS7CAb*J(1idpIf= z;7K?l51-+@D-O#;xCDpfLA)Cd%KdnE9FY6)9@sDU;ytlX?!kNE+$zTZ6b|5w+>Q6f zX}JrR;*{Kp_rXcI1MiCyay#A+$K^J>KaR;Q`1?32H{r=RA~)g#a9D1@WjG|);~(Il zT!#F^Y{c;Wd1@_6+_?I}>t@+1coRKT>uW(u}$2B-5m*Ep} zQZB_O;e=d*XW+P8gippXxe%X%qjCYBi6ioGo%5+UEDzyY9Fhm|X*ekNT|#;X0g>JMr&uQtrTK;Dp?c&%|-L4WET$atl5i zN986w8%N|ud=3uF4Y(eM+E|2T>>awWb1 zr{!|ofKzfAz7QwnQalGIk#;@lⅇmUZxf@@B({dMX!YR2EFThE;124o0xg9UU zak&j&iDPmLz6wX>CcGF&m z*8Jl*&d8PcPdF`?<2Ia<%kXtLDVO5waY8P^H{iHjgm1(#xe(ukqjCXWiX-xH#QA0% zmWOaV4#|V~795oO@vS%@_u*yOFZbfxuuty6x8vMHntzng+<{YaC%ywG z55a@*sW;2jzbJcN~!W@G9(=d-3DgC->kdaPDuKf1JV@xf?%; z({dMn3a8{wyc#Fv4!i~@~QM861;a@LC*|oA5duksI-|I4n2d9vqVE@pCvR z*Wu@JK(56vV82|0U&KDS8oz{dU7CNK#u>R1zl_szIqtv2RL{W4}J^h z{;K)M8Jv;3@!L2pcj11Vk~{G`I4O7FcX2{)$M4~|+=k!BF}Vf*8%O0PoW&8j5f9+7 z+<*shNUp~laZs+qAK-voi$BDExdwlPeR4Ja80V6jf1JY^xe|YZ({ec;!YR27e~OcG zDgF#6|>(3mlUR@s~I%7vN1eA`j1U{tAcXAv}yj@*w^X4$A%bYaEdK@Hf~m z_u_A{Pwv5+aqa=lKlY7x&dA+(98Sw!xB#c*PP`RP${l!XoRHh`cpR78@HRLmx8QAY zRBpl(a71pz+u^X>fD3U*uE*cOLAefZj{|Zo-U0jN8vI@CldJJWoV#E1kNr3!SK=LU zS}w;$I3<_iop4ev#XIAKT!M>nTrR@9;Fw&9zlWo80iJ{-^6=@-yW+4sgiCNp9>lxh zpxlpl#{szy?}7btFWwXTj+v0v`RM`EAcgO9?wdo}+!gfntCJ{qUxF8ouRk~{G+ zI4O7FV{t-m$J20JZo|jnnB0PYhNE&5uEG(y5g(7kas#f$A-Nv^90%n(d;$*0wRk%A z%Qg5H*e6%xU*cS+<{yV~My|xa!fCl2*Wi>~hEK#vxfGv-6LJZjf#Y%!J{ia4LVOC2 z$_02Pj>yBc&ZpwAJcMg;NFKze;h@})Psahd56{AWxflN$`{W+{8=SjG^N%AqBX{H9 z;u^f$#J|HyxdWep6LLE~6UXH?d=`$$E%3%d?k*_E%+)Nm7DNl9FZGw zGY-oQxCMvgdb|V&5*75n8Hd=2)=)%aSRyIb>*<2WN%;y>ZET#nmtN-o3K z;iO!Oug3|w1mA$;auL1}$K*nM6OPIScqxv^!!w<4#$kB~x8smJh;P9`xgXz(19Bf; zhW&Ccz76~29(+5_-KF`*37nC;@t<*8?!p~7C3oUGa8mBTcjAQHj_<;8xeecqV{!}r z3y#W7csY*9jrblMmK$&<4$1ZSUL2I`@O?NS*Wwk}FW2Dvu}`kX58&LLntz0oRCZKLpUxM;fHZdF2s-Es9bl@Hj+_$i!{JMn6qlsoVmoRHh` z(>N}-;b(A6ZozADRBpoSa71pz&*HG$fO~LAuE)>epj?Na#{sz(zkvO64So^(AU&m3o0I$apdH7`K zH*i=U!hJX-58{8~pxlq&!~wYvZ@_-J7yk?UYPFak&k@k7IHR{x^f=k}@mE$fLH54UVrZrgP0KEF=? 
z>+ZMA#*%s|^dXmP&q-~cqpE;D1v+licel0e_i?JBKLkBff0gZA54|3GseOIf&RtKv z)%ei@-zNB;wEK8}S=O@y`Z-rPZ%5wU`25El=kc%zdR`X24EoN{2iVK!crLf261o@q z_WDaLd~+T0$n9u^9)f+AQ~3vol|gi7b< z#wF0h&?jkm^)b-nSbv~BzwLf}DfAe0r~79W(B*by8b@UQYUoku z#rFKzPV4?$J@lub&$R2dzmw7oy#u<_c-jvA59o7xtglWuvOnb3U_W2Eb=#+QUAFVP zJ;%c$_}t;lOSMeZuakz`wZjstDW-{;yiQ( z^m#JRU4O~b^&FuZc@oz;=cz!RFQJ?Dx%4l2y6V${Jo{hooTmYKra(9IOm)sv5XJMU z`ObMdk*5K=nP)%eJmttEZg$Qi&!0n~n|TI0=cz}YOK)+`BhQ};pqqLAd9dUD>Oh{4 zZ*|Vof;|6%ZsytOoTvDHock9#&(Eeq9|zs(d^`{O80Z7+{ogk4uZ6xV^gO%H`&@Fo zXoNlh`YCqZJ-S#=Vg2hT7Y}akisf(c>j`feV z>o|VpBS4vdI`k0qOxq`quX)hRpr5L(uV1&;!FMx!PUB`1^y{+dZP2gEqI({|@d-WC zJX+SX5c=8Bw^z?n`0mJ3P6hN^v*^{(Z_J|CL!Sq|$Na~3omGwV+iK_^qRyTp59;p! z*7dV?pSBbErrqv*9aVq{%5>;X*HOjPGhIi?cAE};2G-wRyH&w=7kscuH(eB6!QY;L+rZkb5DiP8?)#o(APje$G*N>-H4FwP!4@F z^zF4nHGI2QJGVnU^qsTl&CmyD(c7W-gPy7X$a*&8d^jHZ*?8|6aA0@;k#m5u?s222 z7T0C)?Qh>+`Qa2S%RFV!=R@D#_RfRvN%)-hdoA=wv*?Y`AI_q;LSF{mX+G2my$<>) z`*x_W<(c!7f(J3bfbMktSq%O0Eagv!zADT5^PtP}apl?F*1RuTfKV;;R~_=pcAkgx zSR?c&@ZNI~_J%b-k^9T`oTCkSj=E#(w$<=;d1x4eP!saJ3H?%;=SW)~YuwPDXBREQ zyy#Bn^HLe~PUPFoUY_l_TqX41p=Ua;nf>Q}+^dI<;S9f*F|NDLa=Z8}+1!5kf~dz8 z(zjak+5XOA8_HV&U#9brY-i7MJU@Y+>AF+uh0qs6KLqc)+Ryemk`m}oLCJ41p!=Z@MtNQBWt%rtLLUL$sXc3;Pk^4OJ>`C{#CfX%`dM;5y>V=J z8ERgBqMVmYUoCtO-L0;Vd-&Wv|IUI3H}clQ=Q#dDe+qi0`GMR&CD4~;(aWLV1|4VQ z?z-CUt5k9Q%k8>b4XEb+hyG8jpK1R}y$Sk{(4G2s8}yH$PqgQ^-9Pp`gzG|w^<_I2 zLjNB6Y4-Ki^&I!Ax}GbAFaI9r?W};lFZ50JJhuBs)zCkK?$i$T(8u1Jx!=fo%Jb)B z=;plr8}xi@Jk+k&I*@0{ea`m@@*YOtf$lUO6hV(cpWI_R)B|w2KgyuL1ATwH&hw+x zE1`b@-D!W;K>q}~)BbFL{s#1Y?D_5YzoEB4&vacN+o=P3pQv_x$+1FodbkJpe>v0}D6nZ7zdwSXGYmI|!Ls`76Pr)k88{yA%f2|ODG4wj<$IBJ0 zZI|t+x`K?V&p*Sr;lJ=z!?)$X@HN1<)8g#q%I&Sid9N5gbKKtxz1gaxwY|1+Uv5us z4D+__H5z z(BFaXRKEu3A7;tl0{sK%PW9`6F6-~KeR-HD%JMVK&&>S~UAAwg@kQ!o(0_;Sw0)J( zw`M8726`uSr}{TQPiI-b1$qbcQv3enb+W8q2lQbNIM*-lQT$y&=mYKR+wSugLEi~_ zruxa_s0{ib=opT=`>z}qmRO^q8eitYSDmFEwb1W@?zEp8p)Yc%huqFq=nJ!~-wAyI z^ilSD+OK~eQ{_AD$71MS!%vix>!ef>FEDSV!#&hx?w=wqNCspXN6n`JvxLobDn zJ1^bihVAnJ_0VTP&(vRJJ)5DQ0KLq$uHZgX_0i=lxLx zeKd5ZmD0HXu zK`HdwEP4g>hqCC^&{sou8jtFsFM*!vIF;?t4E=uSPW!VR`Yq6%`a|yD@O`H&>yLqc zNtW_Up5Gc+fj-0Rsi~J7slID zQA_K(2HTQzj;m`NYek-0mpkw8PUtsg(F>l&90j^lI}}5|I?MXgp;toBw4HK0Jig+YbBv+dAa=K1)5CpnsdC9&OOSfbMi$cp5O@hu+WL-nP$~6+(Xwy3_Sm z3H0}%=h@e{eICCYdII|Po(ELJmxgb+J&*148}-nC&!RU&{{#AH`}(%eyR}32tZ+U* z=RSk;GxYuK>)YA_x6?LnEQ4MQz1Xg! 
z9ptT?F^Co6d~_Q05Aojf{x049-nuWLJ*TWgzHRVdYR~7sus1 zv*?~@aeqFGUI^Xyu=DX#0zEH_UJl)zMX!Rs1?A7^v3+WyF6&g?_4BODUcW-)ehs0Dfe`XTmqwLP!wfIb7dQ+wq#8pesxo!YAi`jJ`mGU)q4cWSR9oPUmh zK2x@r{52Z;yodK!#E)?eS z(EY%GSmZWl2&h+y%`W3^m-x(Ota2+y?^1EHF-yJQ{~+)*@pg`JiD=9*7P!RYImU9A zSdwEr=MuN&7@J(;cenATTl|t^Y|hz36>$JoI$qScjdF3b+qho*%Vpf>5?{HDM_I(K zM~P0CajN*qg}k4*jHg^8>Nf6ntM&iyQ_aQ*idn{WYV!u{58fc2b{X}qJk@TtV>xSm{PAz$YO>WWe&}O7 zB3F4SM|{)QSnG+q9`9>B<`p;hHJ<1t@IKv3+~3!DvbTKyLvQh3U*qOJ;+4L}J$=O5 zzQ)!*Hz5!9+HR+d&pg->%^u@7R|{gddBkrX<6j=tO1XI`qfi`XOao>8me#mj3tR^u ziYbOG@a;h3F}HXz*Z9P}^Zf&I*5rsk`{(?YBi_x;Y4ynWtAzMIH|Gx_-!Jm|e(0am z|>FgsO>7Vm_UxD`@^g9&Ad^SLQ zI?#v>6dMK_>jw%%{v2r5$aS5O8ytpuo$os12)RwSy2ahSjW1*$Sd}Bz^fnqjcpDMo zt=`5f0&h2oqrUHJeB>41_cm_tCARg>S<>rRL>}#ZB%XKm5uf)pYWj)|eT{fu`F?&s ztDO2%P|h~jLB}3}-z{?O_JG&;+$B=Nh`2v;VWB-o{3eWBJyKZf5evM=N1pjoSmQ+o ztT<@C+l?n~ZrPU?$$f{I{a>!^(MO9?<6NZ;I1HSz-`y@FCOZP$eM-d@#;shCfxM$2 z%oOjrjI}P&?lQh}sof^E5l2H?=@Jj+Nb&9*W2IYso@1akzQz|G0Z*fM%yYTM>Rw`1uJLv+ad)m!-CO+G4?9i#(9ig_xA>r+@m(JQ&&IyD zqLt02UAns~PYyA2OaAS0)wsn2sJN^$dWq`Gd%Jc!1W#|*xo&d(nB)4|jWa>ibM&Vk z;|JMiXhYxnFMSvT{o-(JwORg-Clwj8yu~@j3vO|1j`2@7oVZ=1Uj4 zkMie^01p*M89}+cbQjlP88|_l?wZs0I^%ZPf~rQ_1#j9>(lYP6k zxpMb{+?8vas~-kmjDXv0yJ4s6<-qu`OC()r0P(WRsI%?P*WLHIj=>oL+21KNzIKb} z3yoztB35X;kR$FZG^#w}#zNyip0V(JC&W{G8`WO1Y;WTYuUNRZ@vT?!g${`29bKV_{M7_+~Pg2@tIq^;x&GAi?v>3VUAetmF@me zFJq}k+}+Dq;}P?F84HB?pqH^+h^Ah~Q$jr5%lK8?u8x4-XNYr+u&T!Rzq!OmImX+v z2mIg?n{$j?WNk1eRC(lH##as8Vu{E2RldDDN38W2Pvpp7e8wio&LA7*|4W@=DC+zF zRA&(Xb!QmA#w9+$$zJY-luP`SW85U$dzo9@=rNv`Z*p22s>3xhi0VBUPj_oSPu?|9V|?xwPvjW)J16v}TQ>eW zw^Ysec2?W1I3IVGvF9+(xS{<2dGV_qkj+2|wd9UX%MzZBUQx zfp6d`Vvg%JS6}>IBTn0v%dDWaeLcB!M_2y|;#AkgeXlZZ>0U#gI|hcub*{xO@u16i zyj$s!YsWJL8X_dy;Z>JBH(ISR_&vGCZSHq-HuN#RkUM}i9eiVs*pg%1mm|K&G2WEF zumjyv8kb#5f|d&Ud9F??&)PL^fn{T zthozO(p!CuFIm$6+2z6S%VM{a#44=!Br(M}8-BB&|5y8_>qtX=?eAsEo#u+?-Ns!m z@u*v#N*BA0Z(LWKTh>(&x3VAP$6UrnSyve{%ir6jAGaB%AJRT(wrF?bdg4;jaBNsxW+KAe7L;>_i^lFxqBepsoiG`r@3W!8_525xF~a7*tb#+ zAXbk>*}cyZA>$gksj@GgEzWn%cZpkE2C8GZR5NtVt)9gk?E4(oS8@*atY^=UJVr{N zgXwLRag4)U&Gh}^3S>!#di}rWGv}QD?e=bOJ21Di?PJ;W))Z8=z4QLI_t3#|-ybS2 zH%<}LjR^Z_uAIS45hr4dFn3nhj<@&Sc30s29M>mq+{9bz*|X7OY_RV(b=dvaZJqUa z>^)gbHO|wjBF|Mj%6eWDbv^0EOr`Hex12Uvz0B-?&f}`R?Y7rf&Evy1r|xPFH2Nbu z6r;#_qTzm*QR5O11MM#H1yF?vG=MAig^OKAhg|#ecBW-djA2rK&z62DJ7O zUjrR|#JfOqUvbA$l-E~uVyUs8ScfIdydMGTbHz76Q?7UqsO>NA#P3Zu^cR0(sd|8T z4oh_d#A={(fM^4128#EA_JQIqoL#C0iLF>_9VDK|QpX_iD9}7ud<(P<7A-*I5U~h@ zP|Fap4NLVqh!?Qbw1apIsLc~8pdn9u095ZN?p}`ab`8hTX&y zKy`um0jMhw9|4^OqUIr#x4XC*Xy0AD3{(vjYk=0F;zyuksQ4IY-b38?Fv{CQQ~`~{ z#Co7*n0OMX$5jE)w5Rw4s2wh%btrGRxCN-*OS}Tq?IqR%oqLIPpr%lK3bYrB`&Xg7 zy+s6Q-CMj0bnGqu1~mJ`KY%u$XayQah{Z9KH$vPB)Q=Re0Zk*tQ$Xz~u^DI>B|ZbH zM~ep@L3yLa0-$rWXaaCy@-)!CkN780HAZ|6w2l!=R-?Qz;x?dpU-3H7wy$Ua8pnz) zK+9P11yH}ASo$c++fOV6YR8EL&@fIs161!Xeg^9H7heLM`-@tf@@ftcw*&15h`$3> z2Z}h*dZ73h&~c#n3TQ484?d3aibOTgc#wDlXgNqc3)K6?FF=!Dd=1ndESA-yyo1FZ zK=pX>CQvtCtOGj7iw>aX5b+Jreu!B91j;*9+zGTEDmDNehl=Nb=EKCVK-*!W4QMPD zE7qXAVsRHxf4F!HXgXXx57ZtZ(m=xz;#;8lNU`!sly{_91auxLnt_@L;sv06g7^)n zI!dH~)}zEjYf;`&;%=aMqIerjFK-4{j@{SSr0`13$jX>38@eakQaT`UFK zr;87Psw7lbHv|)`WfQpMwB;0{0mF9 z=ZZC0YB*Os2vpA$9|LtW#T!89Oi}eB$~#Z|f~EHJ#FJR6Di_Ou)^hO)&`~bl1e(tm zx4eY%&KDh6Y7B|BSZWE0uWeVMrJRg`y`_#I1~mx%@})m$zf2HGzdUjS8ChERj`HS;|6r+ou6PzpRac7`(0aA_8tAxMybCm6Bko9`ylX@!mKv`W>#)>v zt#|~epC`Tnn&yf3fZFTCoqtDp*NH!|RDHd84oh{{i`78q^`Z@^xk0=SwBI1^dIRNQ zV!IWKt>E)m?f@SJn{RZ9Z^1UO1#H9wdC{9F72Jm9`kP(i1uQp#kAbySxR(qzfFFR> zxS+dx1Ih;HgPmX_ScBQw<6t}Z9ax3i!AYH58eni<0;CEU>jHuHs0nE--9jS zhhY6em$%+=(j*Z*Gs 
zYk~i@!2ep{e=YF87WiKa{C}VY#y#pWYuU0d?z7;vWwlyfePS5Y6@%iiaDE)UU`hil9GYs-ge%g1WVCv(~SEE&q3&*faR7KU-z zvFf<&&*jn+)bY6sm%n^-r1=h?mNQ?2Nb|F8c)x|Z{M?&-$&hZ6FrtJ>r9)?8luf?Csx z&w5fVTlJjGhUi1TRh~tLjsI*@UlXwQ+g2{mTBGu9S2pJ_~WSmTpNis#INpC63Cw-)!43I%GM25)-86{(6oJ^2OGDW6I@9`|3 z^pSotKnBSW873oSl#G#aGC?NE6qzQyC$N0dNBYSC86-nwn2eB7GDgP91eqjLWSaC& zW%;C!^pgQHNQTHT86l%&jEs{BGD)V$H0hnj@<|`*Cj(@V43S|nLPp6L87C8Dl1!0l z(t9GyCw-)!43I%GM25)-86{(6oJ^2OGDW6I?@275^pSotKnBSW873oSl#G#aGC?NE z6qzQyC$oIgNBYSC86-nwn2eB7GDgP91eqjLWSaDrv3$};`pEzpBtvAFjF3?>M#jkm znIuzWn)IH+@<|`*Cj(@V43S|nLPp6L87C8Dl1!0l(i>#?q>uEI0WwI2$S@fpqhySX zlL<0OrpPquJ(cB?KGIJH$RHUa!(@bvk})z)Cdee2BGaVzG?q{LNIw}MgJg&dlMymX z#>hCCAd_T@Oq1TzSw86_{bYa)k|8ooM#v}`BjaR(Op+-wO?uB@`J|8ZlL0bFhR84( zA){oBjFSm6Nv6m&={=L>lRnZ<2FM^8BEw{ajFK@jPA14CnIhAqcRI@_eWafZkU=s; zhRFySC1YfqOpr-3MW#vbSuCIQk$y5j2FVZ^CL?5&jFE9NK_M#jkmnIuzWn)IH-@<|`*Cj(@V43S|nLPp6L87C8Dl1!0l(mR9YlRnZ< z2FM^8BEw{ajFK@jPA14CnIhAq_gt1w`ba++AcJIx43iNuO2)`InIMy7icFKM#jkmnIuzWn)H^l ze9}ky$p9H7Lu8nYkWn&5#>oVkBvWLX^q$Z1NgwGa17wg4kzq1IM#&f%Clh3nOp$5Q z8)EsSkMxrPGDwEVFc~4EWQ>fH2{K8h$TaD_faQ}u(oY7+AQ>XVWQ2^8F)~gj$RwE} z)1-G6%O`!LpA3*eGDL>S2pJ_~WSmTpNis#IN$+fyPx?qd86bmXhzyevGD^nCIGG@m zWQt6a-V0ei=_CDQfDDo$GE7FuC>bN;WP(hRDKbrZFJk$mkMxrPGDwEVFc~4EWQ>fH z2{K8h$TaD#VELqv^pgQHNQTHT86l%&jEs{BGD)V$H0iyV<&!?rPX@>!86v}Egp86g zGEOGQB$*=9r1uh*Px?qd86bmXhzyevGD^nCIGG@mWQt6a-b-0N=_CDQfDDo$GE7Fu zC>bN;WP(hRDKbrZFJt+nkMxrPGDwEVFc~4EWQ>fH2{K8h$TaD_oaK`~(oY7+AQ>XV zWQ2^8F)~gj$RwE})1>zbmQVUfKN%o{WQYut5i&}~$T*oGlVplalioQjpY)M_GC&5& z5E&*TWR#4NaWX+B$rPC;y_GDV^pSotKnBSW873oSl#G#aGC?NE6qzQySF(K4NBYSC z86-nwn2eB7GDgP91eqjLWSaDbSw86_{bYa)k|8ooM#v}`BjaR(Op+-wO?t0l`J|8Z zlL0bFhR84(A){oBjFSm6Nv6m&>7C2+NgwGa17wg4kzq1IM#&f%Clh3nOp$5Qdo{}^ zeWafZkU=s;hRFySC1YfqOpr-3MW#vbH7uX>k$y5j2FVZ^CL?5&jFE9NK_M#jkmnIuzWn)J?N`J|8ZlL0bFhR84(A){oBjFSm6Nv6m& z>AjBSlRnZ<2FM^8BEw{ajFK@jPA14CnIhAq_j;C3`ba++AcJIx43iNuO2)`InIMy7 zicFK<8(2Q+BmHE643Z%-Oh(8k86)Flf=rSrGEI8tvwYG=`pEzpBtvAFjF3?>M#jkm znIuzWn)Ket@<|`*Cj(@V43S|nLPp6L87C8Dl1!0l(t8ujCw-)!43I%GM25)-86{(6 zoJ^2OGDW6I@69Zq^pSotKnBSW873oSl#G#aGC?NE6qzQyRV<(Mk$y5j2FVZ^CL?5& zjFE9NK_M#jkmnIuzWn)F6kKItR!x z$S4^j<79$Nk|{DxdT(XuEI0WwI2$S@fpqhySXlL<0OrpPquUBL25AL%CpWRMJz zVKPES$ru?Y6J(N1k!jL<8_Oqsq@N6sK{7;!$p{%GV`Q96kV!H{rb+KYmQVUfKN%o{ zWQYut5i&}~$T*oGlVplaliu4|KItR!x$S4^j<79$Nk|{DxdaGGJ=_CDQ zfDDo$GE7FuC>bN;WP(hRDKbrZ?_l|)kMxrPGDwEVFc~4EWQ>fH2{K8h$TaD_ljV~> z(oY7+AQ>XVWQ2^8F)~gj$RwE})1>zS2pJ_~WSmTpNis#IN$-6upY)M_GC&5&5E&*TWR#4NaWX+B z$rPC;y-}7=`ba++AcJIx43iNuO2)`InIMy7icFK<`&mBeBmHE643Z%-Oh(8k86)Fl zf=rSrGEI6HvwYG=`pEzpBtvAFjF3?>M#jkmnIuzWn)E)v@<|`*Cj(@V43S|nLPp6L z87C8Dl1!0l(z}G^lRnZ<2FM^8BEw{ajFK@jPA14CnIhAqcPYy!eWafZkU=s;hRFyS zC1YfqOpr-3MW#t_Ez2i;q@N6sK{7;!$p{%GV`Q96kV!H{rb+LEET8m|elkD?$q*SP zBV?3}k#RCXCdm|;CcVp8KItR!x$S4^j<79$Nk|{DxdY7|&(ntEq02w4h zWSESQQ8Grx$po1sQ)HU-u3-73kMxrPGDwEVFc~4EWQ>fH2{K8h$TaC)$?{1b=_dnZ zkPMMwGD1el7#SxMWRgsgY0~=;%O`!LpA3*eGDL>S2pJ_~WSmTpNis#IN$%>~^pSotKnBSW873oSl#G#aGC?NE6qzQyt5`ni zBmHE643Z%-Oh(8k86)Flf=rSrGEI78ET8m|elkD?$q*SPBV?3}k#RCXCdm|;CcTfa ze9}ky$p9H7Lu8nYkWn&5#>oVkBvWLX^sZ+4r2cvO|1JMt)dHikCOT%)q(g^|Jm!Q` zMh!c7?15wXEs?CN7&-V>jrDyKyEDgdcYV*KXFR9t`zSr*9>ez|iVHx^k4!$KbBV#<*u)fFBGf#hG2EX6a zGd{pr`@HoHnQkS=7-)R?U8eXTW8NdyH)!ns!G`sHq3)=G{W8QMzC+jdhkE++jNkda zqMq>`jZEKj$uV{^tnUT&^viDn{k5Ov;9Et*GRCcMJ#}Z1-}mttKX2=fsCV*fL+%Xi zFTZCc<14iNvM1wPwD`e@yKMEbzF!nD<9}_Ri+Vn4^C`O3u?#$F=%io9RDV^Wzs)&HCqR z{;l)}G=BjqEVp-%=0C`c=Nrdp{?q8s)BG3HKUwq3+psc!zUG&g2l6;QR`b6|e}U%z zkp3x}|3CB()BFYa;-JiboaR5qjQ2MRHNX5RN$D@u{P)s7QuD{@KVI{HL;o1fFYpCT zng0aMzrPvpZ;aFY$Iw4j^It%Jk>upQQQs 
zG2{J>37Y>T`cKyUH`8CD`B&3lrujdlf3oKPo&Hla|6UkiWq&Kx{8P+$z7f>?*U~>t z^Dn3WRL%c3{bibeGySJ&{$20|Rau|YH2+a%Jl{B7^UtAwy5?U>{~4P9ZTe?u{-5bT zQ}geEF9yr{lxzNj&3L{sUGtw#|18Zvm;SRf|4RBRH2;hApRM`vw;Rm*T&DS5_`x&A%HaF0wu6YyKn5c)oF- z=D&#kD$T!;{&LO#H2n)S|Ht&7ulf7ni|4XF)tY~#8P7LDn*T)l7is=V`Y+J@E9tM% z{7v-F()>TuzgY9Az6(FQtEl=5M6`BF+CD{dJn(i$DG- z>r0hh)x6*&9=06xe7$Em=gXX`$jOQDd zY5tY;uhaY+>Azg__rf2WlKC4o|9CT=Z(O1I=g_}i^RJ|Tj^_WE{wB@e2Y;qWwtuDO zpJc}S8*ga-E9k#c^FL01v*zDKe^~R&Kh`I=ca!Gd4}Zo=*5@kCKiiD=H(E6Ro%GMu z{4dh~k>>xJ{;M_rVEpictWT@vKhTWl8`o(5bLjt4^WRMWwVMBL^tWmL_vxRf`Q;xA zmG${v^Y4W}h9ukPI?aEg8SihjYyLU(U$6O>)4xUYzefKJn*V3|J2d~Gom741YyLyb zcz@$}&3^{{H){S{=?Nb^rJ z{D07Yo8}*ZTL!W|1)6`78P7KsYW~^u57Yd&(to?=f0F(}%`gAFsoY=Hn%{*Ba9N*` zn!nJD=Noru{uAjRqxmnR|4z;S0R7`M|10$0rTKrMzew}%f*+KT^C2Kf#RW8~1Ad8|W|5{B`u#X#V%o8|h!E`Cq4hp61UTuJS*m z`Oh-rnEz}31@u3x`Cp{JO7nk0f1T!c?WNjtf#x4>#`BF;n*VtEt2KWG{V~mdAN`9o z{}c2-qWL${U!(cIrGK^N7lo?+i#7j#W<1|`RP&!lf34uFVO#{ z=KqcU0?j{mf7PBFH2<#xeib{FU^-t@)p% ze~jk;ivD*r|LzB>_8h1AOU-z`u~GBiLVuCwe~$i5n*TTY$7}x4MQVHB)%+Kj@%~1! z=6{g>_cZ@!^iR z$C+`=|26*&^na@PU!=cW^Z$eXR?R=+5VgItH2>*lJm2_C^WR5*h35Y|{hw?8oI_Rq z%QXL?W<1~cLi1lsf2HPsg#Irz|7Y~i)%<-AQ|f4=7b zi2iRh|Bl6Kd#f~mz>Mb`ZJPf^`WI;aTKd1${IAeot@%HrKc)F|4p;SAr1|$VYR_qmTmDzgIL?nZsXT)IcE-msKA7QD*sQ90Zf6e%OZkPOgiG0lq(0{A)4`4iP z#cx;f5;o|4$FST*DlR{VAz!;qX1qqlr!ddGj1ObGg_YgNKbHQ7l>Y|C*DyYT@#`6% zI7Rt~EmnTPc1SWl;Qc*bMOzlHG?jL&#b z#h+*V_!H<~uHx$$|AX;i%T)Xo#vhrg{Ojp&WV{Rqro1mVe}(eD#kl)K6`!$E#b0Oq zNyg_ts^ayGpL~+?Pk2nlmoVPW_=?9>d@;25##e2U(fh##*`AFp;=0JoN?>#E9^+WJe)Fp zUhysXUHClb+zg-F%;f(Ve#7|d=R(`fJUbhMc;2y&$GkIS{++Ro8t3KO^0g=9`I`T5 zGyd1la|Y-irsX-wjQ{m>p0nuht>wQ5@!|OV;i3oCfY*=XOu?BdeqDiT=S`}b#zlQ#%3sX*gNz#iRiE1!f1B|WPgd~_ zjJwWJ`EN|CJcls8fblcOsB&?+YQ9FzQ2w>GD$g(#He!rdoTTDbJHLW>%oyDD_XDhc z)k6Q%@-KnoWwqN@#)p5e>SN{Ucdmg?8~^pSH{!BCOc>D9zl8C*AE@}QT=jj%`+Td) z#js<(cA2U2Y(7}U{fr;T_&x(vJ6L)8V8F>#pAm@5`pi5_FXC zH z%NQTms`B5=Jl`_@=HbeJDC@Zo26oxbQ7;BACJ6P`49d{<+sMAYZ%WhReAQc@-RM+ z+j~6YZ!n%%ui9ZJ#`m~HGV%sq!=}RPh0fPh{M6kSf=TzsL9irz^jY{(muET(0VKIOC^=RsOS= ztGLw;n;GBmp^862f8Z+RUwwco7tLtCY7n>fDMKb6rGLxIDo;O(L*}YHcZ^hdtp4@_ zqEw;eXQcvdE}U@Ri4JbtN1hYuVwu6@hbmpjsw-#DF2YxmES5?T&v=N zR@H7;G+&Q1KIUuX-;41*=VjjB%Mh2x%PAMBJm<08R~-DG((mJW)@q;L*Qq?Cwe#0d z#;++-^|8jKw-|rtF%=)idQQDwOgMOMf0tSh5{6;%Ql;{I*{I^yJaZ!BkIE0m<7Ktm$BbWdkcy9Gp2}NPp2$cQx5m9z z#(z6Xlab}1;uK+ta-~w#`|jXx{Ddl<^0wfuU=*Rp#>_xRnOlUU$en$*3dei zK5?rm*S}c#t^WKo&>;7OVJZ=D(it0ow7ESFO&c<9VNLJpDHzF8fb&o63X7 zCFbiz#!I+=4`TeXJCuLL2(`aPF`jd$iof%Ts^D{s-^RFSn~I;tc%1RdT~xU{FkX0< z%9B1=#gAnCb;j!-S8;1x-F=brFZrj6TgTCI#@Ae+$}PxIiC$v7s7S@F@#I6s7hRGV2#Ri_GzmAm$N<(Gd}lJ6}QHX(f6tS{yq2mNcyKDF8BLM_o;do@Vek##=m4c zSljzA#uss1vg#R%syvf9j#=j^&;16j>325t>+XJt%W|izP~{%X`mAOAkHb_u#@936&Kt(NGrooKwQE&<&SZS@GF9%eJTDJmyqxjz4a$EQTYU7%%Um>SG-j z6CYQ3b{eJplUeRIREi6{>YOm zf8`kEx8hURs<{6%6}R#a`J0MY-J`avlzA!`&pBNAt^K&3@k-uLu>1p_QhDmWQvQj| zGoSH~c>Qt*<8_SB<$TF%pF^Hjd6v#oc?Qsb3FBT~H|@_jKB#COr~Z4D-*+d3cZ_KeE&^$DsSrqh2Ii?@pCAuGRYtF#bCZ3>wJ=@=Ew=Wpqz~kLo_qJzM{%5aO+9` z9k5emdJg4$B@VtB~H{vzMk3U=;*XJ|7%kye} zIA6QZHW6_-4*%ZXvp+O4zHVog$Lfb4GX5~<+tzjsdVzU(Ki0bc`~>4a@;?7i*5{~3 z<-d&Mzx*8s`D$T2kW%$2S7GB{j1On~?8A7k7c=k2T@jbtbv@g8SNe}+{0!c|vbL+5 z@!Vfjxku5z-%Bdb4Qz))7{8eD2`iP~I#1=ito&j2!%_54W87G${8pYP886^{B&(jG z^(xO+o_CICo*xjm`W{0j2EL;Fn;ug6tvt6gKImu_xBAcHjQe?CE7*wL#0SoQHXsd%F{-zZ`{^tr0%3C!~*<0t!7-0DBG zUsrinu$_JMU&Hu&xypYKR}U7Hy{ z;BD1z*7F^)QRNv{tMZ)8Jm)d~6vxjm8UGCNOzqHjlgcyyb(O!2{t)9s4pi|J z{HxiYt#){x@pc|BKKjeBLuG%^$GzE%FX4629QyxYd}1$Eu65iU{=Ujn!2MCUU z64egpGtUQzXWHIh=-;7_c+G49HI8Fb-sR{aeY4g1>7=n#uSI#@F!tXpLjPFy5v;w;i5Td9K-6<^RK_5-n$Z9-qHk<9R3J&o5T( 
zW0kw-hbm7s=Sx<(A2a^tGb+Dz+?9W%{OgWW?O+`*cQQVa$AvYX?D4VkzsdQhmFGjo zD>=?s@$)`W{vtz_JAm!;F5_c)pUvtI!B5rrV?3woW1UZLL0tB?7r#__to`*TyX1t&A=|Gj=8kb&XynyFHtDf#JRJrx+KURCrW4x5}A1lwh zj0bsN+KLbQQf=?|Z;do7mCveOkGOS=Fl6F6#AW*z@;;bV?mrm!{zJ9r(OhZHSIU0{ z_m@@wB;xY?GHjjN-V5n3`C9ptN!4zpZY6ADd>!wfUqt`ZZ_fe`oKE}`gR{2}y4-nyX4dX8}zEJ!8#t)3&$@|Rb(tkrr z<Ab*`19;P zqnYP(#;5F~{6iVP`3IG!c)V(dJsDrZcss9itvqiszM{X%b1MCJ|ETh8dQs&W&-n9< zSMhTiqZlvxN%^n%Q0=ch7{7&a_hyyHI#2zZ@v|FM{3iPMXjggGboBJ!&G-U7-?7Gr zdd3gsJj5FJeqnss#i~A5{~!4em0v&a&tZJx?#e%m^;yICYFanWbta#te zD*xIN6}QIom5i?rsQkFhH(x{lsr->a%5U|DcNo_{pRn&1<<~!taT4RhUsCx$Vg5SH z|Cwq}tKGh4e9HkUzBm2JOp-~PHXE5~XxRkhLqK+srK|!XA|N1OMMTz$fCz{n2#6IbBAaE?its<@p69&F zeeX;rZCd*0`+WwUIq$jmz4zR6&vx$%IoJM-|Cd|OoK86Fnb-a(?T!YYtAN*_9;p*} ze5ISue8QQ}V~EQ&g8m}lb{)c3J{0u$!#@%F2Y)7fl>Thsm!2!Q(%-H0 zUlCmO@YbISpAVp~bRNG2{ClVe(DhjKZ=v5mE__rEUkrTNh0;&u{}%8YG4CVszN7ym ze0nDf{nvoE13wt~ql1Cp0DLL@pmN}cuX4}(jfC^O+y2)*z*qiC`dvmfX$2mU$g=yV>xAe`-oZLyz@0sWqz37_%E`)mO`4g3-0S#ATq$Nz-> zXy{v=$0p$OekHxk z9`^C`f!_uEwd=z3_$2Tr<_o?j=y#-QTAr816*A5T06!Uc7JmIsz@GvBOj%fuHEke# zo= zfYkh%1Ny75PWQr(%YaWuyy&_O3-~h|d9grX0ALE4cxY_ZCmw}&9B>mhR{Cnsi zhO0k!63%>1zfR~?9wuxmxLsGX0Jxo}cpUhYLuFoc{67Nz@IJy{`%p7n_%E0$^oOIL zR{`GvdO-Eq{|MJ-L2CZ&wVCjF9C;|6-{XN_XT@!P2z+PwFKVA0w7Kwk3UW9ajod&u zkN@SC{r?-#e;suNMWC;x0&E`7m!Vg;$2@*YxU2s=ZYlKtMx6Gm;Bx}u#_zE7#too< z5PD#D(Ek>AJH}x;@S?4R{|SiKsoXvWye=;5@m$b<2>dQ9E-_%i^0 zvW9JhzPeTBy$$?t2mUu&vN{UL^c?#<-mV{Otn-|2p_w4E(b>g6n+U3A_$| zoZ?^ks_=gu`;qcF3ivSWj~(EDJn)vor2jg1%Ya`FeW>f@UBZ|LiOE2YX00yIKS_`i2odl z9}n3@@F{!B_^W=n75L?tca^KBfNzO~3bG&}!H>GmB z9Qd);ylhJa5 z=K`-ADfEkhZ?vb-+x4G!0RLpR@X_`73h*C{6H?i9lL+=~7KcW2lIg!6ly4!>CEbFaZguecrFrO_C6#OQ{z10rC8TiJhNk6roVe?|4ziXNB zA+yT-(+zyY6+*u`@Q;D_Bi=p*@4GN2e4hEc@X`KX4t!hq#rnR(ON4&RLgBv$_+)|K zbg1xAzw8&lpFy35`gd;wzoSp+HP3!(sqnwm@>^d3-Uxp!jdsVE3H^N3t*PC$1bD+s z(odD!ZOVn-t~War_=6VxmB7!l?968fcgg49L4Q5!uGKyqMv9T;ya9fPj!zNrV@Ak0 zG~vB!fu8|CLis!hd=}~lbU%6*_dKMnXz&r5$)PCf?y{B+^3`Y=NU=iHxzvA^p&-SiN_kHI=sy?Q+GIf(!0 zI4lKz&F#{z+AXmO!sl`LJEPIhTL^c_&l8}(b*}Jv1NP^^hYFt;un+DGJ~M%rS#g`o zf!p%%Yv7YnpP>C&349{X0qOYEA13YI0l)G^v^%oO-T#9K=kc`jm5%~{;+pV!TnT*n zQ}VtlKbK9!`@$a6ePP#0g74HTd=x(!_;Zje-Dlq=V=r2axUdOo`_=>*?{XU?71o)H#MLsF4 zZT@+OaF?76pDg^hz&xrQ+XQ?W^0pe+x(4{RR-EKf;4dSet9tSy;1j_|<39&Xk#-OJ zv-Drz>oMTNk$*{{|E~hy4f;XtqW*;NS#H&_Y&KQ!dC>prXSD$Te7wxN>bEn1+x1S* z1Ah>DIE((g3w%%PgWCUz)1+Oy{%RTU9@y==4<1!3^y97i|5Jh6b&{J-7y487midjN zKi?(XHNUqIZt6WCw;8T_@Yk?jGU!ifgS-E|gtH!~fS;@FZg#lfXF-2H2R^fbpM!X#+G*bcZl8m`0{DRm zk%xZpxsPz)N(M-O-U0m`$hWGTuL6GZVZw*D1M|;5jqdk4oN)7A@UK;G3~Lg67x=R} zr`^DNaehJfi&cdGfbS)??xh*iEPQH^ztjEb65#hDKUIoX`#12!t+F1~t~+jq@YxXa zu5$Y+;o3K_Fn@NLDg8MX{ZTuuns9#K1+a^By(|a5v{S}S$8(z_g#J?Or#kOf0Y3}& zr0UP%vxNS?u#0qE{{;B=&y@Gl{=ZK6cEh6I9a{lb+IK?fBlDIl|{P#1&PZPXlh( zDLn~%3F0#vM~xpXd~Pn6eyV)F1iTY<%Su1`7@=>!S?G0LKLfn?KZ0w0W@l39GdMr1 z?{zis=i$fcI4=i&Kjc}*`D@1tpBdKqjx6x|QBOVwLi!`%c0Jyof!~C@fbu^fCH!Y% zU(i5kzf=027NMUHe`+$?eF^yMkWZEKx>liI@?9C9H0V2l&%!#@ILza~pW0RE zbzM(t6Fzq%-=y)Mp8|gievR4#JEw*IbDa0rb@Tx6{X2!fuDdsYUjhD#f4g1wmtm+U z9I4|>IIs7eb`U;2(1#=Eiapj*6KHoi@Ez)8-&gwa3xtn-|G+}v71sW;cc;+T?H6u0 z3EZxyS_J$ZPdz4CS7m*E_)#_f0O6M9?U-VXeyZwQ|eX!olbq2Fx_ z!BwB^3;bf#)u`WbB5?aW+7-ZGhJAGu`0tPv{&pSporLSVfYkhX8uX1Zk{yH5Lm=?Oyr ziWM(f3H+1og^!N;ktYiMjpqu!8{#S}fG_;lkrvta+MgIlfVEI$uKSTKJyP=Hp3h*!gKf#|vJ&f*~#{-`TzxY_tUj+OE?3)^& zd>{B!#JAPA`r4VozZUCS{mps6Z9ncj;6J`r`l;)&?mNO~`5uC+-g^|decm~FmeAYr zx9fo0^-He26|h4_qTMfm-~V)& zzWD;-zv;n(H-r8);1|QsRsDR`g+g!Z)$tbz{y6CO1fL%RKj2B>zW{i}#X_I`MCMoZ z^JBoDgWl72xA~sX+xGyB1AYPg8l_(f{F?WqU5#7Ly2L%seT4Hk+xH;72>hF<*H!-C 
z`M%I!k9rKn?*;w>>Nb;j-|3eM{q8tdqW;J!z^9@B_U%K!pFtjWSDDUX%~uHh zNZcbd8lC+s@MfGd`XlgXmk9mO5Qo=yd>goZPuliZ3jJ3x&N}|@0>Alk>4@@~aFx){ z!umZH?QVXx;2Rw(^an#<9Rl3G_ux0ckDMX&>d*cU_!69d*#Z2gTqFE#d*(*q_Bq@~ zf!~k3nD+m}Bco1Gnpu-UogR z_B)MZ9C)2u4r>VKb<}c@w7V%0^LpHMIF zdl1IC0r)4M3$FaTfZO@g<-p(GMC?iJ|0zF|cJIddR<$>_x=HZqA4-3!(C!_;H^F@r z#{vHkxP1S(f_xr#%Lmu;Hyw{<(xaF`Z2i{LO%jYoU4b{(D1^gPT&g9~s z2>qUj|Bptyw<(VJo{mHMR-qsJfxMU6br%7D<2D%w-5>u2d>qa}DE)S~37;0^BNVR# zZl7OU1pHCxLzT}hZWlhQ&`*`arGzUDUdjCVE9hIUmi`yv$FKZU_*AMN0el?rZ@_=o z{p$kY*B>o>c7k5L9r)=t$v9{qYVHvJpIG^_dBE-aZ_fjM;oibW_mQ!83ZFTMOX$4( z6Zlzg3%&YH``#t=7bDK8di&SF?;S4mH{gA1eAtqxJ;MJ5#9`E*?IT?00$ntJ z{txtTz^+jFc@g-AuS$PZPhNhn@PF$D!KZ=Qh@T6dg}tP5)eF24`;^A{&jxPC8&?2- z>UQD32ly{qD*R8u{mI&&CxO3W#W5EALg;rwoCXb6 z2Y*@npZcZnvCjiO3jF$eWnHMAEV^Il?fl4hfKS8v()j9)!0o*4GT?2vU+7|t&)dM? z#y+Kb^=rS9b|3$SjHmjE7XTmrP8i?h0io~3xkw%7QNSO3U*1c{?Hb_wo*jPQt$r6Fi+xP5U1pF+-NmQ=(`i=0}0`(y}zmtLA2Rl>s|5CzT^7C;H zeEY|R|5W6gbbhA-zceP}FcI^Z1#a(m-veF(`%U?j{#N+E4*yQ~_alIBg8E*~_x%R= zT_Z$()E@gA@JFmVifx||{&rq|ci^`kAbd3LcQf!+h&$;xzYhHFSA@O}?|bHx!oMH+ z3)LIf0JrO@mI8lch44{1EP6`vWfv|LyLT7xpGr9IgZ6oZ1Hfl*A$-)X zi~mmeti<@M47>#VW!Oa{(e7DK3;j*jd6^G@Px!g?U*~bp-wVBcFZ8p(?fb;WJtOpX z+~YUEmu5w-Qs~dY&kFr6uyb^ryMb?J>EWw@k3ybW^~Q|ngikg0JJowDfPap7zRK0U ze-Qd5kIMU|(Vv@vkANMh>+2QZ_B{ii0dII!-b?+yanB3?TIhe(+cm&5R^D*KKMMWN zU_a};_YKh-Cqzs_PLK5;F};WJsQjw0PpUV{%b$41-|$|!ByUV z3H(^ZLv(x&dQsZl7j>~?H2t>=(oEryk7nad|8XolR0hvIq)T+FS%B5 zmCqK!UE^~a=>Lv;Ep$Cjds+DOpD%pIfd2!)?RQ&mO#7_#bwaw5xo&fWHTOcr@naw}flo0GU5; zg8oXxXEbg<|8K(oSlr*D_Qnd}!#|RCH6C)w8$xg2KfKeMg5MASLdP>sxVe7=`Dk5t z?VulbR(O1V3jEzoq+MM{o37yZ8a}KyA^9wQx2*{0eQK&z57Pm>2yuJ$E58rizGr6i z--XY9W29YOkKYAupHu!9@DC7|(|(q|C47!W9983f3E<-qkGd2ua1Zc{aX;!L;5+_9 z`0S26at-hv!d>%w0qE^>AO8e?{omxh)L%Q~ZQ);m^I}?Wa5V7tAYV{~ z{>=HO_|XlPAAK6(JTGG*S2~{Cy(9RFzlP_x5BN;PZFIl547h!-(c8e^dM!L(SG_Cz z?eh-*240Lf{Al$5C+`XUR{Ex^0sk0`$Bzl2W>&WWpBoeJEpD|iX`3y2eHULdhj_}q)S3zh$V;GZEM zr}52cW-y1Lm{UunSQSulo5fgtMI3 z->E6vQ1FFsi9Dzs{(Zt(FU`3{^wM1LzZdko!+ulyXF1{A|1nnozq-+e&N}l03FrPS zs+953ed=l8_Bq5|HWvC?_-oqF@xVtNA$*F^&o1B-VZW(7JP!P(zsdW)1Nskv+vo50 z{))8wD9$73xw)HwkHa|{wGW>FZlC+B*hKi)-x)a^_$hd=)6t*1fKS6cz-7Q61#ZWG zcH30=Pe47A%Hfs3Yu}aeQMny4T_cTdRbM>^`~$r2EX>#Ez_&!4MEg0H3b$DP@3QXi zT?KqY_@M`Z&xmb=er1Kcm+q(Sz~4AT`g0NJe*)b0U!DQJC-OL>KtFO@UhmO=4$Fa` zNVr+Qh})0Gqb~w)-%9%PGVp%`A7SMeuKcR-*&KGD_F@0+1h@U-6M)<2L^j!8=*w=D zc9qYyz~@8$b-fSYLFli8y`ge`7;yW3{xtBSqh$PbyKZEzub$SJGyFO&c zU4(x5pJkkN+};PiHT)FqXC)O3=pGT2u5QBlz5elE;iK!~L*Qpv=UB&6At=*NSt#S2 zmB)r%2Yed*Ztc(ZUlaOO7YqGEX#8s6Pa{sNa`GVI`tQMh?n(z(xZUkwSB%7uj{vvx zCd+}__Z0PfUHEK_eN+3n1h`#qa3Am=VqNRJya?RBzv4f@?e9rlvb(fvpC^47_z!z! 
ze7f*n`|Tn0^RAHrR=;U6@GmwJdqd~z3*fgPp09eZcck#Kzeo8e;LklI{8fLJenaT( zd{!IqeXVm1+wUp#cHaLo;2WaeL+j8E-AnAEQxP{%Ke~@_p5I$gH>h#c>wv#tovYn* zZ=pZ`>(YOfnTvqmh`Q*}7|#i#g#JFPUmc&n0skW$J{^ZcM+^NfdkKHlGfx2@wvFKX zfd4zdx2_Ug>&oWsBYY|lN7DY>4tzW0&Gmg>0sfTbe;qnT_*CH>jrM;5aNGag{hLC+ zE7qyXZ4>ZftZTJ5?gBpN_tJl@d)#zi;j{Nqf@@ymTfi%}5d2E$&%Y3^bAh2Te?A8N zy~yk7cpkl<@Xx~U(DnER@ZBF4{+dTSV}GIF1@@%+Bi9hlcJDCD?!6E6_Iqt}fbbcP za~wK8w*%h<`8!=-?*l&u@j9iSRwR6uBR{Kl-TlDF{4qS9dmbqCXMkSc_xr&AK33?J z{~^UfKOO#s+S|7Qx9^{M3Ala!WpqsV+>CtgXiU!cfNu`FItzTC5}~*AQ@;ale?R(& zQlbAL>g}h2&tl-`!oJdWH!c%;`~JZFfq!tfjKllj^Aq5Ink~5Mjb*^wnni!^2l`il zzkqmxj`Qy2(ysko#c9Cp?|pOuzr@Phz6$)aHtElWXm>(|@c$fj*E)}91HTRRu4-40 z87uUUqfSrz`7-eCokh><-aTQQ(65>)@2mWO1pEvu9 zB3%C+uVnu0I39eU57n+X6S!U1bPI61{(J@Smwqkdrg5#U4if$wAkLuUvkdqT`h;HJ z_rJj3`9@eCjz3uV*!KBDz*iN^xT*ZSM!4&JhaVz*uCnz1Nx<#zbL}}n=yyWAPWyQS z@G}pVeyaZ5?NFh=7;-)ugWU++u9vtP_+qP0>}}E@A;XuyLdxs;8Wnc zT_pGj;ESt--oB6GuY|kC=YOEL?^`~8;)e9Syj_M}j&s>6Lnjf=`@8)eicKd8elG4O z*S!2P;LnvwKXqO7R|~y;uV!&v@V>`|{#rDA8F0I9ZCZ`c?+ZEnDd_uw+xHya2;4qz zyY*z@GXeI)W#H2Td<6UvmFH`LKX08we+u|XIJc|uqKYZPzY6(8-H(n2ey?S3Ka>L> zl@LDmx!sF^k3xQ5$GLr~&~I8TdPD0%H<*U^#l4F~c;AzO+vk^G0^W&rq4V11OCYlGH!jqw{8Ia-=&|UfbR#q9rLJq<0jztckTB* zT=?|C9#(zwDDYL+$++qF0#c1aZ{L@45%8ywr#lAySql6-oafbf$utQcJ8$(n;P!W{ zRsi2^w7jpr*Fnv~rvmX_9k=6v+xqrR;C7wBt}}$sf3bd54!;L{-i|W<+V1awPePnf z>4(h}KKGv}<5mGXwj22Wups{4eARs6qwW42_$R2RQhcXb!e=q+ zX_f!e!1wxHc-+2vB>IVcME!+pfZOL^eh=Kv`;VM0eC|S>s_OOyz*}~c_liRvE&*(7DP_lkc6{7~5EI=?fH5kC8(UPagG z+rTIOL;9nB+!jfp|25=I=j9&Yjjs#6?hE@JEA-D|zLft=;PyGn%Yh$;x-1>fty03r zzGtbFaP0$z-u#&k`gZs!+U_5L+wr$uT7U?=G^=tkFe9Ucv>v)c96+ZU8 z3d?}+b*s?pdmY*)^v?j-ab5~MfjUy<|2*)$5&zfq7*C`BTM2(%@5d1C8lQg9-;Z^@ z4F+$6cH#3%k;v^R;3I&Kfm@9ndS#h~@fltJ~rg@5A0nZ{ouJ3j3 zJmF*eSzB~SeC9#a4XS@Vj&R;LBXwoKAHcq*>u~|_taZQBMZh<-{GIms!rwmE_91Zl z-1LG4LVxB4qOa7x-LzBi`>goeHCg(vamAN_KZW?e@?X#=d^WsV630I$AU`l)(#=Tn5<{yy(G;P;`PMBD9E`d`X8s9$p(@Cz3Ru5r{)fu90< zK=u6b-;%iHS=K$nmlMwKYoEtF@4fu`{|TRmVGro|pA7u@dqo~po-YS}Ao4^? z|1|LF_zvuGXm|LTZn@oqaGsYxSoJN(0Jp!tyybUgSO@L?4)Zf&4{54auYpLve( ze-CvOR32vj=?8v3^n;G`LxgiY|3T~it~WtH>p^K(bzSxPexk~A+quH$krEk)deComp5Uh<|EO|!CGbT!=cxF*z`u+9n!eY(^M#N79rwFyzd|%YV9EARV4E*-5 z2p?_tpzjI)#uo)w`ELMjpQk<*c!i~Bj<`hl*!$qK;FGQTfjfcUc&5Csp8KABnb7~%(vueee-PuQ{rLxQ`#!F= z%Z1OM{wMPu$M|0eyaImN$-u{5A@tWwkpBD<_;bMjY1wrKfeS0bu*;h$@p=j9}9mwPTK_hsm)|Ob-mvX+&(YXdW-O}{fK`6 ze+T}B>g`K^0($6Am51$bWjy*%xxvY*s*(Pm4U#l)`+M160Dl7YEgB!V<2Grx5BI#O zy-{(y;P&@hZvbw`|DOQ3u+ex8VawgG?pCo-No4(|Xzs9Wf@Kf~`3 zK6V|^&w<-@eSZV~9?nfHLA#B23LpD^&P#yX--rDb@JIKQeyV*^e3$SUjr$F>pGygM z$=g$)-wtsGT^9{M6F&C$e;);Y(qE-r9nXrph2E|w{1Na?P}ih!iIu>g`CP_xG{)i3 zd%))c!TW(<4E*aK2(In^3iyv&aUeC)cZ&3+-cT}OQm;ja0*5%hK)TJ3$pXRjxv|C^)T zZGI{Ew#ZYcU2!1rub{q8<>VgVyB{cg^!sb4-!FU)gTJ8m%r3tY{9)uHG;T1Ta996- z2Kv#^!?V%PKLg+CdTI9;z<>0B@VD#M%6={Qqo^m;eIWz<_qgv!*YD;J3jH}a2OR_d zDZuUf@qP;2z6WpPhurTqf^fD^7C~>QeBKBAZukY7e>wSKp`Wv2SVZUes{V#yu+9iC{4zGVq_fb;PyH0(q+PD>poe(s&DuBjo@}X^EKe};TO+AKlgoH=)Z})r^Yv~0p5T- zwbqSn_*r%!8x8t9G2R`OLdEYqf^Uf#S^~M(v&hu`6FX>I-4X+BHoxrE+NumFw zO~zpc#`C+tMgPPXlg$_v)bE3H?aKTXenr0QkdKN29R$7$&7#(xm{mtp760KSB9*ZAKBdbLGk?Rht! 
zD+%ZEc^r0*wzcse1-Ikv-N0|&OXNr66(0caL;areIratN^EK2rCh%UD0^bC7pxOz0 zz9{tl8wr2qa~AM}@x6v`fX}tSi*Sxl-}jI|37=0e&boiS2K*tbKIF=mgx>!C&hx;B z?;!nA{=;7u`u7eO`PB6|9k`v}ycPH-(EnOrec+#k&j|R-N8-I&fUlS)ay}aPxxkm~ zFa6QFtixUrK4&4{p?>92z@N14Dc$l_p}!t_N%w`>!0qphd2sg7v=v6@bOFKeZPnCId-}5 zAMsVeNo>qNe*r!cc?Z>>$NtSd4yO>#>)MWcKMdT?xKK+P~?SsL7 z0l2NNI^PufZOTP%F9rRUD+E9DN?9+Ouc-sR5$vTgpkD}l80>$2-%b85e3l^}r2U)& ze8qvnU*q|m!0mkc3gGs4+rI$5A?i7m|CG0+-R+JFulFqQYWS^ck9`c>J~!I%58>1P zXPIB6e+zgU?6L6}hskdXz5U(Ep8&VN!~T21UF+fl&`-d9GT#ON6aFduw}C&U&n^eP zG0rcmp4t8#p;I_T@)s;ejpOptZfp9*5eJsvjd;1;eCt2e&?L*7OXTXOq z5V_U-Maf4(f4+53>>q*mV_(<_<1qSTp@05vd0*Y{t_FS>>Izg(J^F9=^D5vCPf5EMgZ`9Hh0ix2w>mE?fFA-qus`TG{kPCBeN*_TA2E|~)yW_= zf3k#gJX zeyV(Cf!lSt_W(b4Gtsx&?)$(?VVCOooc&+7oqRdrykG2(Jh<|?2e^G+?2o|h@4Woq zXTs;!GLbj+`>q9U-)H_y;BO#*eI&;DoBtC&D^MSz_Dmaa+wS@?@beFlaZ`O&`ML0U zeSzSkF<-L@SDh1__r;*!2BEO*)US0&gEB{Zv2V>%%rO^JwQ2OMu&c->(TDiM|E@ zyaamt-i(bm5dOzQu5`cd27Wl|h}1uN4)|x-@3jBtZzz0fVb7=@*kU8WXCq#r^LQ$7 zyYA~v;Flx*sqy4#RM6|1-{T2qIp6YCkx!MYdvehK4)kwA-)eulz9M|~K3c~ABbm-& zTW=!x%9Diua~Ra4fGX8S_|?OO{v-H_s%MHe6Wl&;^&Q~1oFx1;Ui9_Nh2Fl`?rPxn{y1j~q5t2$!bjin z5#aVdxc`rTz4s9CgK-W{z~}8Q$-ao_~mnj{}y=YD_<4*O>obEj>ETruRvYIXwYxHozNeJc=7?j)4*-J z>kY-RkE>lVc6;Gte?Rwh;4}Xq{a*t9D}mpEb3Ga-IeQ1;WBW&&?iANY57l6EhIKG$*n0(dR*Zbf*npY1I4m*BgZss~2z zBKYIKlyR5<`eCho*-Um}dwWsqu(otxdTvK1o9;_yyOOP)J>BWduw=5WCpovXrzO>y zY|HlaWs<3di-xuKboF+ov+1^?ipsKLH&L>^qq`%S>g!7_PNuuFeT#>+_oceh$+m@E zU5n|Fz<(u)G8-aI&Th)q#wvR|(yi&^J2L5HYfpD3+qbYaJ1jYSc5Q52GC4OpuP>cy zOQu@09sTKKZ-2a`IErTb=m9r9{ZLm)PjoL#wk}#^Xwr*X=cT&mrjw~|V%*jiZ}jR8`)KUv(rXe}(J{BXr!Ou4L8IK!!*gQ|0kdvPx2G0%W|PEWL9(}} zv!iu!oXN&@clONdN_BU~N|KXn>nB##CX;o^SY>-(Pgin&rl&g})!1y0gQlV_olSLg zR+9Xsk{u*@wG|7hI(z4(!mM0mlT%$?LRK<2T|2gE#-vH{rY7?o1F^AllfaVczP_Hm z?jG;AikhlKZM@q1?^wm$q(BkUSo!q$^s3t0`bnVh9x5rHo3wt5K3&>S)mSw>nW&pt zRh#e|EGeCv4E+*)vY1{c(q!@6qz^})scdYvpNLh?O?qhLfmm6ht~uUVM-v%uY^-ne z->qye^*Q)c^x5J`P4P+gbKF4>jyzM@9FJR1a0fj!@<6OSURRy0uSqt=51$dQn-uqZ zNTYB47J0g)WO6*&P&;}0XVYB)lZ8EPASP-4w?wJSZqdJ(~O3O`o`wK5-EwzO$PAj zQ>FDYnt7&ZNz^wU=?x1@dgzzvljEpolWG$*JBf(=mW(qUw|vOH_n(8=~JoSCnsT)Zv|R6IgrPZ3jt|SLnT&*W|{khA9z_)_dE8QNHEDJ~onI%#2DVt>c_jQI4UvZk$mU z>7(`5K1r09%8VI7CTxz^HPtspSo+fArp(QmUaYFNFo2r!L%&3xEGeen&3dDk@_OSjD5b5yMG)Z= z8|$^B+&m^Fw24Q4jdI|bG;`xrAu`JL?-4E~JQ9XV=Zs{cE>Snx>kYK1izW{@Sxb%$ zLv*s?X3<2s@fZatwTIRgB%_s!F3Oenp#XKFDH(ZX*@v7&QGP5DOq!^#bM0v&5fOqY zAAMWqGrg*=YI3~MAz=R7y6B=@xpy+qE-`_;MhK#Oc+`YUry<9vIf?S|lLs84BC{Yr zTSQTQtj&X@+Dl7VM4RU%jPhmO6C`(ui|8IVRg@!ZbD?#c*s!S_BvD@e?AA1rm1&s( zq1g=(MEUSuEe-NKpkEVb)YQa%^B8+|4zegu|2@U%ubWX@+t3`~C%cyL3z0;4mHPdw zZ)i?TXWcugetN@jk}p?Y z2mO|8dZVY%s}t@x|Ldn!@a6es7X`pCLueSx%3Ad@D90UEE-q3s`MR7zHe14(+g=nJO{Ql|Ns^RSO3--^ZigNV( z$BVS8G5`35X`Y#yD$aTTDW` zR*)ph%h$d-o5@5qiE}tC2G1IOQx)J7M)~?0BT+{?bkmfohUjzz^-+W@!ZYU2NTMmU z$8p5ZTRZYweJ zFI0)dvG()>ywLEkQ4apQvebyK_IZdG3Lf6?5iTXZ^flKv)F&w*5mop>Ikc&w9Q~1s z8(m2g*Gyr9`Z}+3p^>tPqWpaQ5(o(Tyn^~AKoI5Qk5?7BjFaoIZA8>eaO7N+QO^FJ z&oQcoS_)HB7};JjL8*??MYxvw{k2vN$2X1mSuKvK(w8hr@g!u+|G0vMaKuAjdJ(*SC4yjeZ0vL zIVlV7v^j_){7d|G&%!{_?C5?Q)Q}cYl%Kzc*VVIa%cdv=vnLVTnt**C^qU-n5x!-C ze)>$Ar%q=b`2f=sP1CEICr$BYwLH}Kfgg;tTk6kl4P^saAjwv%Nz952Q)qVGv=Q#H z;Ak}`dAsv-MaXLS8;bck!XvO3@S1d+62ZN|riya(dnI`qicdRIo1s^75_+#V z@ks5tEiqy-5BNZ|9ku{mk2N$VXr?LLX165vTmg?n+tSgmt*@ea<#r`w5y`O!Mn4yA zguAhC$&)1|Ix|7FQhu1oAAgY=CsC9i$MpiFA}Ex*_t{C@(M1#G#ycZZ(oW&Fv1C7V zkVJWD&%^@cj`1i3q!*cK!^{Mne3~dX_Q;sBc?y>%Bpd4MDOf=ZBbd%XgenJFgr|{S zCMI21Yf9865pPmSv`4}d;N{}|74}rLMYbt&J?nloc=`oB6lt%NH9+Lii3y63xLZRetbj+N zZSn3Jc@U#nYg~4F)?}Y8?A5T=Sai!AQGdjj94kONOm?k|#@!5aWu- 
zk^Mll9kwD}kCE1%Tu;7;-H_Om13wXM$*BuO2{av%px92NtsJ_*@l2#O6IF`zk~U*R zJfy6s@b#GE!Dzdo9ZKho+ci}uxtg{1ssWyhv}*jj$lC_$Ts#{FmR3I6D0kD zu^YJ`i?r#spK5AnQ1oq56-7NGYAnZUh&~c&tIRozGwSkKYq@rJ*F(|voYpT_tW!;Y z4)ZXVoI0P1w&;9AeO9(FTWE5~sq3LZwkJAcknIH@inLeC`CM;F@!_53#wtq1xNFT( z-8K+?!L3lTg2EgTfs~wqqm>YSq?NE1<9C_SK=np(jVe|(PL3oDZRyDSRHQ`{jSBTs zCpm{>X;~-Sn)8WhOU``&_R60HHbUf)Hr#q%CD%PW18BkELy`8vdzk4YI@m-7 zd@hw??P0keiL@1C8;h&zOz4#pIi4|EBKB11$LRCyp;RRr8qD+Ia6c+@!oNm17&)XW z->T_}Nj@i>aZA#{!oYbo$YFl!XbUEh@QD&tj!n17_L zoVia}T7^CrZPnBz<>+pnlw&#a(*-{nZQ0})b3C6br&o?_V_}a)+ho0xYphItOmNfp zJ9DreyP!7RP3QEP^X$4NOg9cknW>_tVm_(zq51`M1P9l?ktH! zjBZAtYMW-xH-)Eh%^R#}rOP9-=|ocrr8UZu{DSmy@u@Va4OFqzTi;wv?>cY?aOprr zFHcw>72hRet2+iX3ga@VMM=Yo27)f)=uUMK%i?6oI3Cd?|Jp{^D74bW4J|3YcR^Zh zs-UjN<1N(lLG*b5rfc{rF?o$NtEYGui(}(7ew9kP86(}B?VyVw2GQ5GJif8Y(D<&= zu)2AzAEUY2vgs|C7%i6!FPNN)k(bfn*U_5yEHkySDwdqqI@lGw<}DXGCbS5s$as(n zX3e{2X*lY|*e{_MG(DsP2K)9>E4>wjxM^)sFyCbDR7fPC)TdM zEKbvvLp?3?Y0E8a>suGSs^m_YE1U+^r=gjyl1yu=JK5Pm`(Uaw-Y~7TtBxc$R-Elw zknT=q7x$(Ivx5vxpJJv@bQ2Zb=9F_tijtM3!{~p?_;ZPw3@v1VC6dq!Gh@Escxo49--U6?vJ3uZ{#jg_)H*Ghrw{H|OPxeRng zEd`+0j=?>yJH05IY+0O5Q_L>e$JfWw#vZHSds7kLBqt`5O>_*#T^Tata#EQQj6o>t zA#1uaLle|OcM5msHLUGv8M5}ra{J`L=52lLnf2-8;u34gwNIAOHM*4l$Wp-E6Bha} z3iR^z!)67rqg{ChNhT(c5$+w z))W$;^hEP?IX+Jfqnq_LHC%#fPFW{CHmFOWe|N?l2lCZ;$&A3-7%diNz}F! z^XjflPNW;F&85^y6QH0x3BjcXRv_LSOHvx4sWeGh24Yu76eY=-CaIF7L`gc);7>$h zalT?l?!;uKI0G=LB;L=>Py&m4BH0^hi1Hg`h6GT)MmZKsPUrQ(_gGV`KzgW2pZq;b z`q`YEUE)A+IhKEzcPn*=;gBF6IGzerll7Se1^L)G_MycNz*u{ zTGKSjtqT%US)vdf2-wTldG~r}`%)cQN?&sBl4ifLC7JC@ zr<2{ZYdW%-#cgRknx<=V%99Ng=W1Qc^4nij-G6 z)urny9Y5%VQzkBQL?5+OFa_#faw{Ij+Zw@x(%dR8HE0`PkU3g zw=>a7uNtF!-02#7dHYuGD9Kw>3so`Ef}~|hx}YLS(ZqZsHZ+Sf9-Q#r&H zkM}oH>t3eObUik1VYjyrkcwixlL*{UAEOK4>27uoUaN*lG)&7c$kF6$iDT)+X|5C0 z9~Kjn8z7T`{_;>IoA_RBGfw_p`9sr_n6oEk)DxN->WLXA4$8?Ssbcc7(NK3(EKotE z1kD|7{qfRlM;oSeTu9iB7L1WDrX6!pN7uqGdPAA~RHBQ;QX0wh*0SQr9Cp}by18!B ztXav@qB1aCo0{9-7FaJ=s~!)Gcm-YN-${4#lU#JAyIK~u`^$GednFa5%4ttC??H)o z+P1>NWcII)b`l`Tza~?O1{z5k5gKWFUgp~DRE@Mg@Xlq)nuXo1#w6kWs#?a0))!S) zu=TCFfR=89S2dAKBB`yYIJ$)(XL7Ne*UnVhNz0R7N^(+?Q%hE6ZANEPb&`xBnofE( z`kiz_P}|I+_{G$wI?D93HlWR(?T_vr{+xj^!5{(2lA@BLavlxlFxHbPZvkoukA%i5 zaH%xaE{X2;9_q5#g5?-j73EwbC501(?y=OZ=}A(ytn|r{h4^gEP6j z&b$qZ-m;h^=eULRMGZ2Mnlb^msj#itWe<3B(kq}hd^dZX-6nGdWu(nxI%|TXPjB* zYy(WA&5GCRAX|6HL0*9!nkGW_Y6;ycAF&)o340qE7hd=UXnC{^ui<14eFWDNRiOm@P6pjbZ{;#8GJ{tqE}6@_ zqT^&PP`w`wT3&Nuif=BkGoNdY!-cLvJHVc}94oU7D^oUew&cuy+>;?U$Jl1XhgPT> zAyf~TP0#J=TbyWUB|Vajm3zOy10rK5Hr4}`oE4@Qn4nmc7wDiUN6a{EP1%wPifD4p zVXC#23Ss>=v#Ggp`aYCAGx$J8sYB_~f~9@4VOmpFl8m>eiE4$03^(H@lD#l!Ex!gr zF{vccNbzII8Nk=A09|~(t^|hrbY--rrGOJsVaXVT&Lqg;Ak|nB!^U5x}DWjFpTImk~c^ZuMO@qCCS>rhto>P z0#_fDj71K$wDv5d3g9*g-wp66`-|0`J#*RFNOX6n`I~p_u;frsG$Hn+mPB?CLTG(Y z*ZBPqN!U0NFw~PVN6#T-+eRGG^489D%8N#>=^L03C&za=;>A8{BnU?D$vv^>aBXC) z*+eDNqeb34$E)SpMeRv*?;a#Q$pz`fR50gwTY7|B&u!j0o(V1Z_&6^@NQy@zjv<#( zPfz$`8eV)u76*ug0b~O~@jkl-6ck8uSiET}hXAHwL&-Jro(zQho9h_En%HK>XZPZQ z5|8EWrBF|Es_$t!W{j-AphF*yT;-aUT(9Xgt$DGsF&z$EX&h~k1L@d_CdCqsZEYN5 zrp;DD=k%VgtP28!^eb5-^sQ0}gODj}!lzIw{dDaMjc$x0P-F*@UY$!Nr9@$l9Ay(N zoYj$p&)D{!zT;{8BZb5bH?lcZ(U&&iGs>RPCsOISQi6B$GE;#{tBELRL+E1jO0;!w zdAVY?gPj4VpwaM$L$-X!)g(x`h9wYAPxPDUdpw8TF=Po>R&ZQhK9GvR@v20=Ce>7siK6Q224rW znxG!Z06pp%&=g%={X`-8VA-CidlZx^%O@XnAjxG5lL;bmXOZVncsCXg?2v_NN0-_d z$OkiFKnn0Qa>$mt5iX^&;v54{TM`*02}&ul zq7H@-jLsze%KH)I_B5O0e5qudDAl7&D=zKLv>=f2iy7kb2+Xezv z8_=qjd@5zvq}ah56bbgr$YW>@sQ!k|bRTQ#AA1%jIFD}O;7c8&v_YBOA%s zhQqg>sM3}~&R*1iAdBCj3-U{Iw1dw1jMEgcKO-uIV&k+O(;Rc%f(%KlE43hvh-6?) 
z@<$`7LVyaAsn5ZRNXipP^n|6RgN1(jYOZ%SZTq`Ck6ZlSHaFdeg2))&bSr4DEdH@gRQ&9i|Mt7C3* z>UOoFnk79H3+-u_GIi|h@eb-&hDVI_l8zBdG|K`e@<0w%*nkSAy*$^_mnoNK)5d-M z<}eO8GSt}_iVU$j4ac~=XflP2B+1>}s{%^bQLxER&p1(KDVs(Nf)=|K681a-Ar4ONS5W${k!ffI?qiG0`IOz2-GeOPGka zR!fUoHdShkCYlx}XOTBh*U}?@y~X!|?I9Wx<17|FHE-0zxMVe-z#t1`(BU||#7%mmn(9PybT_ zi*&h_|IaNLzW?vPs%tm#~A#tGu@WTrex&S16><% zJ5i9Q&2ff^m(5;p^hyLe?HP

KC>mZWF_sowM$DIw+1+Lx;X5D4(tY;UGMedhQ_` znsw&Sk}=(*qFca`_ly^ev!^8NebOpWD^T+`ErSlvx&31DAX_+pB~|1UH0G}(K(CLJ z+#6*|=t|_=`8P5&Xz1CZrESm1W1=UMtfwNJE;^YLv5aNg(^FRFEEI`aV7ej;&_slI zgKeW=I|(W`l&v&3OLfq(DCunu_bz{#C;v)j5?Ky$+Qp`;EDdy|s43e`g@&1U3w4C@ zJ#@gz2wg;egKy=GvGRE721Wla{tzi_660~01@E&YLnF3iDp04TiozDUAyM@eBq-UQ z_IP6(6<{?qnDdM4%nmK5oh*}{Sl+s5QH;tm+)2kuwG-K@@#^|bRsW!jB_ry5SD2o@ zb(l`3QYDlU8frg=Z^Fv3O`P+*1lK_E^-Cr#WH~r|k&1C}{1~Hz7>ONQu8GFMik(Z% z1n)zjLq8@f7&&-D%Sg`MXyU^b&seM{JC71k=DQ{vcp7?QrTb6Xy^YU~0d0f?pka;k%vN~B3^7wP&Nm&8*X6aDdn|$#_RdSqIGh%SEA}Sk_ z%kRvtDwZelbRA}-B^0TM_O^UeK@+*>Nec1@zUV1q&YE~fNys>g*c6tvN_}({Z3nto zNDQUaT)+xr(do@lJH9RquT=@dgxuBA0&iq$GzJ^_(|RS7xzJEQ?S=k?gmNuhT~z44 z8;R7(-DP4f>#=3GFF2stG-M?&p|wUaZa%X_@%QS`6%N!LQaB0Cl&XyR%iE=Lhc>W+ z3!!)^lVq6-x{~A*bWu((!HUPfwX3?9N+{?p#H1kOrxx~fVciZS!CVH7@fs*#5cie< z^@kKP-3zTJg}JyTx(>-q2=2!Yr+`9kq*l$5WqQdq^=g9muohwmxTa9w!p{r3BTvyl zgImq8soBxp+86KSrApc=$4llHtZ1*?783P=WYE7ci;N=6ROYyUSU)ypy|dyJKg)Ag zoa*!gXT=9|d|=S-yL-%OL2w$BB!*l@y11rFLg8|Lz#l+tyz3xg9r*G9&#pVxNXlf6xp~2k3$;o2z5!BLJ z4>SiI;7N9-+p}^hi`O9&LNaBbvT-Q z{g7I3g$M^567n#XOijT7J7$6)o2(% z%nG&soNgNpFXnHAYXqJFkuBsiHbEIjqRB+Di7s1Lqx+bPpkkuDQ06M@jNP0q<77H4w;{ zYk-Y3PDU<0Ilhl<^2lamtio`mK3%9Ir5<#rl-HD}&rOua3*HfP!y#Z-QVzjVa-3E0 zh1T1OW7Ze46D|Gmv8dZA^fQM%VR8N@UUhS8S8Hc2Sz0D@soxQ)OU9ICJl;;{G&{X@ z%+5u7FF)Vf-_Kkq<>OVFyZunIcLJCEwN9n~X74b3!5@2M(L$KPORr071f^HY!$>mZ zJS$!BNvp(M2FBI%bc+3n5OfLH9313+gEmRD2H2+<|{#q@l;yA}KP`jSZv(nA?VW7O!-%!OxUXiar z3p0US`koR4C42^t*7;Np*wEeD9Mhp_trieh_fousfGUcj(Z7?&$SRGZTHS5wMM>O! zJ}c8}f)RYnUScZ$7b`V*JR}KW4N4{W(4d?g<(=NlO$n(5e{Qsb(|qYZJV!+_^FapE z@D5F~zO4gmc5`eLgPqGYiD8gIUgh1@#9kmpz(lPS63o?d1z$qesqJCTt#LIU>274- zNHkH5Ao=zJjklqVIO1d$XZo!>2PT?0ojE>GCuXN)$#mOsE_Vutv-b3Aff`!nY;Mr; z$!|94Rc^dngw98<)2)^qkQ$yWH4}4)f5CQAAWa~Z@u*1g0xvm(gyBF<@M;1cpkU;W z>~hOOV@sV>9?eK!PjBv`4<6<&Eb_bLU)j%}q3d zgFU@#25L2GeFyU6!>ehqYSm>_nixLUMy!1qD{JqdqqE*&SS}t~*At#wk5yWFY@n1K z#COncsbhvcyFO-$SZH8jr4VKhDzt;ii`7c(2hUbRKv=W8eU>3QxnIz&JAqBAk+cQNkJ+j` z+ZfxF`k95guI#8<9x%ta0e97jd8TwKrj8vg@6<=7)BxCChLSYuVYcsuLytEWt_lB4xUY;^5 z`rV7G1p-F-w~a!heif~uFJG>C6``wKgkC&+_}#vixUMNfri&Q-^E%qneXQwPX^=T3 z71DRC>;i)@KpeM_QQ%$Ek8A~Xpo#SNraJl(tKrD5iQJhQVO(Kqs-$w{bmvcl?W@crGdywBqLQ= zVzlsM$H8#WIFmFn{f^x7=h7SDyb~$T98yA4z*6lU^B`T!M+@{MLJn_djW1F%$G^CO z+4ReD8SJ&x7uteKJqH0Ei)RA8-Z7xA4eUo3Is*URxj=1e`z5tP=ot!mO&A_kW z?O0m55UTyvZ2TH;on!#!{-6ehy22ys_sX^H7os@XilrOPD622!1S*eQ3`Y?V1nk%z z%9SMcw@L}%QRTOZ-*G@+ddvI9fZvB!AJiV_wOSxhX+B8Z*O!9v>s$gcfqT=S`4IkHB+VBaj1HqR7#WCwo}#g3eIMMdjsQ| z>Zo9^(bi>5CSLY@u8F7KU-@?ZIH@|!@|S$*k}m2I`_&sLU8#{oa-d}3OYSr&eD!58 ziqYI3M1rtG zAN0&+pn0#{+~n^A8mi54E}PdXEoUkW^^O^tnlAnd&D!2VL-xziY}CdE(sOB+nGsmz zjwA21@Fa!msyqp9?MAx|y|WT7@8keWIG#8(M-+j=*W)tu zwF|l1h`)Z+*PZH=yFe=EXZWk1^ecbk(NNGw4$RFxuW*=M&8=*=JmM^UnVnnG)*;*|j>;Iqx7Iid!XG$>$)u zMJ}ncQ|*p9AtN%^fEv%GnZnLPb5@>A*w#%kRUtbf&p{G>vv)O%2C77VXi8W zcr?|G4tyAJNG6+ufA+mG9CM?7h&#?}E1dmRmn2Yr=X&wgPmV`IL*~XIy;hjER5b|l zk3gYbvL6a5w2+cj4)4IZ^H+l}t#P|`dg@jx%>47Pt^wC!@nay46=2R)83)yT6_KVH z;Q`n8v^c3vpCs(==vqh@@iYzK$F6aDJo@>orVK9jb{YH>;cx|)){aJo(aCEe@aYgT z5ZFqf8bvS63fUO+M&Ot$|APm?J82w2iiSbA=G=CgtP6D8(r~<@0XaGWOz+L{WQ5Zw zp{j8Xl&+dUQ-PIX)Nmn{Vf=}+U}e}^a6H&*<@hTFYiNbj^3WiN(|s>Y5v{%^B;ktO zJjt|f;3yS{#g>#Q~}Lp5K{7Ik8?l<%}I4L$V1&TJ8}~0S2e_5 zr;v6#x$Qm%v|3pCO^yzj*)m;Cy-l%my&pA7Y6&b&RuDBvlV=1pAr|Et+3^ENDAs> zC7D*bQmoTl(9vnW1LwMJP6Bhz&wT}zfO8fHJBCg>l2bRdlkhq)`w1Cy^k3d-UOECs z31QyvaGF;p7>9A_i(3IxU*nMO>s#`>>bd~!xrhawG9~hLa0~T}4xmsZT0R#`5u%Q{ z-RU;2B6k)v(Dxneng-b*MKal#^pY|D=}F4DvaE4k2w6h*y*wPr3ztdICc?HeGmB2G zaSm+9ijMM78zhosV5q@F3y^YEoz9>I-*{3gag826%$n+=8^Bni$x4aTUHIube6O31 
z^Og)?vU?Lz*nZ)ZEuICka0WW|PJ^d6=}6hFUPx_r2`=t6F+xo>Bt3WBN;^|)nvsyh zG`;k3Khf{r_t|B#t-$MG7m_nJ!Y#3T?DIC~poc6=?E9iFj|XXDZ`Ou3ygSw#@?;m? zzM7hwrle?b5{i-yW4`oxL7J{mT-=*x`x2+>y|uJDU!kL0eC69}sHo9z?e*|?ebqwK ztvvr67V7ZFtU3j`xYY9{gXRHki+-Soc1T;+X-2FX7uLH{o6j_VA(?-CKTu40B=kv z1j*_)?r~t%I?Xlr0?~W8S$(XARpsAWz?X;(>X5XF1E?8b#c|h{y9sdIGxCA6LO#eS zo;)pHlJlH*!4}1&V9ltjaA?F`!Q{F`S=FAVhd2OZ-=k(K-h!IlB_pkD1p1*%do2u2 zver+M=g@I$TtUdE67^O#*GFxN&8@lS0u~cP3#^>N_dAJp(Beasx}@0k*<^E}Dt&iH ze={`@iGk)}T7GLn_c`AgcW$NRMJ4D^J8y+lKf~ibk1ZYk^onOTJJ6nf9KO zO%6w{sq;i;9hmI{ek9nd6SEe`xL!Kw7A!#YZDw;Sd9b5D2zy+G-y0`q4AB4H$$)A( zSJ4eyAa%5yOWfXYKSflUl6-5k7zNQ=yX1=%kYDm^#F0z_9&}jW>7aEPpPyV#bNtGD z#6r5G69)fU9dAzf<*#VYqdVf=#xp24j;B5TLvw(q4OI$sodm5iew87}i&auVGCpBD zIH!KlbL0+kaa>q*hPhge-il^{f^4CCSBLsSXU$%P&TE=O0{QD+Cg-ht8QO)Ny9&ei zMDn+|+HN7moZ|ffw@}y108?H_dlEy?4;izTOiVR_g;0Zffdcsn>x-?XnHTx?D>+`% zG*ZSlpz?8U6fp?hrcF*FZRK)TLua~sZg!shY@eXy<-J;#!vJNVvEsm~L+`|4 zpe0&+bW_eExx<&-Lo)5?ZmXJ(271RXN`96k8`$9JPf_$YL!bFe&5iT-f=-|g^M{XD z?=-V_WyZR_5P{QZxh_OlUvAA@kX}q?U01JKg0XT5j7nUTYP*~f)QU{Y!meKWbc!Qf zx*k|pA;VU~qxz<)J}y4;F0%_99v|`qi&qHAQSNA_n?rrUjI$%Iyc&5lYKT7gO!paf zQE=OvR_hA77=2Z05LeLADy2#cTAqO`==hyzHRtuaa~XI8FP? zciL8>vmP8@YUNu(wa_ES2z(+1Icq)nYuPb^v^-7dx1i4i=bmK=M%te$#}^^kQStnq)qden z${`kSW3B;FK%}OC3lR!lgTXMa{{k8?3x7)3TbCJQ@09Sf8Twcn9Jo|>o3XM|EuHCj zcsD32$D8EX+5DCZ8#?#$p%q#{B?9NzL)U8L>Qn9{oZS!!Wx!HbG4;(ZzlGJXlS?!N zf(<=z)d?M+=bUt&Q@pg1YZ^VUv$@g|gARu~16i~_`Q#T5fjR$0nzq6Go1C(sSSafk zGV;^iEGEg0_Qb4ApBYa|+NS9cSr>Ud@g&C?sF(ez&V}hXUC&49TPZs*>X_*5*cgA4 zXq`n#rc(WXdD7EP@)Z+LBk`qYQlM%s6ejP zfvh&G&Uj&;>F^p0I3})@x!QMWI7}i+ZSp7X$bm}trRn4io%?PHX;oE9foKvbdm7%g zQzk;+ZDw=CcYE zSu)=p^%Gd8=&OJWY4RA1v_bplJ95v1H#Dx*r^4uKgf-b03!5@8-M79J6VUpPoDV=n z0f{{=0V0?_Kpo_o?)3hbIY@TA%|2Z3#hXCuyi}i8l|)5^6k!%f3hK<&+}@1qVkqfZ zNcV=fC;L*}bJNwuBE$Y>o<1(YXK{SZ=l|DjcL zxH!_g{=ghPl%(i!T)JhxdQTqppbc%;KIv`VxszaIb4)iSW2X2ZGB1%RLwGYHZ4v&4 zG+m8Nvj&}*7{II32^eT;o5M<#EZ*KeMK_Dk_1JXbHplx0*e)m@L25{94%m_5{ni() zm=ysTbf29jV!(moVsso{q0V{>hodIG-I9N&Ep(t*!q=cm+0;>?mIL;{mwbbZOYeDR zX8}`B_pJLq{lE+)Cx9P~py7R`uHZ(b7uB8E!zzbpIjpv2R&YH5RZhAKgLG@(#|FfxV zT8~z)&nioc=GgkD4IyOww4O|TlxB)HMmTpk@S;%6-W!a3jY3&uXp)AzO@s2T8%6HDDGsg z9wqBTK1n}li^pYZlGO5kTruEx==s*1)hQgAT`@VGZSH96kC$dUOw8C9Gj9Wr>@J!n z@=BbHk_%UynW$P}5?in@Uy%WvGk9i|pykrTYwFmAZte6M2|IT7 zzFd-{m_S`OdLd`kxh5|BxkI=gKdf)l!?QE+?XHR z25)4bBD1jHSy%+fkwsI}-4oQ+=0xK`I|c$1V~qvwecs-im@m(JD`2&HntibD%IYe1 zxx|pP*m^KNq2dqP{Ykk}IM0ZPKg>{8>P<+rM zDeEkMbMXxW*GVSt@{fVPE}F@Io~d%EN(+oKHuuueTjW?xq_k-~LxCRm5nVE~_|x#2 zH2JXNwEA^YBt%>?m9sL+9O03{T;py;?!5A!3+yG_v<#-hm@M>nOSIeK9X0BsqKn(rI$CkvmzK zpH*lyk&Rd3ovZ4X3|y-Sl`eg3%5)9u&vf_p(G|b#h#4hIib|w{$0#Ju((8)wW1*e} zpGS}A$|c36FN@r2uq?qelGA$jhPE(CcV|Q?6EmIH6lImXoN!)$z(fSw(n9QYL6uy% zBB6tdVOjE6J-do)RY4h^y}{D==7qgn6Vuh(ncz>UeyKOayMn0U(?1+%9q`_@wW}UO zo@CD3T-j&~-|;roC+X<$IjJ75dg7B^c0fZuZX~i`WKp^@Zmz$uOXvYtfwhNKpRn>+bDWC%6}@ zZC_N7eNnFWjILAJp?5YH)S64i<}T!~V)W6KWfU{C?iU;Ei#y_NfOPs?m8m@1iLB_kKPnN}+n{_5Z8xYhvWuwzCZ3SD*kB6v5;%3ldmeZMWT?ApBGB zzP5esR=4wVg%FkMs%m%LTYs;r+_s|(nIa*kL}G+s6y5+PVL*mNij*N@mLMS*fRqu6 z7!ba1?Y;Kj+2^nNRN1+@_qN?tb?ThG*Iqy0`qo z;`2i(2L*SUhQzyT$ZSUs9I&we{!_^(*AK}uovM*PI-~iIjly8K5v1XIIGyEu>L_o1 zKoy2V5l;flPeO3NrDv)>ypybg_j0+})qNr6ZZeA9_%Gu4g`9-Lt*tH-CjAh0V5q?4#H66Qv(B^XhHD`jEX2!8`CNTzwOB0Dw z^k(Ka!dy#JmZ$7^TBm55-EDIUd=zXmCaA9VtcasiA1+^i5{O_{xmT;wsw`EcAWxtm z*{I*RCKte7pvZ$-$6;&pkg=&!87JA6VklM@G){ z4cwqKp@dQ64VUn8Gd-9alXCr~QXA-f6l>MMb0RRz&Y_V;U2tihS(k=@%@gTVGF@CRCqJW?EJ|X( zEE;e0H0Eziqrrk5A_P*Vh&$J)aFRAb@b7U##&D!D4lKdOn(b2Ns3$-)HbKH-xBco= zyLPos&vm3}-1SoUA7G~tEV1nE6)Lwm6h1Udpg(=0!dHIWyK1Nk1^t?Yuj9e%B~;pK 
zIh&3?_fg>NU~4>BjFTO-s!jm1aIQsu!a&?DyigC3D$7olN?E|5ine?cTf1g??tB=T zz~PbBZVCQ$xd}j}QAGQM?aVoG;b;u&znw@2-80iV_o3-%Vk~dQv*QsJ18qBwhouO} zVK(!ly7u~-ls}ltAuWG+NoDWuUCRC1#|yb_3u{esx|G;0v_YdgMq#Nh*mzav!)^j= zhb_`ti*hmbu0=EboqOCketsnJvlIhII4RDhbK;PRAAubSM7M-)l}a%KyQ1Ygw4~XB zrKn5@K7@O$m&&_7Ybzv6*86;KB)nq(-cWMxUOc zB!av-G1!3kq4NQYeHva(Z+l2sVYmz63aPfnzFaT+DMH3H$EMu343c41@cUD1#BrMt zJ=^td`F=VoZ~BI}evuFocPb+?LSKh0DIB{Xj{J#@BcIy_uafJFXeYaaQhC_EkC8jL zH=T)_*y*nGS>;7>c9IeAMY34m{PZ8asg$w)DQGq#Z)xPV1EZi>jVg|n`7WFGmn_N* zP$lJ-((5+|zd&nvLj+K7R{8N-02)QVjh4VKPVpm z^LY5j2Zt;whJXG-{d19<3hY@VDFp7n4DQ(L5!Hx7vb$R(0Yb)WG<23qmJ(i?cuz3u zZeILA5-pRl3~1?SDnx@Gb2v8e4Rn%>8Cs8na{)f2+@ecTqkskcsWMI{y0mNd7%k2; z@cSZ034)okM{L5H$2H-_0(A~F(=~R^98!nWc15`=C@ItM1@tT2Gxc!bZ3c}G=aV_R zb_Tk*tC3QvL?CojJf19O3L53^31+4ClSw)!z-2s!*Ra^`Jp@`k21(73$V!Fz4-lcYSeNkH!crEZT`p4Tn4xa@tZff32cNDlFmW!M3u2*8?;(9>kR zoDYVh5xjF}nWURG{D{$V^?5KyeW)2VY-<_NY(+={m)qGk*Yw%Y7SQ_S^2&_M<5WWp zoI;4WS@I2#c<|?s3~!hi1@?dc!o}fYg7zhayZL6_(jmVtYHGX`xT!MqOFn!(vH!%_9YV)U>x+e4^A71Po zUD9BIt1sSIzn7$n+gs1}JkPSj6j{0_0>jFPB29S)Ir?JWhk~VyWzQ8%q90)7K|xG2 zNytGR4j@l>)VK*2C`Q4LAyXq#he5rW49AG;Wd3}8Hf zou3(GrL(?pz;xPuAQ4s|&Q@PM+)RJn0KAu5R1?s;C*8-4nG9_tRvQFO_Ri-`q)XMw zj6SrD;lN}wRm0JE?uw9N*MkH*R>Au0S zo_^ugBIVCblO`Kt;+mfaISO(pxG7wh!R2Z&xmrT+%vKoMKpt zIitk!x})U>xG-bm(pS^rgRM#wvZ)zr7P@pBHd0X&9r2h{F-HR2K3it(^?pwdCn#fG#EI ziU~Uyv#XTH>QzcJU7qn>ox{?Tj1d$=X<~uVf4|t0%&3$S%<9h6&mdGYklLwX^J-KR zI1Dw)y@>-%STqPwcvKR-)VZUG1ZAmFk9JQDSsblH5#I!zS( zk@JskI6ATI#R?G@e(@C50QfTpzKZ@hTZ|{423wYSp1u5TvtnNxyz~-p#{)zQ7qE(aXlpm4@&@%50?VG;h;FTuf|`c8imd?UVT`$L zq(Phk=c34-c+GF>U$BL}lkt@Enb?oq^q=Pg;@?W_$~<;rvGnMbt7Y6nS!s)bR1*TZ z1yz}Sf86H?qc_Wo$$LB%4A|PcdwL($YOl#Trz(T9@(zt;eu6dBRT^JF%IEE zvg;BH=53&+603%&@%y))5tFq z^ew3TDs^Guq2a^Oa8Tl-Z4WUS40P(nFq~`Dld|_fd`;(O;?jw~NI$^GSE7=s+e!n5 zn(^1_y}8|K=7?vIkm;whSKtW{%{(`BFWglOFvE`(#yDFavRX`;dGOrOY|22Z@h8eW z81dwsM)v_i7H{U4(iQM`Lny>#0kw^w9?R-4Y-$^9h*uFTbH@^@@Td<*ab$NFKoUa0 zAbO6S=-#8ZAGWIxTSR-`f-o_BKhc7;8dIF}{PK%d>Qza6hnZNU9CFj{E;vga$wJ;E_nr{hDR#8>N~l0Cy;MY4>`k3ynLf(+)7zb!M8%XOY-~)mD+as z0}7LEo3{h86ihQ7$hu7wLZ%N3@F4zxsxjGXsYHde0IF$0e6A1%K250AD`HdIKGad7 zT_EiHgCi=+NhB^xnXU;tNbS&ojbMOLqL#;SY$6vbl*odOZB()!p;N!(RdDM`a$(d* zMZ~9p%RdKCxd7Idf2IaS@j~9lsZ@y7R6`wSs-eQX!>iVcDyJ4BO|^kl*(vx$ng`uh zM6nb`0pOJ?dec6*#YaeO4VG8m!+%i?vggpfF}ov3%NzVY5Vu@_o);>jeLuA&XiK(D9TZ0BMouj0T z84csDTb_iIL>fbnNw&TFyKF26k4a{%Uom)$8h(%Q4A#VND!wWU2G`&W>Q>Z*P+>hB zDd=DhN_QVfE?cE6Em9@5QP|uk07a3boGv#x!dd_Eddw^)9F(-2oBJMSOooDnpU(;U zWZ>_3al3g`dRM}-w+g?Yu(K2qZGY_Ey>lGRe8Sb0=?rKzDHJK%>%=)%6FUaQT*$w+ zmH(WB)5o)O1sE?IoZ-m;aozHAYnI@q208bU|!_B5bh z!S;Y$1SMQ$0h~Ti{zhDxN6+-qR+N@mwHWz%A_@L8wqt<6HwjT2&BzR7fP>gt>yrGz zewgezA?A#PnT&RkVu4elgV^3&vez1Z5sxpV?4(N^G+DhHq-}f-%M}f0zaKEo(Wn)S zjJ=MU*GOz>;nk>*D@E4(T3Amz8{unUXfPd1_?z2oIzBOwa$`?GqQ}c~F!?7e7S{58 z3xl{W#t#Jr0q|zXIyhnHa{-qL@)w7sOx=tWX<`7cF5PHn< zkVax(r6y=(Wx_GxZc2EGEHuuFvX%c8xQZ@!QF6hyM?UL=796G$rXaJEquFvXF@p65 z)+k~RLTpN;2Cm>4f+dMI3i45Ck!;aq*AZ)&_q9oT1r8-I zz(D@AYuFUd_X&=T@ZV8Qwn51bMkb(`3K0VD)!!F$P5yeZ+jS?0p+Txj8l#0-DApQQ z)Fy!^;!E4H6-8H4lU?-V9nS8h^~)J#Z$eF-$#cVb)jVO>XIUJ8wPBE0#|i}&GV?sx z`&_W;{42HaMU=L+yyOm?WL4-{Vi)bW8bY3Vo(uL3`VM=Fwuq)3`MZ@iu3D~*V_G6=PtwotKQsn1$Z$ZjSu0o74P`e50dp6s3GRUmk!61gBRV?rrn9Ku~GiFtk zBqHL%^&^;}?~|Sh&JSp2figHb69co^E{TDT^qE0#snSel0JAfmXlB4~wYAt~?Zy1L zK8-UP+yh%fmNFdO+Ud3Tnlq0%&}iF0pw*lc&FsQF6u1mVZll}*gdirMZBbctW2429yB1mUMu1uz$;ZwZduTS>(fXnsgnn@G=1 zN;nUzQEp$CufO*N7#cRxPbah2Mo3xJf7i&6j)yQX>J*IoH?qRnyFbPSbaD#&4*@&T8gjX7go1{9B?k zpn&jL5osO|9HeUP5||#SY4!$@73q9%P)d?A!Et#P)NpD{klz-I63m&THMnwhO|4+j 
z^SQp)drqQQ&TdU1x7D5n>m1I{2~bKM=UmUFEiYV=8_U(FEI!T)HYzw-mHm|}q$=*a zIBKh*cHo1VVF$f86%0k$fL15cp#0n1&TDk zt?H0kjVSFQQ2v8lGp=;UYMcAb*}i3>(QiuEWVR{Q&C zb_DXcP5%L!%dv8JfbBZrlrv|YRCq0(jz+Qb)>ZPf?7aD9a|=L*K$^=P~J=dGfRgCGk_bYdH3Y;V0YS^NyupW;>jwpwh04Gl99_P0{M?j72=RE9E~dW19;3NPSxb^KrIBQ)w7@pvy?BXkbN^=vsrIkVP8 zwVNxfU|8S`BvM~Egk|-9dT6m5;N#uB+pB68v8q2oyK39t6|ArT=G)zrIDSXsRKP!q zLwT6-LiZzK_KDmX%OgjZ88DFBN4b<24)B-EYaoF=%NKY+iT?HxKg~z8*NBEtXK_6E=JnfWPhSrPZw5Vo**@k+y18eFpDf(0?&HDhCC39TXVcN={;{x31jZu7rbNMam-4M}uVs*t&x6qX6huBQowPnjMM#Hh zCkmM4L16dzzX{mj*~eYBf(J+f2VN|x67gZazt?7t&%=!8LnKo->*2%c3_3 zzp1sc${aQyRF^+7WU(x^H~g!xkolo>?`qO$f}?%lFuFz)F|`vwjG`?QBr2C1^u@-)3RAtiR&PJ--`vf>BBWje4p$k@%_k-OW>f@_khz(>bqi5Hvtp3 z0L0QHq--%mw=Tb8>tmMnY=STejZYL)bc`@^dv^!S@+?cp7vd{Fc%h7;78IqxsC)&t zL=3dFLqvWC=@ZT%4LYR|jw@4ILP_bfk{l#TM`tX`yuoo%bTS#v70%Th94^1lBk8V9 zDvRPDRnH`fIDI7CWETGL>O)OxUQ>oF93=Tk!+Bt)SD>LOr`wNiei&R2r!(ti4UQ$F zMX;*od+(sOA}s?2X4@JRhC>GjIu|bZHE4D(*+VVpK@hGexwt9mp$UQDAr?NH&u4QF zyMMtHq8+N@YgTGcL9q$Wo(PB<+(Bw1H$}ynnPBgE97;tgKm1j!{^SG&HF}Wg>Vz|c z@2ch{Ftysegy;u-h>FLq(q2^$;Bq8kS!J@O^8ox*|KbbxRiZEeS0X{ugf`v^?NFH3 zn6DJY?F9o+mdF!l6W(13m~u;+2wV=^kTO6bi5 z>yyj`>r3`eI)62S-$$G|MFZLzflL4ev;t3_>ft+_ZqXy2{Yv$(d{j69iuF2430V-K z^R~mW5$;HJH-1m`i*s~(Th$BNbv6oni{5kCTWKVfse0=jFOi8~UJus81!h1Q)spv3 zAbS-i!|8@*&O&*v#EpExU5oJJdwU#dImVRT&GI^kVw-(cHnbzBl@5`8;36&4_Q`^x zvH-cp9`qxTf<$l(&Kp}m0@&!Srjya+<8)&S!#FH&unpUZL)Z1$!J0F?$+pAyx*Wx! zCm-IS|2fgEW|G|0adYNaw)q|BnMUO%9U*xoCCpMEf|sR!xzvk4 z%t;9`r6-LsB}OS={3xcxgd~oF8G*&a;7fO!EqF%ZLw#&84;uw{i`(IPI$YT7Uu>p( z#}^>6zuYVrKc3De0!5#zuT?ybTA1%nW0{U>CY_D-Y#88?LyZ6rPMGP>85o-y1fY8iodGxI4yuz#K8i)r|$D5#eqa19me`)2!YMz$r_@LS}7N0YZ={SWF z`jv@)4;Aj&4I*^qfkxZA%7(#FCrggB5g#UI>=JG4(!`c5kINe1%Zjagy600$&NV3^ z*>K7pCs%9Lq&@g|j%n;f9Xy)5R9s2gM7qo8u?vQjtD&cG`|7Ho<_Zp2;n^4-bX+cu zuWtZk@;!q&V(a;bc~!Y>=q1ND8>{g4qa;EOu-l=t9XxSBN&{5pm5U@Ot_F z_2hOkqhWInsv>&(Op-magSTuxw|s)6{aXncj=gdbb_3)E);$Hf@=>zMf+((EW$No9mT=unX+u`DJsE| z=_MWL4IlkVC|w2oBF`QApI)M3pFGQ%|phHTkgV##b)|` zF&UdLEDy$NwOe<6-K?0sV^9YE#|O-HvK9B?#uaEb0_-`-r0S~(yx~(I$kMv}4H66?D0!fM(z&nw;@CZe!ic^@0xfDRaOubR?Tt5`P#+ z4;*P}P)!Fuj(d8&Z!nJsvxA0<(L}wmf@#ne-a(1I5NR?fd?_0i-~H~VXec!KG@7h9 zp`ze{`m@5!laF$mDf%o}0Y{gH#hI5K5?3z;ni$1m06tbE1R9djFOAmSRD6- z>mK@)S$)>Y=BKAW$g>BVp8Ng|wNEhT)trJ;(Iy2?1m z7+UIyUJPNc3%K{v=)!ioxOg{s3nwi(kCpBjftIL4Adk0WvWd6Zb`Nu|x=fA{;%fOY zn8atsDr?VUt8$9{qku)3UU$!+j@)cewSPM!nt2t3fXb#=U(b*FNVGE53b00t=aEN+ zOL74dEjd-0$;Y)kTa4kT$!zDdm)~tnww3j$=5I`Y*(~>gSo+a^9}F+@R@0fMm_Q3r zgzQD2G1dmaw2nok`R8h>hP}H^3nR!RD`3rcHQT6!f(c2n_X>fjAgsr&k=^6$(KtQ6 zS}xHsVqw&`EO&_3blteCDlktmE>lfc9&(hsJ*m_nqE4g+zv&008+;%~#5$joKmlMC zt~La%Cb8?)qQdtj(h&JB$IP@*B$Suy^W3T@Oq5C)o?1Xu<+xA>bC-d{mpv0vT=81tuy2B5t~@2L3=X z!Rj^8UGMqf^^~=0z)eI=Y|!e4^nyO;+ZM{#*q7yr#q*b&9PSVEjTR44Aa(z^hfDJ` zXph6zzD(8*+j$B`yJ-sO3fc;;Mi9PeC1Wui+Yl4KUf1Ypn_a;Mgp149u699aZ>p$#h^G)E)_ntWaJ|kEBIVdDMjk#8T55>fuY44fXq@f_h#Aup4!Z4im%|1q0KESj8IyG zj@g47l$J5mjEq9VA=#4h5vLnAhZM;y^+(QkMH8(6T7+3ck>fVf+GerTx;Td})<9kL z0rrxU@f)~xLJjm@Osq+A4bJ>~YapnSClg=5okni}^19VGwszq3g&D$Fflo(?x!_>s zqW`waIB%yC=n*QB?n+FRbxu&p!oV7;j`FLL|GGHb(ZdUGM{+c=wR|qtQu=j-DP(3p z;Tkn+_K}@{t^&NPOQ(s7pT0pjHyC~TG`yPL_UPVirob=2#T;xuufVEZTra`zM_^gh|; zc9!+JVeNs#2W*>Vfo=0DqT3WyJ-<83YkxUC04a6?EH0e%J#($&aHDCPqY?El0>7`u zz+27A+(R)AmNa@0pVmxTHZdixlqAk!6p=)8+=&?IbV(5(C??z6Oz8I#`#`0TW5(W? 
zR!hb=uT56mZ>0)ZjaBbN8-Cda4FS`m8or2tEXsS(HV1fT*l;0A193(@d=O{%i1op} zX2E5H)=FAqc2H9wH&?I)c;+Hlggjd}+LeK=MV!$5-Q*OU)rkGZGVdxALq@rpURkl5 z-v>qAfiA^rqRwuJ+H0K43Oh zL+}xGT7$A%+q6_wN2Nw$yQ5Bpm1!^7uuJN59riAnKV(Xik$)&%t|pI-4y=LOwwX9LJQ# zpV~gh*1|Y-E=_mh{RgVyr8V_u>=JhZh#px0nj9G)ucKV*l{;LVBe zi*}){6F`;ftf?Wyf|i$@iYc@sa~*o~Vo}8`M2acTEZkw)1CWz$F1caSepS7RlH=6E zgCcu+J`X8yMnPHfA=%?MfG+j|Ikx62Q+ zpc~Nf{gZ07?ayU1|1^tpDfq|aSnS4hZ@mqum~RMDicYN+8UpOpr8VG zaWKTAO11nrRxNq`PXiRX0h^Z^@5z`t=LzdscSgz*$5p8?(s@Xks(I8w?^VO)+5EgL#tFnm}eAYomkmprWmO=dDwy zC{P?v|K3{PsBN1Q46^{r1caNb)};D&x&i-C($e_yk;otdh|i>TGu5d~UcD>Fqy}GCPSX8qO-@+mgA5R9<#CVrZyJRy(ak{e#XQVgo0Cl?d9U+*dOF0Id6{-02-nH%6gr+2`fvWCXf2s!j{J--Eflx&-O=_KtM! z{9voNhpvB#qJde3QG+Pzv>KSV9tP}bMlp*FvN7l4<8nRjYkgW)2h~8i2TML!@{3Ou zzrq9taW@=J1}-O_1%hm`n_XYax8Pg`(td1W!>qT zmO9DU@31(+8%gf8(7!CT8W1E)WL%IkyrgR*`%WDgPg{TZ@n(nyX$6rlces1MaDj(g-_h5MM~b-UTZw8>l3 zgVQ7fPk5Z8XaAGM7VG8;AF%aSbgqTx%Mqdz__UQI$RCwsUSZ z`)|Qj0Ojb%#1XU@$VGt6=XqmOy$D5DFPma6Bp0y~iFxDNz>tBO4_tjakuX#+b-s|z zE#d_y&a`vUQXUMtZbaHW$Ef2ZBN4z!>It|r8H?MYy(@v3UQje%>1l$b=y7s4X;pY< zA@ySp3~@t7xSwAIWeCeqk}0(fsc86YvJ^D~;2F^65{FD762`Hnl+D#0rmL9mT(5po z`tIIi24?HUaOPl`JG!YrajY|`$`4AwF#4nnTN9T9DD>vt|HMzl+1<>6N}1AiS)4pz zELe0h@8L}~;%%7+8by_R9u;3(gH!v)@66-c#_!U~iF(w`flb)A2;RY|u?jwUts`ck z(0i4wU10xqW>MLdkk3)Rux-sl*7vpUqkNYGn#?HGuS_>K!lD>ISor4R@~sL2n##LO zk>rV}KdKp4{Zsu+`&K*IR$I(oSyiMArq=1T3?Ay(bflla(}d!nzZz%P-n{yinJM)| ziW@q$TCR3KuuYUfjZ}5V%!(QTo#UwmW`Va-a|d0eRDGeuT{GX}n;EKu_-18U19ZqB zK$W)bvUx;j$zm6$1$QT?^si7$nL7wv+ET~`=Qc?WqN6S=!SO=SJsZKqXuh%#3TA9s zR*ml#l1y`et1B>Zc*8jcVq0aJ^|cFhPQV!U%%e@#bE}TLx4#2JE9!<^veWd)4?>1m zOz45Bcj4Q@ly9=2q}Wz8)4S?nXAzRAck4rTTXLZgF|`#RX|E))s`fc?cymY|l`2#` zeH>z`t5_|zjpO7vlB#YN@OX_Wio=cN!8omAW0Gzv@p`93g3Tys^|#G1>FQSA0<@qG z=G;N=vCufr+6z-SRIngq?)`Am*@Hr=y5wE)*yMd+7oev?pE&zovMDxaSTQI0ES{5W zPAQ%try-h5mqHJovguN1=O`pVywPI>%PK$|-2h0u$XO?~4HAM>G-iv8__{3cnjSr>M;pp}j zo;Ep5G$kU0oJgT?k?PT~aSAVIl25kox3ekiR&_$j`y;!9uN*~S>M?0O?+LUQXjGWv zHZ<7hx$P`#4I~R|0LN0*8CR8i=}KYrYL*R#nCn|hMjs46eBv>PaY*g^lt?xhJtkg( zQN?OXf+9c#tQ2%onBa&rWWB`r?h(Cs+uRL>4ZC?k63-DyZg~N{P0B$2i`Nh zBnpCPlLTqwAjIxI5K0x`;fY^zhey%|1!GoZiYrhnZCJ&^E6eRdtDXh8X<>L>Xq0Z0 zgk-2?)1sF99r-lyI!%!`r8^vnaOf?zE!E;EDL714WBA@5*5mV0Vh} zqF|QcXQ`Xe83f4G>YXeMF!hZJLbxI}gUjjJ)=NrbmZ=wT>9dGZ>CW=rIrkbk?gYodi*7 zOqsco?Dxu!TJ_4zS{9Xb9M|cm{=602u#h`YIHi4*{4g7l3U?AZPqA>!rBsbz+Tg-h zPzWTJ@qIvSvXDU_r$~0<|kDI~!GYTdx>W`{;VPHs=Rj z?LCo?=B1D2fqkrC$Sv!W-?n%)-uF72wj>%I=M6N5YM|8)FTU80gS$hfzd2o_panP@$bZUlXqi3(a4;ync<|~GtjQZq2 z+Iavj|8J^aYMh`$bEEE*|KIPl@hW?%8qXQaIB4z0NWp=pb@Tx-&$!VZI{04Qp6V}b z;`NmCVP^<;>r3##QlM+^2!tWZ6URQ~c|^!_gfrrvl7L3;(kDob(mOTbwmO={Q_(rp zSXBoZSRR#*JL&iYTz1!r>I(79@LnRtEvEHQr#(q))wEg#7RdsyNOyhrZqR=T(DlWN zvs!J9w0qcuLVH&cY7`U4U($PYFm5 z2)a9$Ol!4Ak)&F^I+nIJiafKAg_1g@_DeY`^`(rZjhgyRiy6&g?x8T#goK8B$si~K zBQ(SfqM)4IbcVB95H7^Ly@J@md-kUX$vF|neXQUyC=R_riQ^|2>;S_{Ujq<7Hd+Gf z>>n#KokaB#6PZuN3Ab#v{KyfgfO5xX%(}Om2B*Pi(R4Ihs?MTENf*HfDRR$tk00|u zrXNy-FN>_g>v`w4yJ-MY&=hQK8BKkWO35VkYMqcEZ^*U-6{*6gfD*f*7Q)8;C+Oj? znHe9nA;)nj%O3Mpstvm@boka~Kz`yfAl@iRx7%u$RoB78XIWr5O(QjEmRoenc06PL zI7P9yMsUuSDl$|}a=Kgl1nOz{-_RVLvf4>7*tVsuP`HnebdN(NU{9@6WaHYZvxF|L}zdr zFRN)6%8|z|L5s5G3w*j$Gqea)*6HFs{y!PcbJL52K_#tFE7q_2Y&7u>B~W@acn9^d zTo2cuP065!NU3c0j>EKs2{E4 zG}H{FJSdle%I#*=JwE{+;O66S6}U%M75Gxy& z1;GOp^uvVn&fyFk_wZZks5M$*p7Hv_XgkFeQ?{R_t8l8XVu%Fhb92C{yi`~WHjA>o z(gV@YX4xGNq88#JGeDd3^rAIPaSP$FUQ9BGdd_@U!9XiCc! 
[base85-encoded GIT binary patch payload continues; omitted as non-recoverable binary data]
diff --git a/third_party/prebuild/x86_64/libmmpa.a b/third_party/prebuild/x86_64/libmmpa.a
index bec195ad066b02a4966ccf47860634e9c7200bcc..13ca68db5f5ff60d6d735e48b9dd82cfc60e4f72 100755
GIT binary patch
delta 14566
[base85-encoded binary delta omitted; not human-readable]

From 0e43f8c1d52e60fba16aad837f90ec3ad08fcffc Mon Sep 17 00:00:00 2001
From: yanghaoran
Date: Thu, 29 Jul 2021 09:50:53 +0800
Subject: [PATCH 226/226] update headers

---
 inc/external/acl/acl.h                        |   4 +-
 inc/external/acl/acl_base.h                   |   2 +
 inc/external/acl/acl_mdl.h                    |  16 +-
 inc/external/acl/acl_op.h                     |  28 +
 inc/external/acl/acl_op_compiler.h            |   6 +-
 inc/external/acl/acl_prof.h                   |  37 +
 inc/external/acl/acl_rt.h                     |  18 +
 inc/external/acl/ops/acl_dvpp.h               | 109 ++-
 inc/external/ge/ge_ir_build.h                 |  28 +-
 inc/external/hccl/hccl.h                      |  27 +
 inc/framework/executor/ge_executor.h          | 138 ++--
 inc/framework/ge_runtime/task_info.h          | 253 +++++--
 third_party/fwkacllib/inc/ops/array_ops.h     |  68 +-
 .../fwkacllib/inc/ops/control_flow_ops.h      |   2 +-
 third_party/fwkacllib/inc/ops/ctc_ops.h       |   8 +-
 third_party/fwkacllib/inc/ops/data_flow_ops.h |  41 +-
 .../inc/ops/elewise_calculation_ops.h         | 166 +++--
 .../fwkacllib/inc/ops/functional_ops.h        |   3 -
 third_party/fwkacllib/inc/ops/image_ops.h     | 311 +++++++-
 third_party/fwkacllib/inc/ops/linalg_ops.h    |  17 +-
 third_party/fwkacllib/inc/ops/list_ops.h      |  64 +-
 third_party/fwkacllib/inc/ops/lookup_ops.h    |   4 +-
 third_party/fwkacllib/inc/ops/math_ops.h      | 135 ++--
 .../inc/ops/matrix_calculation_ops.h          | 232 ++++--
 .../fwkacllib/inc/ops/nn_calculation_ops.h    | 699 +++++++++---------
 third_party/fwkacllib/inc/ops/nn_detect_ops.h | 153 ++--
 third_party/fwkacllib/inc/ops/nn_norm_ops.h   | 175 ++---
 third_party/fwkacllib/inc/ops/nn_ops.h        |  28 +-
 .../fwkacllib/inc/ops/nn_pooling_ops.h        |  97 ++-
 .../fwkacllib/inc/ops/nn_training_ops.h       |  11 +-
 .../fwkacllib/inc/ops/nonlinear_fuc_ops.h     |  85 ++-
 third_party/fwkacllib/inc/ops/pad_ops.h       |  22 +-
 third_party/fwkacllib/inc/ops/parsing_ops.h   | 135 ++--
 third_party/fwkacllib/inc/ops/quantize_ops.h  |  10 +-
 .../fwkacllib/inc/ops/ragged_array_ops.h      |   9 +-
 .../fwkacllib/inc/ops/ragged_conversion_ops.h |   3 +-
 .../fwkacllib/inc/ops/ragged_math_ops.h       |   6 +-
 third_party/fwkacllib/inc/ops/random_ops.h    | 143 +++-
 third_party/fwkacllib/inc/ops/reduce_ops.h    | 107 ++-
 .../fwkacllib/inc/ops/resource_variable_ops.h |  28 +-
 third_party/fwkacllib/inc/ops/rnn.h           |  18 +-
 third_party/fwkacllib/inc/ops/rpn_ops.h       |   6 +-
 third_party/fwkacllib/inc/ops/sdca_ops.h      |  14 +-
 third_party/fwkacllib/inc/ops/selection_ops.h | 163 ++--
 third_party/fwkacllib/inc/ops/sparse_ops.h    |  43 +-
 third_party/fwkacllib/inc/ops/spectral_ops.h  |  20 +-
 .../fwkacllib/inc/ops/split_combination_ops.h |   9 +-
 third_party/fwkacllib/inc/ops/state_ops.h     |   2 +-
 .../fwkacllib/inc/ops/stateful_random_ops.h   |  31 +-
 third_party/fwkacllib/inc/ops/string_ops.h    |  50 +-
 .../fwkacllib/inc/ops/transformation_ops.h    |  17 +-
 third_party/fwkacllib/inc/runtime/base.h      |   4 +-
 third_party/fwkacllib/inc/runtime/config.h    |  15 +-
 third_party/fwkacllib/inc/runtime/context.h   |   4 +-
 third_party/fwkacllib/inc/runtime/dev.h       |  22 +-
 .../fwkacllib/inc/runtime/dvfsprofile.h       |   4 +-
 third_party/fwkacllib/inc/runtime/event.h     |  14 +-
 third_party/fwkacllib/inc/runtime/kernel.h    |   4 +-
 third_party/fwkacllib/inc/runtime/mem.h       |   4 +-
 third_party/fwkacllib/inc/runtime/rt_ffts.h   |  73 +-
 third_party/fwkacllib/inc/runtime/rt_model.h  |   4 +-
 third_party/fwkacllib/inc/runtime/rt_stars.h  |   8 +-
 third_party/fwkacllib/inc/runtime/stream.h    |   4 +-
 .../fwkacllib/inc/toolchain/prof_acl_api.h    |  32 +
 .../fwkacllib/inc/toolchain/prof_callback.h   |  12 +
 65 files changed, 2582 insertions(+), 1423 deletions(-)
 mode change 100755 => 100644 third_party/fwkacllib/inc/runtime/rt_ffts.h

diff --git a/inc/external/acl/acl.h b/inc/external/acl/acl.h
index 8d261201..a5194472 100644
--- a/inc/external/acl/acl.h
+++ b/inc/external/acl/acl.h
@@ -25,9 +25,9 @@ extern "C" {
 #endif
 
-// Current version is 1.0.0
+// Current version is 1.1.0
 #define ACL_MAJOR_VERSION 1
-#define ACL_MINOR_VERSION 0
+#define ACL_MINOR_VERSION 1
 #define ACL_PATCH_VERSION 0
 
 /**
diff --git a/inc/external/acl/acl_base.h b/inc/external/acl/acl_base.h
index 64d4bd81..90da8b8f 100644
--- a/inc/external/acl/acl_base.h
+++ b/inc/external/acl/acl_base.h
@@ -150,6 +150,8 @@ typedef enum {
   ACL_DOUBLE = 11,
   ACL_BOOL = 12,
   ACL_STRING = 13,
+  ACL_COMPLEX64 = 16,
+  ACL_COMPLEX128 = 17
 } aclDataType;
 
 typedef enum {
diff --git a/inc/external/acl/acl_mdl.h b/inc/external/acl/acl_mdl.h
index 2bf85e29..522dbd38 100644
--- a/inc/external/acl/acl_mdl.h
+++ b/inc/external/acl/acl_mdl.h
@@ -295,11 +295,23 @@ ACL_FUNC_VISIBILITY aclError aclmdlAddDatasetBuffer(aclmdlDataset *dataset, aclD
 ACL_FUNC_VISIBILITY aclError aclmdlSetDatasetTensorDesc(aclmdlDataset *dataset, aclTensorDesc *tensorDesc,
                                                         size_t index);
 
+/**
+ * @ingroup AscendCL
+ * @brief Get aclTensorDesc from aclmdlDataset
+ *
+ * @param dataset [IN] aclmdlDataset pointer
+ * @param index [IN] index of tensorDesc
+ *
+ * @retval Get address of aclTensorDesc when executed successfully.
+ * @retval Failure return NULL
+ */
+ACL_FUNC_VISIBILITY aclTensorDesc *aclmdlGetDatasetTensorDesc(const aclmdlDataset *dataset, size_t index);
+
 /**
  * @ingroup AscendCL
  * @brief Get the number of aclDataBuffer in aclmdlDataset
  *
- * @param dataset [IN] aclmdlDataset poiter
+ * @param dataset [IN] aclmdlDataset pointer
  *
  * @retval the number of aclDataBuffer
  */
@@ -309,7 +321,7 @@ ACL_FUNC_VISIBILITY size_t aclmdlGetDatasetNumBuffers(const aclmdlDataset *datas
  * @ingroup AscendCL
  * @brief Get the aclDataBuffer in aclmdlDataset by index
  *
- * @param dataset [IN] aclmdlDataset poiter
+ * @param dataset [IN] aclmdlDataset pointer
 * @param index [IN] the index of aclDataBuffer
 *
 * @retval Get successfully, return the address of aclDataBuffer
diff --git a/inc/external/acl/acl_op.h b/inc/external/acl/acl_op.h
index d2e59bfb..f340b6bc 100644
--- a/inc/external/acl/acl_op.h
+++ b/inc/external/acl/acl_op.h
@@ -135,6 +135,34 @@ ACL_FUNC_VISIBILITY aclError aclopSetAttrFloat(aclopAttr *attr, const char *attr
  */
 ACL_FUNC_VISIBILITY aclError aclopSetAttrString(aclopAttr *attr, const char *attrName, const char *attrValue);
 
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. the type of the attribute is aclDataType
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param attrValue [IN] attribute value
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrDataType(aclopAttr *attr, const char *attrName, aclDataType attrValue);
+
+/**
+ * @ingroup AscendCL
+ * @brief set an attribute. the type of the attribute is list of aclDataType
+ *
+ * @param attr [OUT] pointer to the instance of aclopAttr
+ * @param attrName [IN] attribute name
+ * @param numValues [IN] number of values
+ * @param values [IN] pointer to values
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclopSetAttrListDataType(aclopAttr *attr, const char *attrName, int numValues,
+                                                      const aclDataType values[]);
+
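For orientation, a minimal usage sketch of the two new attribute setters. This is not part of the patch; the attribute names "dst_type" and "src_types" are hypothetical, and aclopCreateAttr/aclopDestroyAttr are the existing lifecycle calls from acl_op.h:

    aclopAttr *attr = aclopCreateAttr();
    if (attr != nullptr) {
      // single-value attribute of type aclDataType
      (void)aclopSetAttrDataType(attr, "dst_type", ACL_FLOAT16);
      // list-of-aclDataType attribute; ACL_COMPLEX64 is new in this patch
      const aclDataType srcTypes[] = {ACL_FLOAT, ACL_COMPLEX64};
      (void)aclopSetAttrListDataType(attr, "src_types", 2, srcTypes);
      aclopDestroyAttr(attr);
    }
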
 /**
  * @ingroup AscendCL
  * @brief set an attribute. the type of the attribute is list of bools
diff --git a/inc/external/acl/acl_op_compiler.h b/inc/external/acl/acl_op_compiler.h
index d9d1b3da..b64b2bad 100644
--- a/inc/external/acl/acl_op_compiler.h
+++ b/inc/external/acl/acl_op_compiler.h
@@ -86,9 +86,9 @@ ACL_FUNC_VISIBILITY aclError aclopCompile(const char *opType, int numInputs, con
  * @retval OtherValues Failure
  */
 ACL_FUNC_VISIBILITY aclError aclopCompileAndExecute(
-  const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], const aclDataBuffer *const inputs[],
-  int numOutputs, const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], const aclopAttr *attr,
-  aclopEngineType engineType, aclopCompileType compileFlag, const char *opPath, aclrtStream stream);
+    const char *opType, int numInputs, const aclTensorDesc *const inputDesc[], const aclDataBuffer *const inputs[],
+    int numOutputs, const aclTensorDesc *const outputDesc[], aclDataBuffer *const outputs[], const aclopAttr *attr,
+    aclopEngineType engineType, aclopCompileType compileFlag, const char *opPath, aclrtStream stream);
 
 /**
  * @ingroup AscendCL
diff --git a/inc/external/acl/acl_prof.h b/inc/external/acl/acl_prof.h
index 3784d8c6..a93374b0 100644
--- a/inc/external/acl/acl_prof.h
+++ b/inc/external/acl/acl_prof.h
@@ -40,13 +40,20 @@ typedef enum {
   ACL_AICORE_MEMORY_BANDWIDTH = 2,
   ACL_AICORE_L0B_AND_WIDTH = 3,
   ACL_AICORE_RESOURCE_CONFLICT_RATIO = 4,
+  ACL_AICORE_MEMORY_UB = 5,
   ACL_AICORE_NONE = 0xFF
 } aclprofAicoreMetrics;
 
+typedef enum {
+  ACL_STEP_START = 0,  // step start
+  ACL_STEP_END = 1     // step end
+} aclprofStepTag;
+
 typedef struct aclprofConfig aclprofConfig;
 typedef struct aclprofStopConfig aclprofStopConfig;
 typedef struct aclprofAicoreEvents aclprofAicoreEvents;
 typedef struct aclprofSubscribeConfig aclprofSubscribeConfig;
+typedef struct aclprofStepInfo aclprofStepInfo;
 
 /**
  * @ingroup AscendCL
@@ -322,6 +329,36 @@ ACL_FUNC_VISIBILITY uint64_t aclprofGetOpDuration(const void *opInfo, size_t opI
  */
 ACL_FUNC_VISIBILITY size_t aclprofGetModelId(const void *opInfo, size_t opInfoLen, uint32_t index);
 
+/**
+ * @ingroup AscendCL
+ * @brief get the timestamp of a step start or step end
+ *
+ * @param stepInfo [IN] pointer to stepInfo data
+ * @param tag [IN] start or end flag
+ * @param stream [IN] stream info
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclprofGetStepTimestamp(aclprofStepInfo *stepInfo, aclprofStepTag tag, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief create pointer to aclprofStepInfo data
+ *
+ * @retval aclprofStepInfo pointer
+ */
+ACL_FUNC_VISIBILITY aclprofStepInfo *aclprofCreateStepInfo();
+
+/**
+ * @ingroup AscendCL
+ * @brief destroy aclprofStepInfo pointer
+ *
+ * @retval void
+ */
+ACL_FUNC_VISIBILITY void aclprofDestroyStepInfo(aclprofStepInfo *stepinfo);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/inc/external/acl/acl_rt.h b/inc/external/acl/acl_rt.h
index 5ee70724..50dbc34d 100644
--- a/inc/external/acl/acl_rt.h
+++ b/inc/external/acl/acl_rt.h
@@ -44,6 +44,12 @@ typedef enum aclrtEventStatus {
   ACL_EVENT_STATUS_RESERVED = 2,
 } aclrtEventStatus;
 
+typedef enum aclrtEventWaitStatus {
+  ACL_EVENT_WAIT_STATUS_COMPLETE = 0,
+  ACL_EVENT_WAIT_STATUS_NOT_READY = 1,
+  ACL_EVENT_WAIT_STATUS_RESERVED = 0xffff,
+} aclrtEventWaitStatus;
+
 typedef enum aclrtCallbackBlockType {
   ACL_CALLBACK_NO_BLOCK,
   ACL_CALLBACK_BLOCK,
@@ -499,6 +505,18 @@ ACL_FUNC_VISIBILITY aclError aclrtResetEvent(aclrtEvent event, aclrtStream strea
  */
 ACL_FUNC_VISIBILITY aclError aclrtQueryEvent(aclrtEvent event, aclrtEventStatus *status);
 
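For orientation, a sketch of the step-profiling sequence added to acl_prof.h above. This is not part of the patch; all calls are declared in the headers shown here and in acl_rt.h:

    aclrtStream stream = nullptr;
    (void)aclrtCreateStream(&stream);            // existing runtime API
    aclprofStepInfo *stepInfo = aclprofCreateStepInfo();
    if (stepInfo != nullptr && stream != nullptr) {
      (void)aclprofGetStepTimestamp(stepInfo, ACL_STEP_START, stream);  // mark step start
      // ... enqueue the work belonging to this step on `stream` ...
      (void)aclprofGetStepTimestamp(stepInfo, ACL_STEP_END, stream);    // mark step end
      aclprofDestroyStepInfo(stepInfo);
    }
    (void)aclrtDestroyStream(stream);            // existing runtime API
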
+/**
+ * @ingroup AscendCL
+ * @brief Queries an event's wait-status
+ *
+ * @param event [IN] event to query
+ * @param status [OUT] event wait-status
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ */
+ACL_FUNC_VISIBILITY aclError aclrtQueryEventWaitStatus(aclrtEvent event, aclrtEventWaitStatus *status);
+
 /**
  * @ingroup AscendCL
  * @brief Block Host Running, wait event to be complete
diff --git a/inc/external/acl/ops/acl_dvpp.h b/inc/external/acl/ops/acl_dvpp.h
index dcaa3936..5418ebd3 100644
--- a/inc/external/acl/ops/acl_dvpp.h
+++ b/inc/external/acl/ops/acl_dvpp.h
@@ -158,6 +158,20 @@ enum acldvppJpegFormat {
   ACL_JPEG_CSS_UNKNOWN = 1000
 };
 
+enum acldvppChannelDescParamType { ACL_DVPP_CSC_MATRIX_UINT32 = 0 };
+
+enum aclvdecChannelDescParamType { ACL_VDEC_CSC_MATRIX_UINT32 = 0 };
+
+// Csc Matrix can be used both for acldvppChannelDescParamType and aclvdecChannelDescParamType
+enum acldvppCscMatrix {
+  ACL_DVPP_CSC_MATRIX_BT601_WIDE = 0,
+  ACL_DVPP_CSC_MATRIX_BT601_NARROW,
+  ACL_DVPP_CSC_MATRIX_BT709_WIDE,
+  ACL_DVPP_CSC_MATRIX_BT709_NARROW,
+  ACL_DVPP_CSC_MATRIX_BT2020_WIDE,
+  ACL_DVPP_CSC_MATRIX_BT2020_NARROW
+};
+
 /**
  * @ingroup AscendCL
  * @brief alloc device memory for dvpp.
@@ -1910,9 +1924,9 @@ ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropAndPasteAsync(acldvppChannelDesc
  * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
  */
 ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizePasteAsync(
-  acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
-  acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppRoiConfig *pasteAreas[],
-  acldvppResizeConfig *resizeConfig, aclrtStream stream);
+    acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
+    acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppRoiConfig *pasteAreas[],
+    acldvppResizeConfig *resizeConfig, aclrtStream stream);
 
 /**
  * @ingroup AscendCL
@@ -2557,10 +2571,93 @@ ACL_FUNC_VISIBILITY aclError acldvppClearHist(acldvppHist *hist);
  * @see acldvppCreateChannel | acldvppCreateBatchPicDesc | acldvppCreateRoiConfig | acldvppCreateResizeConfig
  */
 ACL_FUNC_VISIBILITY aclError acldvppVpcBatchCropResizeMakeBorderAsync(
-  acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
-  acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppBorderConfig *borderCfgs[],
-  acldvppResizeConfig *resizeConfig, aclrtStream stream);
+    acldvppChannelDesc *channelDesc, acldvppBatchPicDesc *srcBatchPicDescs, uint32_t *roiNums, uint32_t size,
+    acldvppBatchPicDesc *dstBatchPicDescs, acldvppRoiConfig *cropAreas[], acldvppBorderConfig *borderCfgs[],
+    acldvppResizeConfig *resizeConfig, aclrtStream stream);
+
+/**
+ * @ingroup AscendCL
+ * @brief set param for dvpp channel desc
+ *
+ * @par Function
+ * set the attribute in dvpp channelDesc for the specified type
+ *
+ * @param channelDesc [OUT] the channel description
+ * @param paramType [IN] specified param type
+ * @param length [IN] mem length of param
+ * @param param [IN] pointer to param
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppGetChannelDescParam | acldvppCreateChannelDesc | acldvppDestroyChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppSetChannelDescParam(acldvppChannelDesc *channelDesc,
+                                                        acldvppChannelDescParamType paramType, size_t length,
+                                                        const void *param);
+
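For orientation, a sketch of selecting a color-space-conversion matrix through the new generic setter. Not part of the patch; it assumes channelDesc was created with the existing acldvppCreateChannelDesc, and that the param payload for ACL_DVPP_CSC_MATRIX_UINT32 is a uint32_t holding an acldvppCscMatrix value, as the enum name suggests:

    uint32_t cscMatrix = ACL_DVPP_CSC_MATRIX_BT709_NARROW;  // value from the new acldvppCscMatrix enum
    aclError ret = acldvppSetChannelDescParam(channelDesc, ACL_DVPP_CSC_MATRIX_UINT32,
                                              sizeof(cscMatrix), &cscMatrix);
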
+/**
+ * @ingroup AscendCL
+ * @brief get param of dvpp channel desc
+ *
+ * @par Function
+ * get the attribute value in dvpp channelDesc for the specified type
+ *
+ * @param channelDesc [IN] the channel description
+ * @param paramType [IN] specified param type
+ * @param length [IN] mem length allocated for output param
+ * @param paramRetSize [OUT] mem length of output param
+ * @param param [OUT] pointer to output param
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see acldvppSetChannelDescParam | acldvppCreateChannelDesc | acldvppDestroyChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError acldvppGetChannelDescParam(const acldvppChannelDesc *channelDesc,
+                                                        acldvppChannelDescParamType paramType, size_t length,
+                                                        size_t *paramRetSize, void *param);
+
+/**
+ * @ingroup AscendCL
+ * @brief set param for vdec channel desc
+ *
+ * @par Function
+ * set the attribute in channelDesc for the specified type
+ *
+ * @param channelDesc [OUT] the vdec channel description
+ * @param paramType [IN] specified param type
+ * @param length [IN] mem length of param
+ * @param param [IN] pointer to param
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecGetChannelDescParam | aclvdecCreateChannelDesc | aclvdecDestroyChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecSetChannelDescParam(aclvdecChannelDesc *channelDesc,
+                                                        aclvdecChannelDescParamType paramType, size_t length,
+                                                        const void *param);
+
+/**
+ * @ingroup AscendCL
+ * @brief get param of vdec channel desc
+ *
+ * @par Function
+ * get the attribute value in channelDesc for the specified type
+ *
+ * @param channelDesc [IN] the vdec channel description
+ * @param paramType [IN] specified param type
+ * @param length [IN] mem length allocated for output param
+ * @param paramRetSize [OUT] mem length of output param
+ * @param param [OUT] pointer to output param
+ *
+ * @retval ACL_SUCCESS The function is successfully executed.
+ * @retval OtherValues Failure
+ *
+ * @see aclvdecSetChannelDescParam | aclvdecCreateChannelDesc | aclvdecDestroyChannelDesc
+ */
+ACL_FUNC_VISIBILITY aclError aclvdecGetChannelDescParam(const aclvdecChannelDesc *channelDesc,
+                                                        aclvdecChannelDescParamType paramType, size_t length,
+                                                        size_t *paramRetSize, void *param);
 #ifdef __cplusplus
 }
 #endif
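The vdec getter mirrors the dvpp setter sketched earlier. A short sketch, not part of the patch; it assumes channelDesc came from the existing aclvdecCreateChannelDesc:

    uint32_t cscMatrix = 0;
    size_t retSize = 0;
    aclError ret = aclvdecGetChannelDescParam(channelDesc, ACL_VDEC_CSC_MATRIX_UINT32,
                                              sizeof(cscMatrix), &retSize, &cscMatrix);
    // on success, retSize holds the written length and cscMatrix an acldvppCscMatrix value
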
diff --git a/inc/external/ge/ge_ir_build.h b/inc/external/ge/ge_ir_build.h
index 04e059a1..729685a9 100644
--- a/inc/external/ge/ge_ir_build.h
+++ b/inc/external/ge/ge_ir_build.h
@@ -1,18 +1,18 @@
 /**
-* Copyright 2020 Huawei Technologies Co., Ltd
-
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-
-* http://www.apache.org/licenses/LICENSE-2.0
-
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 #ifndef INC_EXTERNAL_GE_IR_BUILD_H_
 #define INC_EXTERNAL_GE_IR_BUILD_H_
diff --git a/inc/external/hccl/hccl.h b/inc/external/hccl/hccl.h
index 8261adc4..c24b5374 100644
--- a/inc/external/hccl/hccl.h
+++ b/inc/external/hccl/hccl.h
@@ -144,6 +144,33 @@ extern HcclResult HcclGetRankId(HcclComm comm, uint32_t *rank);
  */
 extern HcclResult HcclBarrier(HcclComm comm, aclrtStream stream);
 
+/**
+ * @brief Send operator.
+ *
+ * @param sendBuf A pointer identifying the input data address of the operator.
+ * @param count An integer(u64) identifying the number of the send data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32.
+ * @param destRank An integer identifying the destination rank.
+ * @param comm A pointer identifying the communication resource.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclSend(void *sendBuf, uint64_t count, HcclDataType dataType, uint32_t destRank, HcclComm comm,
+                           aclrtStream stream);
+
+/**
+ * @brief Receive operator.
+ *
+ * @param recvBuf A pointer identifying the output data address of the operator.
+ * @param count An integer(u64) identifying the number of the receive data.
+ * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32.
+ * @param srcRank An integer identifying the source rank.
+ * @param comm A pointer identifying the communication resource.
+ * @param stream A pointer identifying the stream information.
+ * @return HcclResult
+ */
+extern HcclResult HcclRecv(void *recvBuf, uint64_t count, HcclDataType dataType, uint32_t srcRank, HcclComm comm,
+                           aclrtStream stream);
+
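For orientation, a point-to-point sketch built on the two new calls. Not part of the patch; it assumes comm was created with the existing HcclCommInitRootInfo, stream with aclrtCreateStream, and that devBuf/count describe a device buffer; rank is this process's rank:

    if (rank == 0) {
      (void)HcclSend(devBuf, count, HCCL_DATA_TYPE_FP32, 1 /* destRank */, comm, stream);
    } else if (rank == 1) {
      (void)HcclRecv(devBuf, count, HCCL_DATA_TYPE_FP32, 0 /* srcRank */, comm, stream);
    }
    (void)aclrtSynchronizeStream(stream);  // HCCL ops are stream-ordered and asynchronous
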
 /**
  * @brief Destroy HCCL comm
  *
diff --git a/inc/framework/executor/ge_executor.h b/inc/framework/executor/ge_executor.h
index fcca561c..ce7c82ac 100644
--- a/inc/framework/executor/ge_executor.h
+++ b/inc/framework/executor/ge_executor.h
@@ -50,14 +50,30 @@ class GE_FUNC_VISIBILITY GeExecutor {
  public:
   GeExecutor();
   ~GeExecutor() = default;
-  ge::Status Initialize();
-  ge::Status Finalize();
-  ge::Status UnloadModel(uint32_t modelId);
+  Status Initialize();
+  Status Finalize();
+
+  ///
+  /// @ingroup ge
+  /// @brief Initialize global execute environment.
+  /// @param [in] options: environment variables.
+  /// @return init result
+  ///
+  static Status Initialize(const std::map<std::string, std::string> &options);
+
+  ///
+  /// @ingroup ge
+  /// @brief Finalize global execute environment.
+  /// @return execute result
+  ///
+  static Status FinalizeEx();
+
+  Status UnloadModel(uint32_t modelId);
 
   // Get input and output descriptor
-  ge::Status GetModelDescInfo(uint32_t model_id, std::vector<ge::TensorDesc> &input_desc,
-                              std::vector<ge::TensorDesc> &output_desc, bool new_model_desc = false);
+  Status GetModelDescInfo(uint32_t model_id, std::vector<TensorDesc> &input_desc, std::vector<TensorDesc> &output_desc,
+                          bool new_model_desc = false);
 
   ///
   /// @ingroup ge
@@ -68,7 +84,7 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [in] batch_size: batch size entered by user in dynamic multi-batch scenario
   /// @return execute result
   ///
-  ge::Status SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t batch_size);
+  Status SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t batch_size);
 
   ///
   /// @ingroup ge
@@ -80,8 +96,8 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [in] image_width: image width entered by user in dynamic multi-resolution scenario
   /// @return execute result
   ///
-  ge::Status SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height,
-                                 uint64_t image_width);
+  Status SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height,
+                             uint64_t image_width);
 
   ///
   /// @ingroup ge
@@ -93,8 +109,8 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [in] dynamic_dims: array of dynamic dimensions
   /// @return execute result
   ///
-  ge::Status SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
-                            const std::vector<uint64_t> &dynamic_dims);
+  Status SetDynamicDims(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
+                        const std::vector<uint64_t> &dynamic_dims);
 
   ///
   /// @ingroup ge
@@ -104,8 +120,8 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [out] cur_dynamic_dims: current dynamic dims
   /// @return execute result
   ///
-  ge::Status GetCurDynamicDims(uint32_t model_id, const std::vector<uint64_t> &dynamic_dims,
-                               std::vector<uint64_t> &cur_dynamic_dims);
+  Status GetCurDynamicDims(uint32_t model_id, const std::vector<uint64_t> &dynamic_dims,
+                           std::vector<uint64_t> &cur_dynamic_dims);
 
   ///
   /// @ingroup ge
@@ -115,8 +131,7 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [out] dynamic_type
   /// @return execute result
   ///
-  ge::Status GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info,
-                                 int32_t &dynamic_type);
+  Status GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info, int32_t &dynamic_type);
 
   ///
   /// @ingroup ge
@@ -125,7 +140,7 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [out] batch_info
   /// @return execute result
   ///
-  ge::Status GetCombinedDynamicDims(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);
+  Status GetCombinedDynamicDims(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);
 
   ///
   /// @ingroup ge
@@ -134,9 +149,9 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [out] user_designate_shape_order
   /// @return execute result
   ///
-  ge::Status GetUserDesignateShapeOrder(uint32_t model_id, std::vector<std::string> &user_designate_shape_order);
+  Status GetUserDesignateShapeOrder(uint32_t model_id, std::vector<std::string> &user_designate_shape_order);
 
-  ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);
+  Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);
 
   ///
   /// @ingroup ge
@@ -148,22 +163,22 @@ class GE_FUNC_VISIBILITY GeExecutor {
   /// @param [in] aippParms: kAippDynamicPara by user in dynamic aipp
   /// @return execute result
   ///
-  ge::Status SetDynamicAippData(uint32_t model_id, void
*dynamic_input_addr, uint64_t length, - const std::vector &aippBatchPara, - const kAippDynamicPara &aippParms); + Status SetDynamicAippData(uint32_t model_id, void *dynamic_input_addr, uint64_t length, + const std::vector &aipp_batch_para, + const kAippDynamicPara &aippParms); - ge::Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info); + Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info); - ge::Status GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name, - std::string &attr_value); + Status GetOpAttr(uint32_t model_id, const std::string &op_name, const std::string &attr_name, + std::string &attr_value); - ge::Status GetModelAttr(uint32_t model_id, std::vector &dynamic_output_shape_info); + Status GetModelAttr(uint32_t model_id, std::vector &dynamic_output_shape_info); - ge::Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index); + Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index); - ge::Status CommandHandle(const ge::Command &command); + Status CommandHandle(const Command &command); - ge::Status SetDump(const DumpConfig &dump_config); + Status SetDump(const DumpConfig &dump_config); /// /// @ingroup ge @@ -173,7 +188,7 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @return SUCCESS /// @return FAILED /// - ge::Status GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size); + Status GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size); /// /// @ingroup ge @@ -182,7 +197,7 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] ModelData &model_data: Offline model memory data /// @return SUCCESS handle successfully / others handle failed /// - ge::Status LoadDataFromFile(const std::string &path, ge::ModelData &model_data); + Status LoadDataFromFile(const std::string &path, ModelData &model_data); /// /// @ingroup ge @@ -195,8 +210,8 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] uint32_t &model_id: Corresponding identification after model loading /// @return SUCCESS handle successfully / others handle failed /// - ge::Status LoadModelFromData(uint32_t &model_id, const ge::ModelData &model_data, void *dev_ptr, size_t mem_size, - void *weight_ptr, size_t weight_size); + Status LoadModelFromData(uint32_t &model_id, const ModelData &model_data, void *dev_ptr, size_t mem_size, + void *weight_ptr, size_t weight_size); /// /// @ingroup ge @@ -207,9 +222,8 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [in] output_queue_ids: input queue ids create from user. 
/// @return: 0 for success / others for fail /// - ge::Status LoadModelWithQ(uint32_t &model_id, const ge::ModelData &model_data, - const std::vector &input_queue_ids, - const std::vector &output_queue_ids); + Status LoadModelWithQ(uint32_t &model_id, const ModelData &model_data, const std::vector &input_queue_ids, + const std::vector &output_queue_ids); /// /// @ingroup ge @@ -221,8 +235,8 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] domi::OutputData *output_data: Model output data /// @return SUCCESS handle successfully / others handle failed /// - ge::Status ExecModel(uint32_t model_id, void *stream, const ge::RunModelData &input_data, - ge::RunModelData &output_data, bool async_mode = false); + Status ExecModel(uint32_t model_id, void *stream, const RunModelData &input_data, RunModelData &output_data, + bool async_mode = false); /// /// @ingroup ge @@ -236,9 +250,9 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] std::vector &output_desc: description of model output data /// @return SUCCESS handle successfully / others handle failed /// - ge::Status ExecModel(uint32_t model_id, void *stream, const ge::RunModelData &run_input_data, - const std::vector &input_desc, ge::RunModelData &run_output_data, - std::vector &output_desc, bool async_mode = false); + Status ExecModel(uint32_t model_id, void *stream, const RunModelData &run_input_data, + const std::vector &input_desc, RunModelData &run_output_data, + std::vector &output_desc, bool async_mode = false); /// /// @ingroup ge @@ -248,7 +262,7 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] size_t &weight_size Weight memory space size /// @return SUCCESS handle successfully / others handle failed /// - ge::Status GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size); + Status GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size); /// /// @ingroup ge @@ -259,39 +273,39 @@ class GE_FUNC_VISIBILITY GeExecutor { /// @param [out] size_t &weight_size Weight memory space size /// @return SUCCESS handle successfully / others handle failed /// - ge::Status GetMemAndWeightSize(const void *model_data, size_t model_size, size_t &mem_size, size_t &weight_size); + Status GetMemAndWeightSize(const void *model_data, size_t model_size, size_t &mem_size, size_t &weight_size); - static ge::Status LoadSingleOp(const std::string &modelName, const ge::ModelData &modelData, void *stream, - SingleOp **single_op); + static Status LoadSingleOp(const std::string &modelName, const ModelData &modelData, void *stream, + SingleOp **single_op); - static ge::Status LoadSingleOpV2(const std::string &modelName, const ge::ModelData &modelData, void *stream, - SingleOp **single_op, const uint64_t model_id); + static Status LoadSingleOpV2(const std::string &modelName, const ModelData &modelData, void *stream, + SingleOp **single_op, const uint64_t model_id); - static ge::Status ExecuteAsync(SingleOp *executor, const std::vector &inputs, - std::vector &outputs); + static Status ExecuteAsync(SingleOp *executor, const std::vector &inputs, + std::vector &outputs); - static ge::Status LoadDynamicSingleOp(const std::string &model_name, const ge::ModelData &modelData, void *stream, - DynamicSingleOp **single_op); + static Status LoadDynamicSingleOp(const std::string &model_name, const ModelData &modelData, void *stream, + DynamicSingleOp **single_op); - static ge::Status LoadDynamicSingleOpV2(const std::string &model_name, const ge::ModelData &modelData, void *stream, - DynamicSingleOp 
**single_op, const uint64_t model_id); + static Status LoadDynamicSingleOpV2(const std::string &model_name, const ModelData &modelData, void *stream, + DynamicSingleOp **single_op, const uint64_t model_id); - static ge::Status ExecuteAsync(DynamicSingleOp *executor, const std::vector &input_desc, - const std::vector &inputs, std::vector &output_desc, - std::vector &outputs); + static Status ExecuteAsync(DynamicSingleOp *executor, const std::vector &input_desc, + const std::vector &inputs, std::vector &output_desc, + std::vector &outputs); - static ge::Status ReleaseSingleOpResource(void *stream); + static Status ReleaseSingleOpResource(void *stream); - static ge::Status GetDeviceIdByModelId(uint32_t model_id, uint32_t &device_id); + static Status GetDeviceIdByModelId(uint32_t model_id, uint32_t &device_id); - ge::Status GetBatchInfoSize(uint32_t model_id, size_t &shape_count); - ge::Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info); - ge::Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector &input_dims, - std::vector &output_dims); - ge::Status GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info); + Status GetBatchInfoSize(uint32_t model_id, size_t &shape_count); + Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info); + Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector &input_dims, + std::vector &output_dims); + Status GetOpDescInfo(uint32_t device_id, uint32_t stream_id, uint32_t task_id, OpDescInfo &op_desc_info); private: - static bool isInit_; + static std::atomic_bool is_inited_; }; } // namespace ge diff --git a/inc/framework/ge_runtime/task_info.h b/inc/framework/ge_runtime/task_info.h index 4530bff7..abc4783d 100644 --- a/inc/framework/ge_runtime/task_info.h +++ b/inc/framework/ge_runtime/task_info.h @@ -50,10 +50,18 @@ enum TaskInfoType { class TaskInfo { public: virtual ~TaskInfo() {} - uint32_t stream_id() const { return stream_id_; } - TaskInfoType type() const { return type_; } - std::string op_name() const { return op_name_; } - bool dump_flag() const { return dump_flag_; } + uint32_t stream_id() const { + return stream_id_; + } + TaskInfoType type() const { + return type_; + } + std::string op_name() const { + return op_name_; + } + bool dump_flag() const { + return dump_flag_; + } protected: TaskInfo(const std::string &op_name, uint32_t stream_id, TaskInfoType type, bool dump_flag) @@ -84,15 +92,33 @@ class CceTaskInfo : public TaskInfo { is_flowtable_(is_flowtable) {} ~CceTaskInfo() override {} - cce::ccOpContext cc_context() const { return ctx_; } - std::string stub_func() const { return stub_func_; } - uint32_t block_dim() const { return block_dim_; } - const std::vector &args() const { return args_; } - uint32_t args_size() const { return args_size_; } - const std::vector &sm_desc() const { return sm_desc_; } - const std::vector &flow_table() const { return flow_table_; } - const std::vector &args_offset() const { return args_offset_; } - bool is_flowtable() const { return is_flowtable_; } + cce::ccOpContext cc_context() const { + return ctx_; + } + std::string stub_func() const { + return stub_func_; + } + uint32_t block_dim() const { + return block_dim_; + } + const std::vector &args() const { + return args_; + } + uint32_t args_size() const { + return args_size_; + } + const std::vector &sm_desc() const { + return sm_desc_; + } + const std::vector &flow_table() const { + return 
flow_table_; + } + const std::vector &args_offset() const { + return args_offset_; + } + bool is_flowtable() const { + return is_flowtable_; + } private: cce::ccOpContext ctx_; @@ -126,17 +152,39 @@ class TbeTaskInfo : public TaskInfo { workspace_addrs_(workspace_addrs) {} ~TbeTaskInfo() override {} - const std::string &stub_func() const { return stub_func_; } - uint32_t block_dim() const { return block_dim_; } - const std::vector &args() const { return args_; } - uint32_t args_size() const { return args_size_; } - const std::vector &sm_desc() const { return sm_desc_; } - void *binary() const { return binary_; } - uint32_t binary_size() const { return binary_size_; } - const std::vector &meta_data() const { return meta_data_; } - const std::vector &input_data_addrs() const { return input_data_addrs_; } - const std::vector &output_data_addrs() const { return output_data_addrs_; } - const std::vector &workspace_addrs() const { return workspace_addrs_; } + const std::string &stub_func() const { + return stub_func_; + } + uint32_t block_dim() const { + return block_dim_; + } + const std::vector &args() const { + return args_; + } + uint32_t args_size() const { + return args_size_; + } + const std::vector &sm_desc() const { + return sm_desc_; + } + void *binary() const { + return binary_; + } + uint32_t binary_size() const { + return binary_size_; + } + const std::vector &meta_data() const { + return meta_data_; + } + const std::vector &input_data_addrs() const { + return input_data_addrs_; + } + const std::vector &output_data_addrs() const { + return output_data_addrs_; + } + const std::vector &workspace_addrs() const { + return workspace_addrs_; + } void SetBinary(void *binary, uint32_t binary_size) { binary_ = binary; @@ -171,12 +219,24 @@ class AicpuTaskInfo : public TaskInfo { output_data_addrs_(output_data_addrs) {} ~AicpuTaskInfo() override {} - const std::string &so_name() const { return so_name_; } - const std::string &kernel_name() const { return kernel_name_; } - const std::string &node_def() const { return node_def_; } - const std::vector &input_data_addrs() const { return input_data_addrs_; } - const std::vector &output_data_addrs() const { return output_data_addrs_; } - const std::string &ext_info() const { return ext_info_; } + const std::string &so_name() const { + return so_name_; + } + const std::string &kernel_name() const { + return kernel_name_; + } + const std::string &node_def() const { + return node_def_; + } + const std::vector &input_data_addrs() const { + return input_data_addrs_; + } + const std::vector &output_data_addrs() const { + return output_data_addrs_; + } + const std::string &ext_info() const { + return ext_info_; + } private: std::string so_name_; @@ -192,7 +252,9 @@ class LabelSetTaskInfo : public TaskInfo { LabelSetTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t label_id) : TaskInfo(op_name, stream_id, TaskInfoType::LABEL_SET, false), label_id_(label_id) {} ~LabelSetTaskInfo() override {} - uint32_t label_id() const { return label_id_; } + uint32_t label_id() const { + return label_id_; + } private: uint32_t label_id_; @@ -203,7 +265,9 @@ class LabelGotoTaskInfo : public TaskInfo { LabelGotoTaskInfo(const std::string &op_name, uint32_t stream_id, uint32_t label_id) : TaskInfo(op_name, stream_id, TaskInfoType::LABEL_GOTO, false), label_id_(label_id) {} ~LabelGotoTaskInfo() override {} - uint32_t label_id() const { return label_id_; } + uint32_t label_id() const { + return label_id_; + } private: uint32_t label_id_; @@ -218,9 +282,15 @@ 
class LabelSwitchTaskInfo : public TaskInfo { label_list_(label_list), cond_(cond) {} ~LabelSwitchTaskInfo() override {} - uint32_t label_size() const { return label_size_; } - const std::vector &label_list() const { return label_list_; } - void *cond() const { return cond_; } + uint32_t label_size() const { + return label_size_; + } + const std::vector &label_list() const { + return label_list_; + } + void *cond() const { + return cond_; + } private: uint32_t label_size_; @@ -230,7 +300,9 @@ class LabelSwitchTaskInfo : public TaskInfo { class EventTaskInfo : public TaskInfo { public: - uint32_t event_id() const { return event_id_; } + uint32_t event_id() const { + return event_id_; + } protected: EventTaskInfo(const std::string &op_name, uint32_t stream_id, TaskInfoType type, uint32_t event_id) @@ -271,14 +343,13 @@ class FusionEndTaskInfo : public TaskInfo { class HcclTaskInfo : public TaskInfo { public: HcclTaskInfo(const std::string &op_name, uint32_t stream_id, const std::string hccl_type, void *input_data_addr, - void *output_data_addr, void *workspace_addr, int64_t workspace_size, int64_t hccl_stream_num, + void *output_data_addr, int64_t workspace_size, int64_t hccl_stream_num, const std::vector &private_def, void *ops_kernel_store, int32_t count, int64_t root_id, int64_t op_type, int64_t data_type, const std::string &group, bool dump_flag) : TaskInfo(op_name, stream_id, TaskInfoType::HCCL, dump_flag), hccl_type_(hccl_type), input_data_addr_(input_data_addr), output_data_addr_(output_data_addr), - workspace_addr_(workspace_addr), workspace_size_(workspace_size), hccl_stream_num_(hccl_stream_num), private_def_(private_def), @@ -290,25 +361,47 @@ class HcclTaskInfo : public TaskInfo { group_(group) {} ~HcclTaskInfo() override {} - const std::string &hccl_type() const { return hccl_type_; } - void *input_data_addr() const { return input_data_addr_; } - void *output_data_addr() const { return output_data_addr_; } - void *workspace_addr() const { return workspace_addr_; } - int64_t workspace_size() const { return workspace_size_; } - int64_t hccl_stream_num() const { return hccl_stream_num_; } - const std::vector &private_def() const { return private_def_; } - void *ops_kernel_store() const { return ops_kernel_store_; } - int32_t count() const { return count_; } - int64_t root_id() const { return root_id_; } - int64_t op_type() const { return op_type_; } - int64_t data_type() const { return data_type_; } - const std::string &group() const { return group_; } + const std::string &hccl_type() const { + return hccl_type_; + } + void *input_data_addr() const { + return input_data_addr_; + } + void *output_data_addr() const { + return output_data_addr_; + } + int64_t workspace_size() const { + return workspace_size_; + } + int64_t hccl_stream_num() const { + return hccl_stream_num_; + } + const std::vector &private_def() const { + return private_def_; + } + void *ops_kernel_store() const { + return ops_kernel_store_; + } + int32_t count() const { + return count_; + } + int64_t root_id() const { + return root_id_; + } + int64_t op_type() const { + return op_type_; + } + int64_t data_type() const { + return data_type_; + } + const std::string &group() const { + return group_; + } private: std::string hccl_type_; void *input_data_addr_; void *output_data_addr_; - void *workspace_addr_; int64_t workspace_size_; int64_t hccl_stream_num_; std::vector private_def_; @@ -329,9 +422,15 @@ class ProfilerTraceTaskInfo : public TaskInfo { flat_(flat) {} ~ProfilerTraceTaskInfo() override {} - uint64_t 
log_id() const { return log_id_; } - bool notify() const { return notify_; } - uint32_t flat() const { return flat_; } + uint64_t log_id() const { + return log_id_; + } + bool notify() const { + return notify_; + } + uint32_t flat() const { + return flat_; + } private: uint64_t log_id_; @@ -351,11 +450,21 @@ class MemcpyAsyncTaskInfo : public TaskInfo { kind_(kind) {} ~MemcpyAsyncTaskInfo() override {} - void *dst() const { return dst_; } - uint64_t dst_max() const { return dst_max_; } - void *src() const { return src_; } - uint64_t count() const { return count_; } - uint32_t kind() const { return kind_; } + void *dst() const { + return dst_; + } + uint64_t dst_max() const { + return dst_max_; + } + void *src() const { + return src_; + } + uint64_t count() const { + return count_; + } + uint32_t kind() const { + return kind_; + } private: void *dst_; @@ -377,11 +486,21 @@ class StreamSwitchTaskInfo : public TaskInfo { data_type_(data_type) {} ~StreamSwitchTaskInfo() override {} - int64_t true_stream_id() const { return true_stream_id_; } - void *input_addr() const { return input_addr_; } - void *value_addr() const { return value_addr_; } - int64_t cond() const { return cond_; } - int64_t data_type() const { return data_type_; } + int64_t true_stream_id() const { + return true_stream_id_; + } + void *input_addr() const { + return input_addr_; + } + void *value_addr() const { + return value_addr_; + } + int64_t cond() const { + return cond_; + } + int64_t data_type() const { + return data_type_; + } private: int64_t true_stream_id_; @@ -397,7 +516,9 @@ class StreamActiveTaskInfo : public TaskInfo { : TaskInfo(op_name, stream_id, TaskInfoType::STREAM_ACTIVE, false), active_stream_id_(active_stream_id) {} ~StreamActiveTaskInfo() override {} - uint32_t active_stream_id() const { return active_stream_id_; } + uint32_t active_stream_id() const { + return active_stream_id_; + } private: uint32_t active_stream_id_; diff --git a/third_party/fwkacllib/inc/ops/array_ops.h b/third_party/fwkacllib/inc/ops/array_ops.h index fd35b546..450c893e 100644 --- a/third_party/fwkacllib/inc/ops/array_ops.h +++ b/third_party/fwkacllib/inc/ops/array_ops.h @@ -35,7 +35,7 @@ namespace ge { * @li values:A `Tensor`. Must have the same type as `sorted_x`. \n *@par Attributes: -*@li out_type:An optional `DType` from: `int32, int64`. +*out_type:An optional `DType` from: `int32, int64`. Defaults to `int32`. \n *@par Outputs: @@ -504,7 +504,7 @@ REG_OP(Constant) *x: A tensor. \n *@par Outputs: -*y: A tensor. \n +*y: A copy of input tensor. \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator Snapshot. @@ -684,7 +684,9 @@ REG_OP(ExpandDims) *@par Inputs: *@li x: Original tensor. -*@li axis: List of ints. \n + +*@par Attributes: +*@li axes: List of ints indicating the dimensions to be inserted. \n *@par Outputs: *y: Reshape tensor with same data as input. \n @@ -755,10 +757,10 @@ REG_OP(Squeeze) *@brief Returns an integer representing the rank of input tensor. The rank of a tensor is the number of indices required to uniquely select each element of the tensor, that is, the dimension size of the tensor. \n *@par Inputs: -*x: A tensor. \n +*x: A Tensor of type float32, float16, int8, int16, uint16, uint8, int32, int64, uint32, uint64, bool, double. \n *@par Outputs: -*y: A tensor. The rank of input tensor. \n +*y: A tensor. The rank of input tensor. Type is int32. \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator Rank. 
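For orientation, ops registered this way are consumed through the C++ wrappers that REG_OP generates in namespace ge::op. A sketch, not part of the patch; it assumes the generated Rank and Data classes and the usual set_input_<name> setters of the GE IR builder:

    auto x = ge::op::Data("x");        // placeholder graph input
    auto rank = ge::op::Rank("rank");  // output y: int32 scalar, the number of dimensions of x
    rank.set_input_x(x);
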
@@ -848,7 +850,6 @@
 *x: A tensor. \n
 
 *@par Attributes:
-*@li dtype: data type of tensor.
 *@li shape: tensor shape. \n
 
 *@par Outputs:
@@ -867,13 +868,13 @@
 *@brief Reads and returns the value of the input variable tensor. \n
 
 *@par Inputs:
-*x: A tensor. \n
+*x: A tensor must have numeric type. \n
 
 *@par Attributes:
 *dtype: An optional int32 or int64. The output data type. Defaults to int32. \n
 
 *@par Outputs:
-*y: A tensor. \n
+*y: A tensor must have numeric type. \n
 
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator ReadVariableOp.
@@ -1134,10 +1135,10 @@
 This is an M-length vector.
 This is an R-length vector
 
 *@par Attributes:
-*@li normalize: boolean (if true, edit distances are normalized by length of truth). \n
+*normalize: boolean (if true, edit distances are normalized by length of truth). \n
 
 *@par Outputs:
-*@li output: A dense float tensor with rank R - 1. \n
+*output: A dense float tensor with rank R - 1. \n
 
 *@par Third-party framework compatibility
 * Compatible with TensorFlow EditDistance operator.
@@ -1154,18 +1155,17 @@ REG_OP(EditDistance)
   .OP_END_FACTORY_REG(EditDistance)
 
 /**
-* @brief sort_v2.
+* @brief sort the input tensor without returning the value of index.
 
 * @par Inputs:
-* @li x: An ND tensor of type float16.
+* x: An ND tensor of type float16.
 
 * @par Attributes:
-
 * @li axis: An optional int. The dimension to sort along. This value defaults to -1.
 * @li descending: An optional bool. Controls the sorting order (ascending or descending). This value defaults to False.
 
 * @par Outputs:
-* @li y: An ND tensor of type float16.
+* y: An ND tensor of type float16.
 
 * @attention Constraints:
 * @li Axis should select the last dim.
@@ -1206,7 +1206,7 @@ REG_OP(Expand)
 *@Returns a tensor containing the indices of all non-zero elements of input. \n
 
 *@par Inputs:
-*@li x: A Tensor. Must be one of the following types: float16, float32, int32, int64.
+*x: A Tensor. Must be one of the following types: float16, float32, int32, int64.
 
 *@par Attributes:
 * transpose: the output tensor will be transposed if true. \n
@@ -1230,15 +1230,15 @@ REG_OP(NonZero)
 * @par Inputs:
 * One inputs, including:
-* @li x: A Tensor. Must be one of the following types:
+* x: A Tensor. Must be one of the following types:
 * float16, float32, int32, int8 ,uint8. \n
 
 * @par Attributes:
-* @li shape: A required listInt to specify the shape that the input tensor expanded to. \n
+* shape: A required listInt to specify the shape that the input tensor expanded to. \n
 
 * @par Outputs:
-* @li y: A Tensor. Has the same type as "x", and the shape specified by input and attr shape \n
+* y: A Tensor. Has the same type as "x", and the shape specified by input and attr shape \n
 
 * @par Third-party framework compatibility
 * Compatible with the ONNX operator Expand.
@@ -1249,6 +1249,38 @@ REG_OP(ExpandD)
   .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
   .REQUIRED_ATTR(shape, ListInt)
   .OP_END_FACTORY_REG(ExpandD)
+
+/**
+*@brief Finds unique elements in a 1D tensor. \n
+
+*@par Inputs:
+*x: 1D tensor. Must be one of the following types:
+* float16, float32, double, int64, int32, int16, uint16, int8 ,uint8. \n
+
+*@par Attributes:
+*@li return_inverse: Whether to also return the indices for where elements in the original
+* input ended up in the returned unique list.
+*@li return_counts: Whether to also return the counts for each unique element.
+
+*@par Outputs:
+*@li y1: The output list of unique scalar elements. Has the same type as "x".
Has the same type as "x".
+*@li y2: The indices of where each element of the original input maps to in the output.
+*@li y3: The number of occurrences of each unique value. \n
+
+* @par Third-party framework compatibility
+* Compatible with the torch operator _unique2.
+*/
+
+REG_OP(UniqueWithCountsAndSorting)
+    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
+    DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
+    .OUTPUT(y1, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
+    DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
+    .OUTPUT(y2, TensorType({ DT_INT32, DT_INT64 }))
+    .OUTPUT(y3, TensorType({ DT_INT32, DT_INT64 }))
+    .ATTR(return_inverse, Bool, false)
+    .ATTR(return_counts, Bool, false)
+    .OP_END_FACTORY_REG(UniqueWithCountsAndSorting)
} // namespace ge

#endif // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/control_flow_ops.h b/third_party/fwkacllib/inc/ops/control_flow_ops.h
index e5bd3534..cd993599 100644
--- a/third_party/fwkacllib/inc/ops/control_flow_ops.h
+++ b/third_party/fwkacllib/inc/ops/control_flow_ops.h
@@ -96,7 +96,7 @@ REG_OP(RefMerge)
 * Otherwise, the data is forwarded to "output_false" . \n

 *@par Inputs:
- *@li data: The tensor to be forwarded. \n
+ *@li data: The tensor to be forwarded.
 * Must be one of the following types: float16, float32, float64,
 * int8, int16, int32, int64, uint8, uint16, uint32, uint64, bool.
 *@li pred: A boolean scalar. The output port that will receive data . \n
diff --git a/third_party/fwkacllib/inc/ops/ctc_ops.h b/third_party/fwkacllib/inc/ops/ctc_ops.h
index e907b828..bbc610ff 100644
--- a/third_party/fwkacllib/inc/ops/ctc_ops.h
+++ b/third_party/fwkacllib/inc/ops/ctc_ops.h
@@ -74,7 +74,7 @@ REG_OP(CTCLoss)
*@li sequence_length: A vector containing sequence lengths, size `(batch_size)`. \n

*@par Attributes:
-*@li merge_repeated: If True, merge repeated classes in output. \n
+* merge_repeated: If True, merge repeated classes in output. \n

*@par Outputs:
*@li decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
@@ -108,6 +108,8 @@ REG_OP(CTCGreedyDecoder)

*@par Attributes:
*@li merge_repeated: If True, merge repeated classes in output. \n
+*@li beam_width: A scalar >= 0 (beam search beam width).
+*@li top_paths: A scalar >= 0, <= beam_width (controls output size).

*@par Outputs:
*@li decoded_indices: A list (length: top_paths) of indices matrices. Matrix j,
@@ -162,7 +164,7 @@ REG_OP(CTCBeamSearchDecoder)
* Compatible with Pytorch CTCLoss operator.

*@par Restrictions:
-*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*The length of Label should be in [4, 1000].
*/
REG_OP(CTCLossV2)
    .INPUT(log_probs, TensorType({DT_FLOAT, DT_DOUBLE}))
@@ -203,7 +205,7 @@ REG_OP(CTCLossV2)
* Compatible with Pytorch CTCLoss operator.

*@par Restrictions:
-*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*The limit of Label's length is 1K.
*/
REG_OP(CTCLossV2Grad)
    .INPUT(grad_out, TensorType({DT_FLOAT, DT_DOUBLE}))
diff --git a/third_party/fwkacllib/inc/ops/data_flow_ops.h b/third_party/fwkacllib/inc/ops/data_flow_ops.h
index 6021f4e3..32454d27 100644
--- a/third_party/fwkacllib/inc/ops/data_flow_ops.h
+++ b/third_party/fwkacllib/inc/ops/data_flow_ops.h
@@ -1201,6 +1201,8 @@ REG_OP(TensorArraySize)
*@brief A queue implementation that dequeues elements in a random order. \n

*@par Attributes:
+*@li component_types: A list of fully-defined TensorType objects with
+the same length as shapes, or None.
*@li shapes: (Optional.)
A list of fully-defined TensorShape objects with the same length as dtypes, or None. *@li capacity: An integer. The upper bound on the number of elements that may @@ -1281,6 +1283,7 @@ The length of this attr must be either 0 or the same as the length of elements are not constrained, and only one element may be dequeued at a time. *@li container: An optional string. Defaults to "". If non-empty, this queue is placed in the given container. Otherwise, a default container is used. +*@li capacity:An integer. The upper bound on the number of elements that may be stored in this queue. *@li shared_name: An optional string. Defaults to "". If non-empty, this queue will be shared under the given name across multiple sessions. \n @@ -1435,7 +1438,7 @@ REG_OP(OrderedMapClear) *@par Inputs: *Including: -* @li resource: A Tensor of type DT_RESOURCE. +* resource: A Tensor of type DT_RESOURCE. *@par Outputs: *handle: A Tensor of type DT_STRING ref. \n @@ -1526,7 +1529,7 @@ REG_OP(OrderedMapPeek) *@par Inputs: *Including: -* @li indices: A Tensor of type DT_INT32. \n +* indices: A Tensor of type DT_INT32. \n *@par Attributes: *@li capacity: An optional int that is >= 0. Defaults to "0". @@ -2331,6 +2334,40 @@ REG_OP(CacheAllIndexToLocal) .REQUIRED_ATTR(dtype, Type) .OP_END_FACTORY_REG(CacheAllIndexToLocal) +/** +*@brief LRUCacheV2, aicore LRUCache. +*@par Inputs: +*index_list: exchange index list +*data: host data +*cache: gm cache +*tag: cache's tag +*is_last_call: if is last call write all cache to data +*@par Outputs: +*data: output data +*cache: gm cache +*tag: cache's tag +*index_offset_list: index_offset_list +*not_in_cache_index_list: output not in cache's index_list +*not_in_cache_number: scalar +*@par Attributes: +*pre_route_count: types of all outputs +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ +REG_OP(LRUCacheV2) + .INPUT(index_list, TensorType::BasicType()) + .INPUT(data, TensorType::BasicType()) + .INPUT(cache, TensorType::BasicType()) + .INPUT(tag, TensorType::BasicType()) + .INPUT(is_last_call, TensorType::BasicType()) + .OUTPUT(data, TensorType::BasicType()) + .OUTPUT(cache, TensorType::BasicType()) + .OUTPUT(tag, TensorType::BasicType()) + .OUTPUT(index_offset_list, TensorType::BasicType()) + .OUTPUT(not_in_cache_index_list, TensorType::BasicType()) + .OUTPUT(not_in_cache_number, TensorType::BasicType()) + .REQUIRED_ATTR(pre_route_count, Int) + .OP_END_FACTORY_REG(LRUCacheV2) + /** *@brief DynamicGetNext, dynamic get next data *@par Inputs: diff --git a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h index f61e2939..b4299026 100644 --- a/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/elewise_calculation_ops.h @@ -624,9 +624,9 @@ REG_OP(Log1p) *@attention Constraints: *@li x2: The input data does not support 0 -*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the *requirement of double thousandths in the mini form -*@li Due to different architectures, the calculation results of this operator +*@li Due to different architectures, the calculation results of this operator *on NPU and CPU may be inconsistent *@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... 
*DN<=1000000,n<=8 @@ -2066,9 +2066,9 @@ REG_OP(FloorDiv) *@attention Constraints: *@li x2: The input data does not support 0 -*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the *requirement of double thousandths in the mini form -*@li Due to different architectures, the calculation results of this operator +*@li Due to different architectures, the calculation results of this operator *on NPU and CPU may be inconsistent *@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 @@ -2200,9 +2200,9 @@ REG_OP(Tan) *@attention Constraints: *@li x2: The input data does not support 0 -*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the +*@li When NUM exceeds 2048 , the accuracy of operator cannot guarantee the *requirement of double thousandths in the mini form -*@li Due to different architectures, the calculation results of this operator +*@li Due to different architectures, the calculation results of this operator *on NPU and CPU may be inconsistent *@li If shape is expressed as (D1,D2... ,Dn), then D1*D2... *DN<=1000000,n<=8 @@ -2467,11 +2467,11 @@ REG_OP(Eltwise) *@par Inputs: *One inputs, including: - * @li input_x: A tensor. Must be one of the following types: + * input_x: A tensor. Must be one of the following types: * float16, float32. \n *@par Outputs: - *y: A Tensor with the same type and shape of input_x's. \n + *output_y: A Tensor with the same type and shape of input_x's. \n *@par Third-party framework compatibility *Compatible with the Pytorch operator Erfinv. \n @@ -3154,13 +3154,13 @@ REG_OP(FusedMulAddNL2loss) *@brief Tests whether the input exceeds a threshold. \n *@par Inputs: -*@li x: A Tensor with any format. Must be one of the following types: float16, float32. \n +* x: A Tensor with any format. Must be one of the following types: float16, float32. \n *@par Attributes: -*@li threshold: A required float32. Defaults to "0.0". "x" is compared with "threshold", outputs "1" for inputs above threshold; "0" otherwise. \n +* threshold: A required float32. Defaults to "0.0". "x" is compared with "threshold", outputs "1" for inputs above threshold; "0" otherwise. \n *@par Outputs: -*@li y: A Tensor with any format. Has the same type as the input. Must be one of the following types: float16, float32. +* y: A Tensor with any format. Has the same type as the input. Must be one of the following types: float16, float32. *@par Third-party framework compatibility * Compatible with the Caffe operator Threshold. */ @@ -3175,7 +3175,7 @@ REG_OP(FusedMulAddNL2loss) *@brief Returns the index number corresponding to the maximum value entered. \n *@par Inputs: -*@li x: A tensor. Must be one of the following types: float16, float32. \n +*x: A tensor. Must be one of the following types: float16, float32. \n *@par Attributes: *@li axis: An optional int. Specify the axis to be cut at the input tensor. If this parameter is not provided, find the topk for each batch. Defaults to 10000 @@ -3203,12 +3203,11 @@ REG_OP(ArgMaxWithK) *@brief Multiply tensor with scale. \n *@par Inputs: -*Five inputs, including: -* @li x1: A Tensor. Must be one of the following types:int32,int16, float16, float32. -* @li x2: A scale. Must be float. \n +*One input, including: +*x: A Tensor. Must be one of the following types:int32,int16, float16, float32. *@par Outputs: -*@li y: A Tensor. Has the same type and shape as "x1". \n +*y: A Tensor. Has the same type and shape as "x1". 
\n *@par Third-party framework compatibility: * Compatible with the Pytorch operator muls. @@ -3223,12 +3222,11 @@ REG_OP(Muls) *@brief Fill tensor with scale. \n *@par Inputs: -*Five inputs, including: -* @li x1: A Tensor. Must be one of the following types:int32,int16, float16, float32. -* @li x2: A scale. Must be float. \n +*One input, including: +*x1: A Tensor. Must be one of the following types:int32,int16, float16, float32. *@par Outputs: -*@li y: A Tensor. Has the same type and shape as "x1". \n +*y: A Tensor. Has the same type and shape as "x1". \n *@par Third-party framework compatibility: * Compatible with the Pytorch operator fills. @@ -3378,7 +3376,7 @@ REG_OP(TensorMove) *@par Inputs: *One inputs, including: -* @li x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64. \n +*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64. \n *@par Outputs: *output_x: A Tensor. Has the same type as "x". \n @@ -3397,7 +3395,7 @@ REG_OP(TensorRedirect) * multiply the result by the scalar value and add it to tensor x1 * @par Inputs: -* Three inputs, including: +* Four inputs, including: * @li input_data: A mutable input Tensor. Must be one of the following types: * float16, float32. * @li x1: A mutable input Tensor of the same type as x1. @@ -3406,7 +3404,7 @@ REG_OP(TensorRedirect) * float16, float32, int32. \n * @par Outputs: -* @li y: A mutable Tensor. Has the same type as "x1". \n +* y: A mutable Tensor. Has the same type as "x1". \n * @par Third-party framework compatibility * Compatible with the Pytorch operator Addcdiv. @@ -3420,12 +3418,12 @@ REG_OP(Addcdiv) .OP_END_FACTORY_REG(Addcdiv) /** -* @brief Performs the element-wise multiplication of tensor x2 by tensor x3, -* multiply the result by the scalar value and add it to tensor input_data +* @brief Performs the element-wise multiplication of tensor x2 by tensor x3, +* multiply the result by the scalar value and add it to tensor input_data * @par Inputs: -* Three inputs, including: +* Four inputs, including: * @li input_data: A mutable input Tensor. Must be one of the following types: * float16, float32, int8, int32, uint8. * @li x1: A mutable input Tensor of the same type as x1. @@ -3433,7 +3431,7 @@ REG_OP(Addcdiv) * @li value: A tensor which includes only one element of the same type as x1. \n * @par Outputs: -* @li y: A mutable output Tensor. Has the same type as "x1". \n +* y: A mutable output Tensor. Has the same type as "x1". \n * @par Third-party framework compatibility * Compatible with the Pytorch operator Addcmul. @@ -3455,7 +3453,7 @@ REG_OP(Addcmul) * @li alpha: A scalar tensor of type float16, float32. \n * @par Outputs: -* @li y: An ND tensor tensor with the same shape and type as "x1". \n +* y: An ND tensor tensor with the same shape and type as "x1". \n * @par Third-party framework compatibility * Compatible with the Pytorch operator Axpy. @@ -3467,25 +3465,6 @@ REG_OP(AxpyV2) .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32})) .OP_END_FACTORY_REG(AxpyV2) -/** -* @brief Computes the result of x1 - x2. - -* @par Inputs: -* @li x1: An ND tensor of type float16, float, int32. -* @li x2: An ND tensor of type float16, float, int32. \n - -* @par Outputs: -* @li y: An ND tensor tensor with the same type as "x1". \n - -* @par Third-party framework compatibility -* Compatible with the Pytorch operator Sub. 
-*/
-REG_OP(PtSub)
-    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
-    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
-    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
-    .OP_END_FACTORY_REG(PtSub)
-
/**
* @brief Add the partial values of two tensors in format NC1HWC0.

@@ -3502,7 +3481,7 @@ REG_OP(PtSub)
* the difference between C1 and offset in "x1" and "x2". \n

* @par Outputs:
-* @li y: A Tensor of the same type as "x1", and the same shape as "x1",
+* y: A Tensor of the same type as "x1", and the same shape as "x1",
* except for the C1 value. Record the result after adding. \n
*/
REG_OP(StrideAdd)
@@ -3523,7 +3502,7 @@ REG_OP(StrideAdd)
* @li input_y: A Tensor. the second tensor. \n

* @par Outputs:
-* @li output_z: A Tensor. Bool type, compare result of the two inputs. \n
+*output_z: A Tensor. Bool type, compare result of the two inputs. \n

* @par Third-party framework compatibility
* Compatible with the Pytorch equal operator. \n
@@ -3535,21 +3514,21 @@ REG_OP(TensorEqual)
    .OP_END_FACTORY_REG(TensorEqual)

/**
- * @brief Element-wise min of each of the input tensors (with Numpy-style broadcasting support). 
- * All inputs and outputs must have the same data type. This operator supports multidirectional 
+ * @brief Element-wise max of each of the input tensors (with Numpy-style broadcasting support).
+ * All inputs and outputs must have the same data type. This operator supports multidirectional
 * (i.e., Numpy-style) broadcasting
- * 
- * @par inputs
+ *
+ * @par Inputs:
 * one input including:
- * @li x: dynamic input A Tensor. Must be one of the following types: float32, float16, double, int32, int64
- * 
- * @par output
+ * x: A dynamic input Tensor. Must be one of the following types: float32, float16, double, int32, int64
+ *
+ * @par Outputs:
 * one output including:
- * @li y:A Tensor of the same type as x
- * 
+ * y: A Tensor of the same type as x
+ *
 */
REG_OP(MaxN)
-    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64})) 
+    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64, DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(MaxN)

@@ -3634,16 +3613,16 @@ REG_OP(DataCompare)
*which Hardmax will be performed.The output tensor has the same shape and contains the Hardmax values of the
*corresponding input.
*
-*@par inputs
+*@par Inputs:
*one input including:
-*@li x: input A Tensor.Must be one of the following types:float32,float16
+*x: An input Tensor. Must be one of the following types: float32, float16
*
*@par Attributes:
-*@li axis:A required int attribute that decides which dimension will be used to cal the hard_max
+*axis: A required int attribute that decides which dimension will be used to calculate the hardmax
*
-*@par output:
+*@par Outputs:
*one output including:
-*@li y:A Tensor of the same type as x
+*y: A Tensor of the same type as x
*
*/
REG_OP(HardMax)
@@ -3661,7 +3640,7 @@ REG_OP(HardMax)
* @li input_y: A Tensor. the second tensor must be 1d. \n

* @par Outputs:
-* @li output: A Tensor. Result of the two inputs, must be 1d. \n
+* output: A Tensor. The dot-product result of the two inputs; must be 1d. \n

* @par Third-party framework compatibility
* Compatible with the Pytorch dot operator.
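As a reading aid for the Dot contract above (two 1-D inputs of equal length and a 1-D result), a minimal standalone C++ sketch of the computation being described. Illustrative only; the helper name is invented, and this is not the operator's implementation:

#include <cassert>
#include <cstddef>
#include <vector>

// Dot as documented above: both inputs must be 1-D and of equal length;
// the result is the sum of elementwise products, returned as a 1-D tensor
// with a single element to mirror the "must be 1d" output contract.
std::vector<float> Dot1D(const std::vector<float> &x,
                         const std::vector<float> &y) {
  assert(x.size() == y.size());  // both operands must be 1-D, same length
  float acc = 0.0f;
  for (std::size_t i = 0; i < x.size(); ++i) {
    acc += x[i] * y[i];
  }
  return {acc};  // 1-D result holding the accumulated dot product
}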
\n
@@ -3671,7 +3650,7 @@ REG_OP(Dot)
    .INPUT(input_y, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32}))
    .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16, DT_UINT8, DT_INT8, DT_INT32}))
    .OP_END_FACTORY_REG(Dot)
-
+
/**
*@brief Returns a new tensor with boolean elements representing \n
*if each element of input is “close” to the corresponding element of other \n
@@ -3719,7 +3698,7 @@ REG_OP(IsClose)
*
*@attention Constraints:
*@li indices: only support int32,and shape same to "updates"
-*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". 
+*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x".
*@li y:A Tensor, the type and shape is same to "var" \n

*@par Third-party framework compatibility
@@ -3754,7 +3733,7 @@ REG_OP(ArgMaxGrad)

*@attention Constraints:
*@li indices: only support int32,and shape same to "updates"
-*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x". 
+*@li The value range of "dimension" is [-dims, dims - 1]. "dims" is the dimension length of "x".
*@li y:A Tensor, the type and shape is same to "var" \n

*@par Third-party framework compatibility
@@ -3805,15 +3784,15 @@ REG_OP(AddMatMatElements)

*@par Inputs:
*Two inputs, including:
-* @li input_x1: A tensor. Must be the following types:
-* float32. \n
+* @li input_x1: A tensor. Must be one of the following types: float32.
+* @li input_x2: A tensor. Must be one of the following types: float32. \n

-*@par Inputs:
-*@li input_x2: A tensor. Must of the following types:
-* float32. \n
+* @par Attributes:
+* @li dim: The type is Int and the default value is 1.
+* @li eps: The type is Float and the default value is 1e-8. \n

*@par Outputs:
-*@li output_y: A Tensor with the same type of input_x's. \n
+* output_y: A Tensor with the same type as input_x1. \n

*@par Third-party framework compatibility
*Compatible with the Pytorch operator CosineSimilarity. \n
@@ -3826,6 +3805,45 @@ REG_OP(CosineSimilarity)
    .ATTR(eps, Float, 1e-8)
    .OP_END_FACTORY_REG(CosineSimilarity)

+/**
+*@brief Computes the Adam optimizer update. \n
+
+*@par Inputs:
+*Eleven inputs, including:
+* @li var: A Tensor. Support float16/float32.\n
+* @li m: A Tensor. Datatype and shape are same as exp_avg.\n
+* @li v: A Tensor. Datatype and shape are same as exp_avg.\n
+* @li lr: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li beta1: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li beta2: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li epsilon: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li grad: A Tensor. Datatype and shape are same as exp_avg.\n
+* @li max_grad_norm: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li global_grad_norm: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+* @li weight_decay: A Tensor. Datatype is same as exp_avg. Shape (1, ).\n
+
+*@par Outputs:
+*Three outputs, including:
+* @li var: A Tensor. Datatype and shape are same as exp_avg.\n
+* @li m: A Tensor. Datatype and shape are same as exp_avg.\n
+* @li v: A Tensor.
Datatype and shape are same as exp_avg.\n +*/ +REG_OP(ApplyAdamV2) + .INPUT(var, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(m, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(v, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(lr, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(beta1, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(beta2, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(epsilon, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(grad, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(max_grad_norm, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(global_grad_norm, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .INPUT(weight_decay, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(var, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(m, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OUTPUT(v, TensorType({ DT_FLOAT, DT_FLOAT16 })) + .OP_END_FACTORY_REG(ApplyAdamV2) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_ELEWISE_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/functional_ops.h b/third_party/fwkacllib/inc/ops/functional_ops.h index b09ac058..7cfe39c4 100644 --- a/third_party/fwkacllib/inc/ops/functional_ops.h +++ b/third_party/fwkacllib/inc/ops/functional_ops.h @@ -163,9 +163,6 @@ REG_OP(Case) * if it is not a scalar, non-empty means True and empty means False. *@li body: A subgraph takes 'input' and returns a another list of tensors . \n - *@par Attributes: - *parallel_iterations: An optional int, default as 10 . \n - *@par Outputs: *output: The output tensors returned by "body". Has the same type as "input" . \n diff --git a/third_party/fwkacllib/inc/ops/image_ops.h b/third_party/fwkacllib/inc/ops/image_ops.h index 6909345a..2327e76e 100644 --- a/third_party/fwkacllib/inc/ops/image_ops.h +++ b/third_party/fwkacllib/inc/ops/image_ops.h @@ -28,7 +28,7 @@ namespace ge { *@brief Decode the frame(s) of a GIF-encoded image to a uint8 tensor . \n *@par Inputs: -*@li contents:A Tensor of type string. 0-D. The GIF-encoded image. \n +*contents:A Tensor of type string. 0-D. The GIF-encoded image. \n *@par Outputs: *image:A Tensor of type uint8. \n @@ -128,8 +128,8 @@ crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling to a common output size specified by crop_size . \n *@par Inputs: -*Input images must be a 4-D tensor. Inputs include: -*@li images:A Tensor. Must be one of the following types:uint8, uint16, int8, +*Input x must be a 4-D tensor. Inputs include: +*@li x:A Tensor. Must be one of the following types:uint8, uint16, int8, int16, int32, int64, float16, float, double. A 4-D tensor of shape [batch, image_height, image_width, depth]. The format must be NHWC. *@li boxes: A Tensor of type float. A 2-D tensor of shape [num_boxes, 4]. @@ -266,8 +266,9 @@ depth] containing the original image size. Both image_height and image_width need to be positive . \n *@par Attributes: -method: A string specifying the interpolation method. Only 'bilinear' is -supported for now . \n +*@li method: A string specifying the interpolation method. Only 'bilinear' is +supported for now . +*@li T: output of type \n *@par Outputs: *y:A 4-D tensor of shape [batch, image_height, image_width, depth]. The format @@ -585,9 +586,11 @@ REG_OP(ResizeNearestNeighborV2GradD) channels], The image tensor that was resized . \n *@par Attributes: -*align_corners: An optional bool. Defaults to False. If true, the centers of +*@li align_corners: An optional bool. Defaults to False. If true, the centers of the 4 corner pixels of the input and grad tensors are aligned. 
Defaults to -false . \n +false . +*@li half_pixel_centers: indicates if the offset coordinates are normalized. Defaults +to false . \n *@par Outputs: *y: A Tensor. Has the same type as original_image . \n @@ -617,9 +620,10 @@ REG_OP(ResizeBilinearV2Grad) size for the images . \n *@par Attributes: -*align_corners: If true, the centers of the 4 corner pixels of the input and +* @li align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. -Defaults to false . \n +Defaults to false . +* @li half_pixel_centers: An optional bool. Defaults to False . \n *@par Outputs: *y: 4-D with shape [batch, new_height, new_width, channels] . \n @@ -684,6 +688,9 @@ be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied . *@li aspect_ratio_range: The cropped area of the image must have an aspect ratio = width / height within this range. +*@li area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The +cropped area of the image must contain a fraction of the supplied image +within this range. *@li max_attempts: Number of attempts at generating a cropped region of the image of the specified constraints. After max_attempts failures, return the entire image. @@ -740,6 +747,9 @@ generator is seeded by the given seed. Otherwise, it is seeded by a random seed. *@li seed2: A second seed to avoid seed collision. *@li aspect_ratio_range: The cropped area of the image must have an aspect ratio = width / height within this range. +*@li area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The +cropped area of the image must contain a fraction of the supplied image +within this range. *@li max_attempts: Number of attempts at generating a cropped region of the image of the specified constraints. After max_attempts failures, return the entire image. @@ -787,9 +797,10 @@ REG_OP(SampleDistortedBoundingBoxExt2) The new size for the images . \n *@par Attributes: -*align_corners: If true, the centers of the 4 corner pixels of the input and +*@li align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to false . \n +*@li half_pixel_centers: An optional bool. Defaults to False . \n *@par Outputs: *y: 4-D with shape [batch, new_height, new_width, channels] . \n @@ -999,10 +1010,6 @@ deciding whether boxes overlap too. *@li score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score . \n -*@par Attributes: -*pad_to_max_output_size: If true, the output selected_indices is padded -to be of length max_output_size. Defaults to false . \n - *@par Outputs: *selected_indices: A 1-D integer tensor of shape [M] representing the selected indices from the boxes tensor, where M <= max_output_size . \n @@ -1094,8 +1101,8 @@ REG_OP(EncodePng) *contents: 0-D. PNG-decoded image . *@par Attributes: -*channels: graph channels \n -*dtype: type of image +*@li channels: graph channels \n +*@li dtype: type of image *@par Outputs: *image: is a 3-D uint8 or uint16 Tensor of shape [height, width, channels] @@ -1116,10 +1123,10 @@ REG_OP(DecodePng) *@brief Bmp-decode an image. \n *@par Inputs: -*@li contents: A Tensor of type string. 0-D. The BMP-encoded image. \n +*contents: A Tensor of type string. 0-D. The BMP-encoded image. \n *@par Attributes: -*@li channels: Decode the desired number of color channels of the image. 
\n +*channels: Decode the desired number of color channels of the image. \n *@par Outputs: *image: A Tensor dtype of uint8. @@ -1253,6 +1260,7 @@ REG_OP(KeepRatioResizeBilinear) No default value. *@li align_corners: An optional bool. If "true", the centers of the corner pixels of the input and output tensors are aligned. Defaults to "false" . \n +*@li half_pixel_centers: An optional bool. Defaults to False . \n *@par Outputs: *y: A Tensor with the same type and format as input "images" . \n @@ -1381,6 +1389,7 @@ REG_OP(NonMaxSuppressionV5) *@li scale: A `Tensor` of type `float32`. *@li translation: A `Tensor` of type `float32` . \n +*@par Attributes: *@li kernel_type: type is string, default lanczos3 *@li antialias: type is bool, default true \n @@ -1411,6 +1420,7 @@ REG_OP(ScaleAndTranslate) *@li scale: A `Tensor` of type `float32`. *@li translation: A `Tensor` of type `float32` . \n +*@par Attributes: *@li kernel_type: type is string, default lanczos3 *@li antialias: type is bool, default true @@ -1460,9 +1470,10 @@ if they fall beyond [0, 1]. If false, do not do clipping and output the box coordinates as it is. If not specified, defaults to true . \n *@par Outputs: -*nmsed_boxes:type is float -*nmsed_scores:type is float -*nmsed_classes:type is float \n +*@li nmsed_boxes:type is float +*@li nmsed_scores:type is float +*@li nmsed_classes:type is float +*@li valid_detections:type is INT32 \n *@par Third-party framework compatibility * Compatible with tensorflow CombinedNonMaxSuppression operator. @@ -1508,6 +1519,9 @@ REG_OP(IMGWarp) *@par Outputs: *map_img: A Tensor after resize. \n + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(Remap) .INPUT(img, TensorType({DT_UINT8, DT_FLOAT16, DT_FLOAT32})) @@ -1524,7 +1538,7 @@ and 4 mean input[(h_top, w_left), (h_top, w_right), (h_bottom, w_left), (h_bott *@li warp_index: the resize offset A 4-D float tensor of shape `[n, 2, h, w]`, 2 means (x, y) for resize point. *@par Outputs: -*remap_img: A Tensor after ResizeBilinear, A 4-D tensor of shape `[n, c, h, w]`. \n +*warp_img: A Tensor after ResizeBilinear, A 4-D tensor of shape `[n, c, h, w]`. \n */ REG_OP(IMGWarpResize) .INPUT(img, TensorType({DT_FLOAT32})) @@ -1558,6 +1572,39 @@ REG_OP(SpatialTransformerD) .ATTR(use_default_theta, ListBool, {}) .OP_END_FACTORY_REG(SpatialTransformerD) +/** +*@brief Function spatial transformer . \n + +*@par Inputs: +*@li x: A Tensor dtype of float16, float32, double, uint8, int8, uint16, int16, int32, uint32, uint64, int64. +*@li theta: A Tensor dtype of float16, float32, double, uint8, int8, uint16, int16, int32, uint32, uint64, int64, + auxiliary coefficients . \n + +*@par Attributes: +*@li output_size: A tuple output size. +*@li default_theta: A tuple default theta +*@li use_default_theta: List use default theta + +*@par Outputs: +*y: A Tensor dtype of float16, float32, double, uint8, int8, uint16, int16, int32, uint32, uint64, int64, + should be same shape and type as x. + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/ +REG_OP(SpatialTransformer) + .INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16,DT_DOUBLE,DT_UINT8,DT_INT8,DT_UINT16, + DT_INT16,DT_INT32,DT_UINT32,DT_UINT64,DT_INT64})) + .OPTIONAL_INPUT(theta, TensorType({DT_FLOAT,DT_FLOAT16,DT_DOUBLE,DT_UINT8,DT_INT8, + DT_UINT16,DT_INT16,DT_INT32,DT_UINT32,DT_UINT64,DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT,DT_FLOAT16,DT_DOUBLE,DT_UINT8,DT_INT8,DT_UINT16, + DT_INT16,DT_INT32,DT_UINT32,DT_UINT64,DT_INT64})) + .ATTR(output_size, ListInt, {-1, -1}) + .ATTR(default_theta, ListFloat, {}) + .ATTR(align_corners, Bool, false) + .ATTR(use_default_theta, ListInt, {}) + .OP_END_FACTORY_REG(SpatialTransformer) + /** * @brief Resize the input tensor. \n currently, only support resize image tensor using nearest neighbor and linear interpolation. @@ -1623,7 +1670,7 @@ REG_OP(Resize) *@brief Function parse image from string to int. \n *@par Inputs: -*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n +* contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n *@par Attributes: *@li channels: An optional int. Defaults to 0. Number of color channels for the decoded image. @@ -1668,7 +1715,7 @@ REG_OP(DenseImageWarp) *@par Inputs: *One inputs, including: -* @li x: A tensor. Must be one of the following types: +* x: A tensor. Must be one of the following types: * float16, float32. \n *@par Attributes: @@ -1713,7 +1760,7 @@ REG_OP(ResizeD) *@par Inputs: *One inputs, including: -* @li grads: A tensor. Must be one of the following types: +* grads: A tensor. Must be one of the following types: * float16, float32. \n *@par Attributes: @@ -1762,8 +1809,8 @@ REG_OP(ResizeGradD) *@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n *@par Outputs: -*grad_image: Returns 4-D with the same shape and dtype as `image`. -*grad_flow: Returns 4-D with the same shape and dtype as `flow`. \n +*@li grad_image: Returns 4-D with the same shape and dtype as `image`. +*@li grad_flow: Returns 4-D with the same shape and dtype as `flow`. \n */ REG_OP(DenseImageWarpGrad) .INPUT(grad, TensorType({DT_FLOAT, DT_FLOAT16})) @@ -1817,12 +1864,12 @@ REG_OP(GridSampler2D) *@li assist: Assist matrix, a 4-D tensor of type float16. *@par Attributes: -*@li align_corners: An optional bool. If "true", the centers of the corner +*align_corners: An optional bool. If "true", the centers of the corner pixels of the input and output tensors are aligned. Defaults to "false" . *@par Outputs: -*diff: Returns 4-D Tensor with the same shape and dtype as `grid`. -*position: Returns 4-D Tensor with the same shape as `grid`. +*@li diff: Returns 4-D Tensor with the same shape and dtype as `grid`. +*@li position: Returns 4-D Tensor with the same shape as `grid`. */ REG_OP(GridUnnormal) .INPUT(grid, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -1840,10 +1887,13 @@ REG_OP(GridUnnormal) *@li position: 4-D Tensor with shape `[batch, output_height, output_width, 2]`. *@par Attributes: -*@li padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now . +*padding_mode: An optional string specifying the pad method. Only 'zeros' is supported for now . *@par Outputs: *y: Returns 4-D Tensor with the same dtype as `x`. + +*@par Restrictions: +*Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
*/ REG_OP(ImageUnfold) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -1936,5 +1986,204 @@ REG_OP(GridSampler3DGrad) .ATTR(align_corners, Bool, false) .OP_END_FACTORY_REG(GridSampler3DGrad) +/** +*@brief Upsample the 3-D data with the nearest neighbor ​interpolation algorithm. \n + +*@par Inputs: +*One inputs, including: +*x: A 5-D input tensor [N, C, D, H, W]. Must be one of the following types: +* float16, float32, float64. \n + +*@par Attributes: +*@li output_size: An optional listInt. Defaults to none. + contain 3 elements: output_depth, output_height, output_width. The number of elements of 'output_size' + should be the same as the rank of input 'x'. Only one of 'scales' and 'output_size' can be specified. \n +*@li scales: An optional listFloat. Defaults to none. + The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width. + The number of elements of 'scales' should be the same as the rank of input 'x'. One of 'scales' and + 'output_size' MUST be specified and it is an error if both are specified. \n + +*@par Outputs: +*y: A 5-D tensor. Has the same type as input x, shape depends on x and output_size/scales. \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. \n +*/ + +REG_OP(UpsampleNearest3d) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(output_size, ListInt, {}) + .ATTR(scales, ListFloat, {}) + .OP_END_FACTORY_REG(UpsampleNearest3d) + +/** +*@brief Upsample the 3-D data with the trilinear ​interpolation algorithm. \n + +*@par Inputs: +*One inputs, including: +*x: A 5-D input tensor [N, C, D, H, W]. Must be one of the following types: +* float16, float32, float64. \n + +*@par Attributes: +*@li output_size: An optional listInt. Defaults to none. + contain 3 elements: output_depth, output_height, output_width. The number of elements of 'output_size' should + be the same as the rank of input 'x'. Only one of 'scales' and 'output_size' can be specified. \n +*@li scales: An optional listFloat. Defaults to none. + The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width. + The number of elements of 'scales' should be the same as the rank of input 'x'. + One of 'scales' and 'output_size' MUST be specified and it is an error if both are specified. \n +*@li align_corners: An optional bool. Defaults to false. + If true, the input and output tensors are aligned by the center points of their corner pixels, preserving the + values at the corner pixels. If false, the input and output tensors are aligned by the corner points of their + corner pixels, and the interpolation use edge value padding for out of boundary values. \n + +*@par Outputs: +*y: A 5-D tensor. Has the same type as input x, shape depends on x and output_size/scales. \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. \n +*/ + +REG_OP(UpsampleTrilinear3d) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(output_size, ListInt, {}) + .ATTR(scales, ListFloat, {}) + .ATTR(align_corners, Bool, false) + .OP_END_FACTORY_REG(UpsampleTrilinear3d) + +/** +*@brief Upsample the 3-D gradient data with the nearest neighbor ​interpolation algorithm. \n + +*@par Inputs: +*One inputs, including: +*grad_output: A 5-D input tensor [N, C, D, H, W]. Must be one of the following types: +* float16, float32, float64. 
\n + +*@par Attributes: +*@li input_size: An required listInt. + contain 5 elements: [min_batch, channels, depth, height, width]. Must: + input_size[0] == grad_output_tensor_size[0] + input_size[1] == grad_output_tensor_size[1]. \n +*@li output_size: An optional listInt. Defaults to none. + contain 3 elements: depth, height, width. The number of elements of 'output_size' should + be the same as the rank of input 'grad_output'. Only one of 'scales' and 'output_size' can be specified. Must: + grad_output_tensor_size[2] == floor(input_size[2] * scales[0]) == output_size[0] + grad_output_tensor_size[3] == floor(input_size[3] * scales[1]) == output_size[1] + grad_output_tensor_size[4] == floor(input_size[4] * scales[2]) == output_size[2]. \n +*@li scales: An optional listFloat. Defaults to none. + The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width. + The number of elements of 'scales' should be the same as the rank of input 'grad_output'. + One of 'scales' and 'output_size' MUST be specified and it is an error if both are specified. \n + +*@par Outputs: +*y: A 5-D tensor. Has the same type as input grad_output, shape depends on Attributes:input_size. \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. +*/ + +REG_OP(UpsampleNearest3dGrad) + .INPUT(grad_output, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .REQUIRED_ATTR(input_size, ListInt) + .ATTR(output_size, ListInt, {}) + .ATTR(scales, ListFloat, {}) + .OP_END_FACTORY_REG(UpsampleNearest3dGrad) + +/** +*@brief Upsample the 3-D gradient data trilinear ​interpolation algorithm. \n + +*@par Inputs: +*One inputs, including: +*grad_output: A 5-D input tensor [N, C, D, H, W]. Must be one of the following types: +* float16, float32, float64. \n + +*@par Attributes: +*@li input_size: An required listInt. + contain 5 elements: [min_batch, channels, depth, height, width]. Must: + input_size[0] == grad_output_tensor_size[0] + input_size[1] == grad_output_tensor_size[1]. \n +*@li output_size: An optional listInt. Defaults to none. + contain 3 elements: depth, height, width. The number of elements of 'output_size' should + be the same as the rank of input 'grad_output'. Only one of 'scales' and 'output_size' can be specified. Must: + grad_output_tensor_size[2] == floor(input_size[2] * scales[0]) == output_size[0] + grad_output_tensor_size[3] == floor(input_size[3] * scales[1]) == output_size[1] + grad_output_tensor_size[4] == floor(input_size[4] * scales[2]) == output_size[2]. \n +*@li scales: An optional listFloat. Defaults to none. + The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width. + The number of elements of 'scales' should be the same as the rank of input 'grad_output'. + One of 'scales' and 'output_size' MUST be specified and it is an error if both are specified. \n + +*@par Outputs: +*y: A Tensor with shape depends on intput_size and output_size/scales. Must be one of the following + types: float16, float32, float64. \n + +*@par Restrictions: +*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
+*/
+
+REG_OP(UpsampleTrilinear3dGrad)
+    .INPUT(grad_output, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .REQUIRED_ATTR(input_size, ListInt)
+    .ATTR(output_size, ListInt, {})
+    .ATTR(scales, ListFloat, {})
+    .ATTR(align_corners, Bool, false)
+    .OP_END_FACTORY_REG(UpsampleTrilinear3dGrad)
+
+
+/**
+*@brief Upsample the 1-D data with the nearest neighbor interpolation algorithm. \n
+
+*@par Inputs:
+*x: A 3-D input tensor [N, C, W]. Must be one of the following types:
+* float16, float32, float64. \n
+
+*@par Attributes:
+*@li output_size: A required listInt containing output_width.
+*@li scales: An optional listFloat containing scale_width. Defaults to empty. \n
+
+*@par Outputs:
+*y: A 3-D tensor. Has the same type as input x, shape depends on x and output_size/scales. \n
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. \n
+*/
+
+REG_OP(UpsampleNearest1d)
+    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .REQUIRED_ATTR(output_size, ListInt)
+    .ATTR(scales, ListFloat, {})
+    .OP_END_FACTORY_REG(UpsampleNearest1d)
+
+/**
+*@brief Upsample the 1-D gradient data with the nearest neighbor interpolation algorithm. \n
+
+*@par Inputs:
+*grad_output: A 3-D input tensor [N, C, W]. Must be one of the following types:
+* float16, float32, float64. \n
+
+*@par Attributes:
+*@li output_size: A required listInt containing output_width.
+*@li scales: An optional listFloat containing scale_width. Defaults to empty.
+*@li input_size: A required listInt containing the size of the original input [N, C, W]. \n
+
+*@par Outputs:
+*y: A 3-D tensor. Has the same type as input grad_output, shape depends on Attributes:input_size. \n
+
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. \n
+*/
+
+REG_OP(UpsampleNearest1dGrad)
+    .INPUT(grad_output, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
+    .REQUIRED_ATTR(input_size, ListInt)
+    .REQUIRED_ATTR(output_size, ListInt)
+    .ATTR(scales, ListFloat, {})
+    .OP_END_FACTORY_REG(UpsampleNearest1dGrad)
} // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_
diff --git a/third_party/fwkacllib/inc/ops/linalg_ops.h b/third_party/fwkacllib/inc/ops/linalg_ops.h
index 69c77bf6..f6cc8694 100644
--- a/third_party/fwkacllib/inc/ops/linalg_ops.h
+++ b/third_party/fwkacllib/inc/ops/linalg_ops.h
@@ -347,6 +347,9 @@ REG_OP(SelfAdjointEig)
  .OP_END_FACTORY_REG(SelfAdjointEig)

/**
+*@par Restrictions:
+*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+
*@brief Computes the sign and the log of the absolute value of the
determinant
of one or more square matrices . \n

@@ -382,9 +385,10 @@ REG_OP(Slogdet)
*x:Tensor of shape [..., M, N]. Let P be the minimum of M and N . \n

*@par Attributes:
-*compute_uv:If True then left and right singular vectors will be computed and
+*@li compute_uv: If True then left and right singular vectors will be computed and
returned in u and v, respectively. Otherwise, only the singular values will
-be computed, which can be significantly faster . \n
+be computed, which can be significantly faster .
+*@li full_matrices: If True, compute full-sized u and v; otherwise, compute only the leading P singular vectors. \n

*@par Outputs:
*@li sigma:Singular values. Shape is [..., P]. The values are sorted in
@@ -427,6 +431,9 @@ denotes the lower triangular factor `L` with unit diagonal.
*@li p: upper triangular part denotes the upper triangular factor `U`.Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is `[..., M]` . \n +*@par Attributes: +*output_idx_type: An optional DType from: int32, int64. + *@par Third-party framework compatibility * Compatible with TensorFlow Lu operator. */ @@ -467,6 +474,12 @@ left-hand side . \n *@par Outputs: y: Tensor of shape `[..., M, K]` containing the solutions \n +*@par Attributes: +*partial_pivoting: Whether to perform partial pivoting. `True` by default. +Partial pivoting makes the procedure more stable, but slower. Partial +pivoting is unnecessary in some cases, including diagonally dominant and +symmetric positive definite matrices + *@par Third-party framework compatibility * Compatible with TensorFlow TridiagonalSolve operator. */ diff --git a/third_party/fwkacllib/inc/ops/list_ops.h b/third_party/fwkacllib/inc/ops/list_ops.h index a1b622e9..0aa94e73 100644 --- a/third_party/fwkacllib/inc/ops/list_ops.h +++ b/third_party/fwkacllib/inc/ops/list_ops.h @@ -35,10 +35,10 @@ namespace ge { *@li max_num_elements: The maximum number of elements. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li handle: An empty tensor list . \n +*handle: An empty tensor list . \n *@par Third-party framework compatibility. *Compatible with tensorflow EmptyTensorList operator. @@ -59,10 +59,10 @@ and the other elements of the given list in `input_handle`. \n *@li tensor: The tensor to put on the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle:A list with the elements of old list followed by tensor. \n +*output_handle:A list with the elements of old list followed by tensor. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListPushBack operator. @@ -86,7 +86,7 @@ list with all but that element. \n *@li element_shape: A shape compatible with that of elements in the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: *@li output_handle:A list with the elements of the old list followed by tensor. @@ -110,10 +110,10 @@ REG_OP(TensorListPopBack) *@brief The number of tensors in the input tensor list. \n *@par Inputs: -*@li input_handle: The input list. \n +*input_handle: The input list. \n *@par Outputs: -*@li length:The number of tensors in the list. \n +*length:The number of tensors in the list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListLength operator. @@ -127,13 +127,13 @@ REG_OP(TensorListLength) *@brief The shape of elements in the input tensor list. \n *@par Inputs: -*@li input_handle: The input list. \n +*input_handle: The input list. \n *@par Attributes: -*@li shape_type: The type of shape in the list. \n +*shape_type: The type of shape in the list. \n *@par Outputs: -*@li element_shape:A shape compatible with that of elements in the list. \n +*element_shape:A shape compatible with that of elements in the list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListElementShape operator. @@ -156,7 +156,7 @@ REG_OP(TensorListElementShape) *@li shape_type: The type of shape in the list. \n *@par Outputs: -*@li handle: An output tensor list . \n +*handle: An output tensor list . 
\n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListReserve operator. @@ -178,10 +178,10 @@ REG_OP(TensorListReserve) *@li element_shape: A shape compatible with that of elements in the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li item: An output tensor value of index position . \n +*item: An output tensor value of index position . \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListGetItem operator. @@ -206,10 +206,10 @@ REG_OP(TensorListGetItem) *@li item: The element to be assigned to that position. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle: An output tensor list . \n +*output_handle: An output tensor list . \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListSetItem operator. @@ -233,10 +233,10 @@ REG_OP(TensorListSetItem) *@li tensor: The tensor push into tensor list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handles: The output tensor lists. \n +*output_handles: The output tensor lists. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListPushBackBatch operator. @@ -263,7 +263,7 @@ REG_OP(TensorListPushBackBatch) *@li num_elements: The number of elements in the list. \n *@par Outputs: -*@li tensor: The tensor of list. \n +*tensor: The tensor of list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListStack operator. @@ -293,7 +293,7 @@ the leading dim of input_handle.element_shape or the element_shape input arg is not already set. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: *@li tensor: The concated result. @@ -324,10 +324,10 @@ REG_OP(TensorListConcatV2) *@li lengths: Vector of sizes of the 0th dimension of tensors in the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle: The list. \n +*output_handle: The list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListSplit operator. @@ -351,10 +351,10 @@ REG_OP(TensorListSplit) *@li element_shape: The shape of elements in the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle: An output tensor list . \n +*output_handle: An output tensor list . \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListFromTensor operator. @@ -377,7 +377,7 @@ REG_OP(TensorListFromTensor) *@li size: size of the output list. \n *@par Outputs: -*@li output_handle: The output tensor list. \n +*output_handle: The output tensor list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListResize operator. @@ -397,10 +397,10 @@ REG_OP(TensorListResize) *@li element_shape: The shape of elements in the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li values: The tensor. \n +*values: The tensor. 
\n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListGather operator. @@ -429,10 +429,10 @@ the largest index in indices. If -1, the list is just large enough to include the largest index in indices. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle: The TensorList. \n +*output_handle: The TensorList. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListScatterV2 operator. @@ -458,10 +458,10 @@ REG_OP(TensorListScatterV2) *@li indices: The indices used to index into the list. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output_handle: The TensorList. \n +*output_handle: The TensorList. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListScatterIntoExistingList operator. @@ -485,10 +485,10 @@ REG_OP(TensorListScatterIntoExistingList) *@li input_b: The input tensor list B. \n *@par Attributes: -*@li element_dtype: The type of elements in the list. \n +*element_dtype: The type of elements in the list. \n *@par Outputs: -*@li output: The output list. \n +*output: The output list. \n *@par Third-party framework compatibility. *Compatible with tensorflow TensorListConcatLists operator. diff --git a/third_party/fwkacllib/inc/ops/lookup_ops.h b/third_party/fwkacllib/inc/ops/lookup_ops.h index 5d928e5a..b1fc254f 100644 --- a/third_party/fwkacllib/inc/ops/lookup_ops.h +++ b/third_party/fwkacllib/inc/ops/lookup_ops.h @@ -77,8 +77,8 @@ REG_OP(LookupTableInsert) *handle: A Tensor of type resource. Handle to the table . \n *@par Attributes: -*@li Tkeys: A DType. -*@li Tvalues: A DType . \n +*@li Tkeys: A DType of keys. +*@li Tvalues: A DType of values. *@par Outputs: *@li keys: A Tensor of type Tkeys. diff --git a/third_party/fwkacllib/inc/ops/math_ops.h b/third_party/fwkacllib/inc/ops/math_ops.h index 319bcf70..6eb418d8 100644 --- a/third_party/fwkacllib/inc/ops/math_ops.h +++ b/third_party/fwkacllib/inc/ops/math_ops.h @@ -227,10 +227,10 @@ REG_OP(Bucketize) *@par Inputs: *One inputs, including: -* @li input_x: A tensor. Must be one of the following types: float16, float32, int8, uint8, int32. \n +*input_x: A tensor. Must be one of the following types: float16, float32, int8, uint8, int32. \n *@par Outputs: -*y: A tensor with the same type and shape of input_x \n +*output_y: A tensor with the same type and shape of input_x \n *@par Third-party framework compatibility *Compatible with the Pytorch operator Trunc. \n @@ -298,7 +298,7 @@ REG_OP(SparseSegmentMean) *@par Inputs: *The input grad must have be type float or double. Inputs include: -*@li grad: A Tensor. Must be one of the following types: float, double. +*@li x: A Tensor. Must be one of the following types: float, double. gradient propagated to the SparseSegmentMean op. *@li indices: A Tensor. Must be one of the following types: int32, int64. indices passed to the corresponding SparseSegmentMean op. @@ -365,6 +365,7 @@ REG_OP(InitData) component of an element of this dataset. *@li output_shapes: A nested structure of TensorShape objects corresponding to each component of an element of this dataset. +*@li output_num:output of nums. *@li channel_name: A string. Default "" . \n *@par Outputs: @@ -538,11 +539,11 @@ REG_OP(NextAfter) *@par Inputs: *One inputs, including: -* @li input_x: A tensor. 
Must be one of the following types: +* input_x: A tensor. Must be one of the following types: * float16, float32. \n *@par Attributes: -*@li p: An optional float.Defaults to 2. \n +*p: An optional float.Defaults to 2. \n *@par Outputs: *y: A Tensor with the same type and shape of input_x's. \n @@ -560,10 +561,10 @@ REG_OP(Pdist) *@brief Compute element-wise finiteness, return a boolean tensor. *@par Inputs: - *x:A Tensor. + *x:A Tensor of type float16, float32, double. *@par Outputs: - *y:A Tensor. Has the same shape as x. + *y:A Tensor. Returns which elements of x are finite *@par Third-party framework compatibility. *Compatible with tensorflow IsFinite operator. @@ -577,10 +578,10 @@ REG_OP(IsFinite) *@brief Compute element-wise infiniteness, return a boolean tensor. *@par Inputs: - *x:A Tensor. + *x:A Tensor of type float16, float32, double. *@par Outputs: - *y:A Tensor. Has the same shape as x. + *y:A Tensor. Has the same shape as x. Returns which elements of x are isinf. *@par Third-party framework compatibility. *Compatible with tensorflow IsInf operator. @@ -594,7 +595,11 @@ REG_OP(IsInf) *@brief Computes the complex absolute value of a tensor. *@par Inputs: - *x:A Tensor. + *x: x of complex numbers, this operation returns a tensor of type + float or double that is the absolute value of each element in x . + +* @par Attributes: +* Tout: representing the output of type. *@par Outputs: *y:A tensor of type `float` or `double` that is the absolute value of each element in `x`. @@ -612,10 +617,10 @@ REG_OP(ComplexAbs) *@brief Returns which elements of x are NaN. *@par Inputs: - *x:A Tensor. + *x:A Tensor of type float16, float32, double. *@par Outputs: - *y:A Tensor. Has the same shape as x. + *y:A Tensor. Has the same shape as x. Returns which elements of x are isnan *@par Third-party framework compatibility. *Compatible with tensorflow IsNan operator. @@ -629,7 +634,10 @@ REG_OP(IsNan) *@brief Returns the real part of a complex number. *@par Inputs: - *input:A Tensor. + *input:A Tensor. Must have numeric type. + + *@par Attributes: + *Tout: Type of outputs. \n *@par Outputs: *output:A Tensor. Has the same shape as input. @@ -670,7 +678,8 @@ REG_OP(Conj) *@li weight: A Tensor dtype of float32 . \n *@par Attributes: -*reduction: An optional attribute. Defaults to "mean" . \n +*@li reduction: An optional attribute. Defaults to "mean" . +*@li ignore_index:An optional attribute.Defaults to -100 . \n *@par Outputs: *@li y: A Tensor dtype of float32. @@ -700,7 +709,8 @@ REG_OP(NLLLoss) *@li total_weight:A Tensor dtype of float32 . \n *@par Attributes: -*reduction: An optional attribute. Defaults to "mean" . \n +*@li reduction: An optional attribute. Defaults to "mean" . +*@li ignore_index:An optional attribute.Defaults to -100 . \n *@par Outputs: *x_grad: A Tensor. Must be the following type: float32 . \n @@ -720,24 +730,24 @@ REG_OP(NLLLossGrad) .OP_END_FACTORY_REG(NLLLossGrad) /** -*@brief The ifmr . \n +*@brief IFMR(Input Feature Map Reconstruction). \n *@par Inputs: -*@li data:A Tensor of feature map -*@li data_min:A Tensor of min value of feature map. -*@li data_max:A Tensor of max value of feature map. -*@li cumsum:A Tensor of cumsum bin of data . \n +*@li data: A Tensor of feature map. +*@li data_min: A Tensor of min value of feature map. +*@li data_max: A Tensor of max value of feature map. +*@li cumsum: A Tensor of cumsum bin of data . \n *@par Attributes: -*min_percentile: min init percentile. -*max_percentile: max init percentile. -*search_range: search range. 
-*search_step: step size of searching.
-*with_offset: whether using offset . \n
+*@li min_percentile: min init percentile.
+*@li max_percentile: max init percentile.
+*@li search_range: search range.
+*@li search_step: step size of searching.
+*@li with_offset: whether using offset . \n

 *@par Outputs:
-*scale: optimal scale.
-*offset: optimal offset . \n
+*@li scale: optimal scale.
+*@li offset: optimal offset . \n

 *@par Third-party framework compatibility
 *Compatible with mindspore
@@ -758,16 +768,16 @@ REG_OP(IFMR)
 .OP_END_FACTORY_REG(IFMR)

 /**
-*@brief weights adaptive range quantization. \n
+*@brief Weights Adaptive Range Quantization. \n

 *@par Inputs:
-*@li w:A Tensor of weights. \n
-*@li w_min:A Tensor of weights reduce_min. \n
-*@li w_max:A Tensor of weights reduce_max. \n
+*@li w: A Tensor of weights. \n
+*@li w_min: A Tensor of weights reduce_min. \n
+*@li w_max: A Tensor of weights reduce_max. \n

 *@par Attributes:
-*num_bits: the bits num used for quantize.
-*offset_flag: whether using offset. \n
+*@li num_bits: the bits num used for quantize.
+*@li offset_flag: whether using offset. \n

 *@par Outputs:
 *y: fake quantized weights. \n
@@ -789,22 +799,22 @@ REG_OP(WtsARQ)
 .OP_END_FACTORY_REG(WtsARQ)

 /**
-*@brief The acts_ulq. \n
+*@brief Activations Universal Linear Quantization. \n

 *@par Inputs:
-*@li x:A Tensor of feature map
-*@li clamp _min:A Tensor of min clamp value of feature map.
-*@li clamp _max:A Tensor of max clamp value of feature map.
+*@li x: A Tensor of feature map.
+*@li clamp_min: A Tensor of min clamp value of feature map.
+*@li clamp_max: A Tensor of max clamp value of feature map.

 *@par Attributes:
-*fixed_min: fix min to zero.
-*num_bits: quant bits. \n
+*@li fixed_min: fix min to zero.
+*@li num_bits: quant bits. \n

 *@par Outputs:
-*y: output fake quant feature map.
-*clamp_min_mask: where x > clamp_min
-*clamp_min_mask: where x < clamp_max
-*x_clamped_loss: clamp loss. \n
+*@li y: output fake quant feature map.
+*@li clamp_min_mask: where x > clamp_min.
+*@li clamp_max_mask: where x < clamp_max.
+*@li x_clamped_loss: clamp loss. \n

 *@par Third-party framework compatibility
 *Compatible with mindspore
@@ -826,12 +836,12 @@ REG_OP(ActsULQ)
 .OP_END_FACTORY_REG(ActsULQ)

 /**
-*@brief The acts_ulq_input_grad. \n
+*@brief The gradient of Activations Universal Linear Quantization. \n

 *@par Inputs:
-*@li y_grad: A Tensor of gradient
-*@li clamp_min_mask: A Tensor of boolean mask indicating whether an additional one is needed'
-*@li clamp_max_mask: A Tensor of boolean mask indicating whether an additional one is needed'
+*@li y_grad: A Tensor of gradient.
+*@li clamp_min_mask: A Tensor of boolean mask indicating whether an additional one is needed.
+*@li clamp_max_mask: A Tensor of boolean mask indicating whether an additional one is needed.

 *@par Outputs:
 *x_grapd: The gradient of inpust. \n
@@ -851,10 +861,10 @@ REG_OP(ActsULQInputGrad)
 .OP_END_FACTORY_REG(ActsULQInputGrad)

 /**
-*@brief The act_ulq_clamp_max_grad. \n
+*@brief The gradient of Activations Universal Linear Quantization clamp max. \n

 *@par Inputs:
-*@li y_grad: A Tensor of gradient
+*@li y_grad: A Tensor of gradient.
 *@li clamp_max_mask: A Tensor of boolean mask indicating whether an additional one is needed.
 *@li x_clamped_loss: A Tensor of gradient. \n
@@ -876,10 +886,10 @@ REG_OP(ActULQClampMaxGrad)
 .OP_END_FACTORY_REG(ActULQClampMaxGrad)

 /**
-*@brief The act_ulq_clamp_min_grad. \n
+*@brief The gradient of Activations Universal Linear Quantization clamp min.
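The ActsULQ family above fake-quantizes activations on a uniform grid between clamp_min and clamp_max. A minimal scalar sketch of that contract, assuming a uniform grid of (1 << num_bits) - 1 steps; UlqResult and ActsUlqSketch are illustrative names, not part of the op library:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    struct UlqResult {
      std::vector<float> y;              // fake-quantized feature map
      std::vector<bool> clamp_min_mask;  // where x > clamp_min
      std::vector<bool> clamp_max_mask;  // where x < clamp_max
    };

    UlqResult ActsUlqSketch(const std::vector<float> &x,
                            float clamp_min, float clamp_max, int num_bits) {
      const float steps = static_cast<float>((1 << num_bits) - 1);
      const float scale = (clamp_max - clamp_min) / steps;
      UlqResult r;
      for (float v : x) {
        r.clamp_min_mask.push_back(v > clamp_min);
        r.clamp_max_mask.push_back(v < clamp_max);
        const float c = std::min(std::max(v, clamp_min), clamp_max);
        // Quantize onto the grid, then de-quantize (fake quantization).
        r.y.push_back(std::round((c - clamp_min) / scale) * scale + clamp_min);
      }
      return r;
    }

The two masks are exactly the quantities consumed by the gradient ops that follow.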
\n
 *@par Inputs:
-*@li y_grad: A Tensor of gradient
+*@li y_grad: A Tensor of gradient.
 *@li clamp_min_mask: A Tensor of boolean mask indicating whether an additional one is needed.
 *@li x_clamped_loss: A Tensor of gradient. \n
@@ -904,7 +914,7 @@ REG_OP(ActULQClampMinGrad)
 * @brief Computes Lp norm.

 * @par Inputs:
-* @li x: An ND tensor of type float16, float32. \n
+* x: An ND tensor of type float16, float32. \n
 *
 * @par Attributes:
 * @li p: Int, "inf" or "-inf", default value is 2.
@@ -913,7 +923,7 @@ REG_OP(ActULQClampMinGrad)
 * @li epsilon: Float, default is 1e-12. \n

 * @par Outputs:
-* @li y: An ND tensor of type float16, float32. The shape of y is depending
+* y: An ND tensor of type float16, float32. The shape of y depends
 * on axes and keepdim. \n

 * @par Third-party framework compatibility
@@ -932,11 +942,13 @@ REG_OP(LpNorm)
 * @brief get complex.

 * @par Inputs:
-* @li real: An ND tensor of type float32. double
-* @li imag: An ND tensor of type float32. double \n
+* @li real: An ND tensor of type float32 or double, representing the real part of a complex number.
+* @li imag: An ND tensor of type float32 or double, representing the imaginary part of a complex number. \n
 *
+* @par Attributes:
+* Tout: The data type of the output.
 * @par Outputs:
-* @li out: An ND tensor of type complex64, complex128 \n
+* out: An ND tensor of type complex64, complex128 \n
 */
 REG_OP(Complex)
   .INPUT(real, TensorType({DT_FLOAT, DT_DOUBLE}))
@@ -949,10 +961,13 @@ REG_OP(Complex)
 * @brief deal complex.

 * @par Inputs:
-* @li input: An ND tensor of type complex64, complex128 \n
-*
+* input: An ND tensor of type complex64, complex128 \n

+* @par Attributes:
+* Tout: The data type of the output.

 * @par Outputs:
-* @li output: An ND tensor of type float32. double \n
+* output: An ND tensor of type float32 or double \n
 */
 REG_OP(Imag)
   .INPUT(input, TensorType({DT_COMPLEX64, DT_COMPLEX128}))
@@ -988,7 +1003,7 @@ REG_OP(Angle)
 * float16, float32. \n

 *@par Attributes:
-* @li reduction: Specifies the reduction to apply to the output:
+* reduction: Specifies the reduction to apply to the output:
 * 'none' | 'mean' | 'sum'. Default: 'mean'. \n

 *@par Outputs:
diff --git a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
index b317be37..81c6a29e 100644
--- a/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
+++ b/third_party/fwkacllib/inc/ops/matrix_calculation_ops.h
@@ -61,21 +61,28 @@ REG_OP(MatMul)
 *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n

 *@par Inputs:
-*Two inputs, including:
-* @li x1: A matrix Tensor. 2D. Must be one of the following types: float16,
-* float32, int32. Has format [ND, NHWC, FRACTAL_NZ].
-* @li x2: A matrix Tensor. 2D. Must be one of the following types: float16,
-* float32, int32. Has format [ND, NHWC, FRACTAL_NZ].
-* @li bias: A 1D Tensor. Must be one of the following types: float16,
-* float32, int32. Has format [ND, NHWC] . \n
+*Four inputs, including:
+* @li x1: A matrix Tensor. 2D. Must be one of the following types: float32,
+ float16, int32, int8. Has format [ND, NHWC, FRACTAL_NZ].
+* @li x2: A matrix Tensor. 2D. Must be one of the following types: float32,
+ float16, int32, int8. Has format [ND, NHWC, FRACTAL_NZ].
+* @li bias: A 1D Tensor. Must be one of the following types: float32,
+ float16, int32. Has format [ND, NHWC].
+* @li offset_w: An optional 1D Tensor for quantized inference. Type is int8.
+ Reserved. \n

 *@par Attributes:
-*@li transpose_x1: A bool.
If True, changes the shape of "x1" from [M, K] to [K, M]. -*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n +* @li transpose_x1: A bool. If True, changes the shape of "x1" from [K, M] to + [M, K]. +* @li transpose_x2: A bool. If True, changes the shape of "x2" from [N, K] to +[K, N]. +* @li offset_x: An optional integer for quantized MatMulV2. +* The negative offset added to the input x1 for int8 type. Ensure offset_x + within the effective range of int8 [-128, 127]. Defaults to "0". \n *@par Outputs: -*y: The result matrix Tensor. 2D. Must be one of the following types: float16, -* float32, int32. Has format [ND, NHWC, FRACTAL_NZ] . \n +*y: The result matrix Tensor. 2D. Must be one of the following types: float32, + float16, int32. Has format [ND, NHWC, FRACTAL_NZ]. \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator BatchMatmul. @@ -95,19 +102,27 @@ REG_OP(MatMulV2) *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n *@par Inputs: -*Two inputs, including: +*Five inputs, including: * @li x1: A matrix Tensor. 2D. Must be one of the following types: int8. * @li x2: A matrix Tensor. 2D. Must be one of the following types: int8. * @li compress_index: A compress index matrix of type int8. -* @li bias: A 1D Tensor. Must be one of the following types: int32, float16. +* @li bias: An optional Tensor. 1D. Must be one of the following types: int32, + float16. +* @li offset_w: An optional matrix Tensor. 2D. Must be one of the following + types: int8. \n *@par Attributes: -*@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M]. -*@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n +*@li transpose_x1: A bool. If True, changes the shape of "x1" from [K, M] to + [M, K]. +*@li transpose_x2: A bool. If True, changes the shape of "x2" from [N, K] to + [K, N]. +*@li offset_x: An optional integer for quantized MatMulV2Compress. +*The negative offset added to the input x1 for int8 type. Ensure offset_x + within the effective range of int8 [-128, 127]. Defaults to "0". \n *@par Outputs: -*y: The result matrix Tensor. 2D. Must be one of the following types: float16, -* int32. \n +*y: The result matrix Tensor. 2D. Must be one of the following types: int32, +* float16. \n */ REG_OP(MatMulV2Compress) @@ -488,13 +503,13 @@ REG_OP(ScatterElements) *@par Inputs: * Three inputs, including: -*@li var: An ND Tensor . \n +*@li var: An ND Tensor . *Must be one of the following types: float16, float32, int32, int8, uint8 *@li indices: An ND Tensor of type int32 or int64 -*@li updates: An Tensor. format:NCHW, NHWC . \n +*@li updates: An Tensor. format:NCHW, NHWC . *Must be one of the following types: float16, float32, int32, int8, uint8 @@ -516,6 +531,61 @@ REG_OP(ScatterAdd) .ATTR(use_locking, Bool, false) .OP_END_FACTORY_REG(ScatterAdd) +/** +*@brief Use a scalar to modify the tensor. \n + +*@par Inputs: +*inputs, including: +*@li index: An ND Tensor . \n + +*Must be one of the following types: float16, float32, int32, int8, uint8 + +*@par Attributes: +* dim : the axis along which to index . +* value : the source element(s) to scatter . \n + +*@par Outputs: +*y: A Tensor. Has the same type and format as input "index" . \n + +*@par Third-party framework compatibility +* Compatible with the Pytorch operator ScatterScalar. 
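For the ScatterScalar contract documented above, a rough 2-D sketch of the write pattern, assuming row-major storage and PyTorch-style scatter_ indexing (ScatterScalarSketch and its index layout are assumptions for illustration, not the kernel):

    #include <cstdint>
    #include <vector>

    // y starts as a copy of the destination; index holds target coordinates
    // along `dim` for every (row, col) position, as in torch.Tensor.scatter_.
    void ScatterScalarSketch(std::vector<float> &y, int64_t rows, int64_t cols,
                             const std::vector<int32_t> &index,
                             int dim, float value) {
      for (int64_t i = 0; i < rows; ++i) {
        for (int64_t j = 0; j < cols; ++j) {
          const int32_t k = index[i * cols + j];
          if (dim == 0) {
            y[k * cols + j] = value;  // scatter along rows
          } else {
            y[i * cols + k] = value;  // scatter along columns
          }
        }
      }
    }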
+*/ +REG_OP(ScatterScalar) + .INPUT(index, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .REQUIRED_ATTR(dim, Int) + .REQUIRED_ATTR(value, Float) + .OP_END_FACTORY_REG(ScatterScalar) + +/** +*@brief Use a tensor to modify the tensor . \n + +*@par Inputs: +* Two inputs, including: +*@li index: An ND Tensor . \n + +*Must be one of the following types: float16, float32, int32, int8, uint8 + +*@li src: An ND Tensor . \n + +*Must be one of the following types: float16, float32, int32, int8, uint8 + +*@par Attributes: +* dim : the axis along which to index . \n + +*@par Outputs: +*y: A Tensor. Has the same type and format as input "index" . \n + +*@par Third-party framework compatibility +* Compatible with the Pytorch operator ScatterTensor. +*/ +REG_OP(ScatterTensor) + .INPUT(index, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .INPUT(src, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8})) + .REQUIRED_ATTR(dim, Int) + .OP_END_FACTORY_REG(ScatterTensor) + /** *@brief Divides a variable reference by sparse updates . \n @@ -530,7 +600,7 @@ REG_OP(ScatterAdd) *Must be one of the following types: float16, float, int32, int8, uint8 *@par Attributes: -*@li use_locking: An optional bool. Defaults to "False". If "True", +*use_locking: An optional bool. Defaults to "False". If "True", * the operation will be protected by a lock . \n *@par Outputs: @@ -752,10 +822,12 @@ REG_OP(DiagPart) *@par Attributes: *@li num_output: Reserved. -*@li transpose: A bool, specifying weight whether to transpose, either "true" or "false". Defaults to "false". +*@li transpose: A bool, specifying weight whether to transpose input w, either "true" or "false". Defaults to "false". *@li axis: Optional. A int, 1 or 2, specifying which dimension the input "K" starts from. Defaults to 1. * The product of the subsequent dimensions starting form first dimension or the second dimension is "K". -*@li offset_x: Reserved . \n +*@li offset_x: An optional integer for quantized FullyConnection. +*The negative offset added to the input image for int8 type. Ensure offset_x within the +*effective range of int8 [-128, 127]. Defaults to "0". \n *@par Outputs: *y: The result tensor of type float16, int32, float32 . \n @@ -779,27 +851,34 @@ REG_OP(FullyConnection) .OP_END_FACTORY_REG(FullyConnection) /** -*@brief Also known as a "fully-connected-compress" layer, computes an inner product with a set of learned weights, and (optionally) adds biases . \n +*@brief Also known as a "fully-connected-compress" layer, computes an inner +product with a set of learned weights, and (optionally) adds biases . \n *@par Inputs: -* Four inputs, including: +* Five inputs, including: *@li x: A Tensor of type uint8, int8. -*@li w: A weight matrix of type int8, int8. -*@li w: A compress index matrix of type int8, int8. -*@li b: A Tensor of type float16, int32, int32. -*@li offset_w: A Tensor of type int8.i +*@li w: A weight matrix of type int8. +*@li compress_index: A compress index matrix of type int8. +*@li b: A Tensor of type int32. +*@li offset_w: A Tensor of type int8. *@par Attributes: -*@li num_output: Reserved. -*@li transpose: A bool, specifying whether to transpose, either "true" or "false". Defaults to "false". -*@li axis: Reserved. -*@li offset_x: Reserved . \n +*@li num_output: A int, specifying the number of outputs. 
+*@li transpose: A bool, specifying whether to transpose input w, either "true"
+ or "false". Defaults to "false".
+*@li axis: Optional. An int, 1 or 2, specifying which dimension the input "K"
+starts from. Defaults to "1".
+* The product of the subsequent dimensions starting from the first dimension or the
+second dimension is "K".
+*@li offset_x: An optional integer for quantized FullyConnectionCompress.
+*The negative offset added to the input image for int8 type. Ensure offset_x
+within the effective range of int8 [-128, 127]. Defaults to "0". \n

 *@par Outputs:
-*y: The result tensor of type int32 . \n
+*y: The result tensor of type int32. \n

 *@par Third-party framework compatibility
-* Compatible with the Caffe operator InnerProduct . \n
+* Compatible with the Caffe operator InnerProduct. \n

 *@par Quantization supported or not
 * Yes
@@ -925,13 +1004,13 @@ REG_OP(ScatterMin)

 *@par Inputs:
 * Three inputs, including:
-*@li var: An ND Tensor . \n
+*@li var: An ND Tensor .

 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An NCHW, NHWC, or ND Tensor . \n

 *Must be one of the following types: int32 or int64
-*@li updates: An NCHW, NHWC, or ND Tensor . \n
+*@li updates: An NCHW, NHWC, or ND Tensor .

 *Must be one of the following types: float16, float, int32, int8, uint8

@@ -958,13 +1037,13 @@ REG_OP(ScatterMax)

 *@par Inputs:
 * Three inputs, including:
-*@li var: An ND Tensor . \n
+*@li var: An ND Tensor .

 *Must be one of the following types: float16, float, int32, int8, uint8
 *@li indices: An ND Tensor . \n

 *Must be one of the following types: int32 or int64
-*@li updates: An ND Tensor . \n
+*@li updates: An ND Tensor .

 *Must be one of the following types: float16, float, int32, int8, uint8

@@ -1112,15 +1191,47 @@ REG_OP(IndexAdd)
   .ATTR(axis, Int, 0)
   .OP_END_FACTORY_REG(IndexAdd)

+/**
+* @brief According to the index number of indices, replace the value
+*corresponding to x1 with the value in x2.
+
+* @par Inputs:
+* Three inputs, including:
+* @li x1: A Tensor. Must be one of the following types:
+* float16, float32, int32, int8, uint8.
+* @li x2: A Tensor of the same type as "x1".
+* @li indices: A Tensor of the indices, type should be int32.
+
+* @par Attributes:
+* @li accumulate: An optional int. Whether to accumulate instead of replace. Defaults to 0.
+
+* @par Outputs:
+* @li y: A Tensor. Same as input "x1".
+
+* @par Third-party framework compatibility
+* Compatible with the Pytorch operator index_put.
+
+* @par Restrictions:
+* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
+*/
+REG_OP(IndexPut)
+ .INPUT(x1, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+ .INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+ .INPUT(indices, TensorType({DT_INT64, DT_INT32}))
+ .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
+ .ATTR(accumulate, Int, 0)
+ .OP_END_FACTORY_REG(IndexPut)
+
 /**
 *@brief: Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input \n

 *@par Inputs:
-* Two inputs, including:
-*@li x: A Tensor. Must be one of the following types:
-* float16, float32, double, int32, uint8, int16, int8, complex64, int64,
-* qint8, quint8, qint32, uint16, complex128, uint32, uint64.
-*@li diagonal:(int, optional) – the diagonal to consider。\n
+*x: A Tensor. Must be one of the following types:
+*float16, float32, double, int32, uint8, int16, int8, complex64, int64,
+*qint8, quint8, qint32, uint16, complex128, uint32, uint64.
\n + +*@par Attributes: +*diagonal: An optional attribute indicates the diagonal to consider. \n *@par Outputs: *y: A Tensor. Has the same type as "x" . \n @@ -1138,11 +1249,12 @@ REG_OP(Triu) *@brief: Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input \n *@par Inputs: -* Two inputs, including: -*@li x: A Tensor. Must be one of the following types: -* float16, float32, double, int32, uint8, int16, int8, complex64, int64, -* qint8, quint8, qint32, uint16, complex128, uint32, uint64. -*@li diagonal:(int, optional) – the diagonal to consider。\n +*x: A Tensor. Must be one of the following types: +*float16, float32, double, int32, uint8, int16, int8, complex64, int64, +*qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n + +*@par Attributes: +*diagonal: An optional attribute indicates the diagonal to consider. \n *@par Outputs: *y: A Tensor. Has the same type as "x" . \n @@ -1213,6 +1325,30 @@ REG_OP(Eye) .ATTR(dtype, Int, 0) .OP_END_FACTORY_REG(Eye) +/** +*@brief: Fill diagonal of at least 2 dimension tensors with value . \n + +*@par Inputs: +*x: A Tensor. Must be one of the following types: +* float32, int32, int64 . \n + +*@par Outputs: +*y: A Tensor. Has the same type as "x" . \n + +*@par Attributes: +*fill_value:The value to fill in +*wrap: An optional bool. Defaults to "False". If "True", Use recursive fill. \n + +*@par Third-party framework compatibility +* Compatible with the Pytorch operator FillDiagonal. +*/ +REG_OP(FillDiagonal) + .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT64})) + .REQUIRED_ATTR(fill_value, Float) + .ATTR(wrap, Bool, false) + .OP_END_FACTORY_REG(FillDiagonal) + } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h index 98473c65..a55cebe2 100644 --- a/third_party/fwkacllib/inc/ops/nn_calculation_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_calculation_ops.h @@ -195,7 +195,7 @@ REG_OP(DepthwiseConv2DBackpropInput) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) .INPUT(filter, TensorType({DT_FLOAT16})) .INPUT(out_backprop, TensorType({DT_FLOAT16})) - .OUTPUT(input_grad, TensorType({DT_FLOAT16})) + .OUTPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(strides, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1}) .REQUIRED_ATTR(pads, ListInt) @@ -255,7 +255,7 @@ REG_OP(DepthwiseConv2DBackpropInput) REG_OP(DepthwiseConv2DBackpropInputD) .INPUT(filter, TensorType({DT_FLOAT16})) .INPUT(out_backprop, TensorType({DT_FLOAT16})) - .OUTPUT(input_grad, TensorType({DT_FLOAT16})) + .OUTPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(input_size, ListInt) .REQUIRED_ATTR(strides, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1}) @@ -367,19 +367,19 @@ REG_OP(BiasAddGrad) * Gradients with respect to the output of the convolution. 
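For the FillDiagonal registration above, a sketch of the 2-D semantics under the assumption that they match the Pytorch fill_diagonal_ the doc claims compatibility with; with wrap the diagonal restarts after each full pass, which in row-major order reduces to writing every (cols + 1)-th element:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // x is a rows x cols matrix in row-major order, modified in place.
    void FillDiagonalSketch(std::vector<float> &x, int64_t rows, int64_t cols,
                            float fill_value, bool wrap) {
      const int64_t step = cols + 1;
      // Without wrap only the leading min(rows, cols) diagonal cells are set;
      // with wrap the stride pattern continues through the whole buffer.
      int64_t remaining = wrap ? rows * cols : std::min(rows, cols);
      for (int64_t k = 0; k < rows * cols && remaining > 0;
           k += step, --remaining) {
        x[k] = fill_value;  // every (cols + 1)-th element lies on the diagonal
      }
    }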
*\n *\n - * The following are the supported data types and data formats: -*@verbatim - | Tensor | out_bckprop | filter | y - ------------|-------------|---------|-------- - | Data Type | float16 | float16 | float16 - | |-------------|---------|-------- - | | float32 | float32 | float32 - | |-------------|---------|-------- - | | float64 | float64 | float64 - ------------|-------------|---------|-------- - | Format | NCHW | NCHW | NCHW - | | NHWC | HWCN | NHWC -@endverbatim + * The following are the supported data types and data formats:\n + *\n + | Tensor | out_bckprop | filter | y\n + ------------|-------------|---------|--------\n + | Data Type | float16 | float16 | float16\n + | |-------------|---------|--------\n + | | float32 | float32 | float32\n + | |-------------|---------|--------\n + | | float64 | float64 | float64\n + ------------|-------------|---------|--------\n + | Format | NCHW | NCHW | NCHW\n + | | NHWC | HWCN | NHWC\n + *\n * For float32 and float64 type, the actual calculation on the chip is based on * float16. *\n @@ -398,36 +398,37 @@ REG_OP(BiasAddGrad) * "NHWC". Specify the data format of the input and output data. *\n *\n - * The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - -------------------|----------|-------------- - | input_size | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Filter | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | out_backprop | H*strideH| [1, 4096] - | | W*strideW| [1, 4096] - -------------------|----------|-------------- - | y(fmap) | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Stride | H | [1, 63] - | | W | [1, 63] - -------------------|----------|-------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - -------------------|----------|-------------- - | Dilation | H | [1, 255] - | | W | [1, 255] + * The following value range restrictions must be met:\n + *\n + | Name | Field | Scope\n + -------------------|----------|--------------\n + | input_size | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Filter | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | out_backprop | H*strideH| [1, 200000]\n + | | W*strideW| [1, 4096]\n + -------------------|----------|--------------\n + | y(fmap) | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Stride | H | [1, 63]\n + | | W | [1, 63]\n + -------------------|----------|--------------\n + | Padding | Top | [0, 255]\n + | | Bottom | [0, 255]\n + | | Left | [0, 255]\n + | | Right | [0, 255]\n + -------------------|----------|--------------\n + | Dilation | H | [1, 255]\n + | | W | [1, 255]\n + *\n -@endverbatim * In Ascend910, fmap or out_backprop's H and W not support 1 when * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * and filter_width > fmap_width * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 *\n * @@ -496,7 +497,7 @@ REG_OP(Conv2DBackpropInput) REG_OP(Conv2DBackpropInputD) .INPUT(filter, TensorType({DT_FLOAT16, DT_INT8})) .INPUT(out_backprop, TensorType({DT_FLOAT16, DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT32})) .REQUIRED_ATTR(input_size, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) @@ -508,7 
+509,7 @@ REG_OP(Conv2DBackpropInputD) /** *@brief Computes the Deconvolution with respect to the input. *@par Inputs: - * Three inputs: + * Two required inputs: * @li x: A Tensor of type float16 or int8. 4D with shape * [batch, out_channels, out_height, out_width]. Gradients with respect * to the output of the convolution. @@ -520,16 +521,16 @@ REG_OP(Conv2DBackpropInputD) * Type is int8. Reserved.\n *\n *\n - * The following are the supported data types and data formats: -*@verbatim - | Tensor | x | filter | bias | y - ------------|---------|---------|---------|-------- - | Data Type | float16 | float16 | float16 | float16 - | |---------|---------|---------|-------- - | | int8 | int8 | int32 | int32 - ------------|---------|---------|---------|-------- - | Format | NCHW | NCHW | ND | NCHW -@endverbatim + * The following are the supported data types and data formats:\n + *\n + | Tensor | x | filter | bias | y\n + ------------|---------|---------|---------|--------\n + | Data Type | float16 | float16 | float16 | float16\n + | |---------|---------|---------|--------\n + | | int8 | int8 | int32 | int32\n + ------------|---------|---------|---------|--------\n + | Format | NCHW | NCHW | ND | NCHW\n + *\n * For int8, a dequant or requant operator must be followed. *\n * @@ -550,35 +551,35 @@ REG_OP(Conv2DBackpropInputD) * within the effective range of int8 [-128, 127]. Defaults to "0". *\n *\n - * The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - -------------------|----------|-------------- - | x (out_backprop) | H*strideH| [1, 4096] - | | W*strideW| [1, 4096] - -------------------|----------|-------------- - | Filter | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | y (fmap) | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Stride | H | [1, 63] - | | W | [1, 63] - -------------------|----------|-------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - -------------------|----------|-------------- - | Dilation | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | Offset_x | | [-128, 127] - -@endverbatim + * The following value range restrictions must be met:\n + *\n + | Name | Field | Scope\n + -------------------|----------|--------------\n + | x (out_backprop) | H*strideH| [1, 200000]\n + | | W*strideW| [1, 4096]\n + -------------------|----------|--------------\n + | Filter | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | y (fmap) | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Stride | H | [1, 63]\n + | | W | [1, 63]\n + -------------------|----------|--------------\n + | Padding | Top | [0, 255]\n + | | Bottom | [0, 255]\n + | | Left | [0, 255]\n + | | Right | [0, 255]\n + -------------------|----------|--------------\n + | Dilation | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | Offset_x | | [-128, 127]\n + *\n * In Ascend910, fmap or out_backprop's H and W not support 1 when * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * and filter_width > fmap_width * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 *\n * @@ -628,19 +629,19 @@ REG_OP(Deconvolution) * convolution. 
*\n *\n - * The following are the supported data types and data formats: -*@verbatim - | Tensor | x | out_backprop | y - ------------|---------|--------------|--------- - | Data Type | float16 | float16 | float16 - | |---------|--------------|--------- - | | float32 | float32 | float32 - | |---------|--------------|--------- - | | float64 | float64 | float64 - |-----------|---------|--------------|--------- - | Format | NCHW | NCHW | NCHW - | | NHWC | NHWC | HWCN -@endverbatim + * The following are the supported data types and data formats:\n + *\n + | Tensor | x | out_backprop | y\n + ------------|---------|--------------|---------\n + | Data Type | float16 | float16 | float16\n + | |---------|--------------|---------\n + | | float32 | float32 | float32\n + | |---------|--------------|---------\n + | | float64 | float64 | float64\n + |-----------|---------|--------------|---------\n + | Format | NCHW | NCHW | NCHW\n + | | NHWC | NHWC | HWCN\n + *\n * For float32 and float64 type of x and outbackprop, the actual calculation on the chip * is based on float16. *\n @@ -658,39 +659,34 @@ REG_OP(Deconvolution) * @li data_format: An optional string from: "NHWC", "NCHW". Defaults to * "NHWC". Specify the data format of the input and output data. *\n -*\n -* The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - -------------------|----------|-------------- - | x(fmap) | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Filter Size | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | out_backprop | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | y | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Stride | H | [1, 63] - | | W | [1, 63] - -------------------|----------|-------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - -------------------|----------|-------------- - | Dilation | H | [1, 255] - | | W | [1, 255] - -@endverbatim - * In Ascend910, out_backprop's H and W not support 1 when - * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 *\n - * + * The following value range restrictions must be met:\n + *\n + | Name | Field | Scope\n + -------------------|----------|--------------\n + | x(fmap) | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Filter Size | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | out_backprop | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | y | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Stride | H | [1, 63]\n + | | W | [1, 63]\n + -------------------|----------|--------------\n + | Padding | Top | [0, 255]\n + | | Bottom | [0, 255]\n + | | Left | [0, 255]\n + | | Right | [0, 255]\n + -------------------|----------|--------------\n + | Dilation | H | [1, 255]\n + | | W | [1, 255]\n + *\n *@par Outputs: * y: A Tensor. Has the same type as x, has the same format as filter_size. 
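The H/W ranges above are linked through the usual convolution geometry; the following helper, given only as an illustration, computes the output extent along one axis that the out_backprop and y shapes must be consistent with:

    #include <cstdint>

    // Output extent of a convolution along one spatial axis. The effective
    // filter size accounts for dilation, as in the constraints listed above.
    int64_t ConvOutDim(int64_t in, int64_t filter, int64_t pad_before,
                       int64_t pad_after, int64_t stride, int64_t dilation) {
      const int64_t effective_filter = (filter - 1) * dilation + 1;
      return (in + pad_before + pad_after - effective_filter) / stride + 1;
    }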
*\n @@ -780,16 +776,16 @@ REG_OP(Conv2DBackpropFilterD) *\n *\n * The following are the supported data types and data formats: -*@verbatim - | Tensor | x | filter | bias | y - ------------|---------|---------|---------|-------- - | Data Type | float16 | float16 | float16 | float16 - | | float32 | float32 | float32 | float32 - | | int8 | int8 | int32 | int32 - ------------|---------|---------|---------|-------- - | Format | NCHW | NCHW | ND | NCHW - | | NHWC | HWCN | | NHWC -@endverbatim +*\n +*\n +| Tensor | x | filter | bias | y |\n +| :-------: | :-----: | :-----: | :-----: | :-----: |\n +| Data Type | float16 | float16 | float16 | float16 |\n +| | float32 | float32 | float32 | float32 |\n +| | int8 | int8 | int32 | int32 |\n +| Format | NCHW | NCHW | ND | NCHW |\n +| | NHWC | HWCN | | NHWC |\n +*\n * For float32 type, the actual calculation on the chip is based on * float16. *\n @@ -813,35 +809,30 @@ REG_OP(Conv2DBackpropFilterD) *\n *\n * The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - -------------------|----------|-------------- - | Input Image Size | H | [1, 100000] - | | W | [1, 4096] - -------------------|----------|-------------- - | Filter Size | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | Stride | H | [1, 63] - | | W | [1, 63] - -------------------|----------|-------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - -------------------|----------|-------------- - | Dilation | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | Offset_x | | [-128, 127] - -@endverbatim +*\n +*\n +| Name | Field | Scope |\n +| :--------------: | :------: | :---------: |\n +| Input Image Size | H | [1, 100000] |\n +| | W | [1, 4096] |\n +| Filter Size | H | [1, 255] |\n +| | W | [1, 255] |\n +| Stride | H | [1, 63] |\n +| | W | [1, 63] |\n +| Padding | Top | [0, 255] |\n +| | Bottom | [0, 255] |\n +| | Left | [0, 255] |\n +| | Right | [0, 255] |\n +| Dilation | H | [1, 255] |\n +| | W | [1, 255] |\n +| Offset_x | - | [-128, 127] |\n +*\n * The W dimension of the input image supports cases exceeding 4096, but it may * cause compilation errors. *\n * *@par Outputs: -*@li y: A 4D Tensor of output feature map. Has the same type as "x". With the +* y: A 4D Tensor of output feature map. Has the same type as "x". With the * format "NHWC", the data is stored in the order of: [batch, out_height, * out_width, out_channels]. *\n @@ -956,16 +947,15 @@ REG_OP(Conv2DCompress) *\n *\n * The following are the supported data types and data formats: -*@verbatim - | Tensor | x | filter | offsets | bias | y - ------------|---------|---------|---------|----------|-------- - | Data Type | float16 | float16 | float16 | float16 | float16 - | |---------|---------|---------|----------|-------- - | | float32 | float32 | float32 | float32 | float32 - ------------|---------|---------|---------|----------|-------- - | Format | NCHW | NCHW | NCHW | ND | NCHW - | | NHWC | HWCN | NHWC | | NHWC -@endverbatim +*\n +*\n +| Tensor | x | filter | offsets | bias | y |\n +| :-------: | :-----: | :-----: | :-----: | :-----: | :-----: |\n +| Data Type | float16 | float16 | float16 | float16 | float16 |\n +| | float32 | float32 | float32 | float32 | float32 |\n +| Format | NCHW | NCHW | NCHW | ND | NCHW |\n +| | NHWC | HWCN | NCHW | | NHWC |\n +*\n * For float32 type, the actual convolution calculation part on the chip is * based on float16. 
*\n @@ -992,19 +982,18 @@ REG_OP(Conv2DCompress) *\n *\n * The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - --------------------|--------|---------------------------- - | Input Image Size | H | [1, 100000 / filter_height] - | | W | [1, 4096 / filter_width] - --------------------|--------|---------------------------- - | Filter Size | H | [1, 63] - | | W | [1, 63] -@endverbatim +*\n +*\n +| Name | Field | Scope |\n +| :--------------: | :------: | :-------------------------: |\n +| Input Image Size | H | [1, 100000 / filter_height] |\n +| | W | [1, 4096 / filter_width] |\n +| Filter Size | H | [1, 63] |\n +| | W | [1, 63] |\n *\n * *@par Outputs: -*@li y: A 4D Tensor of output feature map. Has the same type as "x". With the +* y: A 4D Tensor of output feature map. Has the same type as "x". With the * format "NHWC", the data is stored in the order of: [batch, out_height, * out_width, out_channels]. *\n @@ -1042,41 +1031,38 @@ REG_OP(DeformableConv2D) /** *@brief Computes a 3D convolution given 5D "x" and "filter" tensors. - *@par Inputs: + +*@par Inputs: * @li x: A 5D tensor. Must be one of the following types: float16, * (Currently does not support int8). The format of x is NCDHW or NDHWC. * @li filter: A 5D tensor of the same type as "x". * (Currently does not support int8). - * The format is NCDHW, NDHWC or DHWCN . \n - -*@par Optional input: - * @li bias: An optional 1D tensor of the same type as "x". - * @li offset_w: An optional 1D tensor for quantized deconvolution. Reserved . \n + * The format is NCDHW, NDHWC or DHWCN. + * @li bias: Optional. An 1D tensor of the same type as "x". + * @li offset_w: Optional. An 1D tensor for quantized deconvolution. Reserved. \n -*@par Required Attributes: - * @li strides: A list of 5 integers. Specifies the stride of the sliding window +*@par Attributes: + * @li strides: Required. A list of 5 integers. Specifies the stride of the sliding window * for each dimension of "x". * The N and C dimensions must be 1. Has the same format as "x". - * @li pads: A list of 6 integers. + * @li pads: Required. A list of 6 integers. * Supports only padding along the D, H and W dimensions in sequence of head, - * tail, top, bottom, left and right . \n - -*@par Attributes: - * @li groups: Number of blocked connections from input channels to output + * tail, top, bottom, left and right. + * @li dilations: Optional. A list of 5 integers. Specifies the dilation factor for each + * dimension of "x". + * @li groups: Optional. Number of blocked connections from input channels to output * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". + * @li data_format: Optional. An string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. - * @li dilations: A list of 5 integers. Specifies the dilation factor for each - * dimension of "x". * The N, C and D dimensions must be 1. Has the same format as "x". - * @li offset_x: An optional int. Input offset, used for quantized inference. - * Defaults to 0. Reserved . \n + * @li offset_x: Optional. An int. Input offset, used for quantized inference. + * Defaults to 0. Reserved. \n *@par Outputs: - *y: A Tensor. Has the same type and data format as "x". \n + * y: A Tensor. Has the same type and data format as "x". \n *@attention Constraints: - *The image size after padding is greater than the filter size . \n + * The image size after padding is greater than the filter size. 
\n *@par Third-party framework compatibility * @li Compatible with the TensorFlow operator conv3d. @@ -1085,9 +1071,9 @@ REG_OP(DeformableConv2D) REG_OP(Conv3D) .INPUT(x, TensorType({DT_FLOAT16})) .INPUT(filter, TensorType({DT_FLOAT16})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT32})) .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1, 1}) @@ -1099,8 +1085,8 @@ REG_OP(Conv3D) /** *@brief Computes the gradients of convolution 3d with respect to the input. + *@par Inputs: - * Three inputs: * @li input_size: A Tensor of type int32, int64. An integer vector representing * the shape of input, where input is a 5-D tensor * [batch, depth, height, width, channels] or @@ -1110,28 +1096,25 @@ REG_OP(Conv3D) * @li out_backprop: A Tensor. Must have the same type as filter. * 5-D with shape [batch, depth, out_height, out_width, out_channels] * or [batch, out_channels, depth, out_height, out_width]. Gradients with - * respect to the output of the convolution . \n + * respect to the output of the convolution. \n -*@par Required Attributes: - * @li strides: A list of 5 integers. Specifies the stride of the sliding window +*@par Attributes: + * @li strides: Required. A list of 5 integers. Specifies the stride of the sliding window * for each dimension of "out_backprop". * The N and C dimensions must be 1. Has the same format as "out_backprop". - * @li pads: A list of 6 integers. + * @li pads: Required. A list of 6 integers. * Supports only padding along the D, H and W dimensions in sequence of head, - * tail, top, bottom, left and right . \n - -*@par Attributes: - * Three attributes: - * @li groups: Number of blocked connections from input channels to output - * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". - * Defaults to "NDHWC". Specify the data format of the input and output data. - * @li dilations: A tuple/list of 5 integers, The dilation factor for each + * tail, top, bottom, left and right. + * @li dilations: Optional. A tuple/list of 5 integers, The dilation factor for each * dimension of the input. * The N, C and D dimensions must be 1. Has the same format as "out_backprop". + * @li groups: Optional. Number of blocked connections from input channels to output + * channels. + * @li data_format: Optional. An string from: "NDHWC", "NCDHW". + * Defaults to "NDHWC". Specify the data format of the input and output data. \n *@par Outputs: - * y: A Tensor. Has the same type as filter,and has same format as "input_size" + * y: A Tensor. Has the same type as filter,and has same format as "input_size". \n *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_input @@ -1150,45 +1133,44 @@ REG_OP(Conv3DBackpropInput) /** *@brief Computes the gradients of convolution 3d with respect to the input. + *@par Inputs: - * Two inputs: * @li filter: A Tensor whose type is float16. The format of filter is NCDHW, * NDHWC or DHWCN. * @li out_backprop: A Tensor. Must have the same type as filter. The format is - * NDHWC or NCDHW. \n + * NDHWC or NCDHW. \n -*@par Required Attributes: - * @li strides: A list of 5 integers. Specifies the stride of the sliding window +*@par Attributes: + * @li input_size: Required. A tuple/list of type int32, int64. 
An integer vector + * representing the shape of input, where input is a 5-D tensor + * [batch, depth, height, width, channels] or + * [batch, channels, depth, height, width]. + * @li strides: Required. A list of 5 integers. Specifies the stride of the sliding window * for each dimension of "out_backprop". * The N and C dimensions must be 1. Has the same format as "out_backprop". - * @li pads: A list of 6 integers. Supports only padding along the D, H and W + * @li pads: Required. A list of 6 integers. Supports only padding along the D, H and W * dimensions in sequence of head, tail, top, bottom, left and right. - * @li input_size: A tuple/list of type int32, int64. An integer vector - * representing the shape of input, where input is a 5-D tensor - * [batch, depth, height, width, channels] or - * [batch, channels, depth, height, width] . \n - -*@par Attributes: - * Three attributes: - * @li groups: Number of blocked connections from input channels to output - * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". - * Defaults to "NDHWC". Specify the data format of the input and output data. - * @li dilations: A tuple/list of 5 integers, The dilation factor for each + * @li dilations: Optional. A tuple/list of 5 integers, The dilation factor for each * dimension of input. * The N, C and D dimensions must be 1. Has the same format as "out_backprop". + * @li groups: Optional. Number of blocked connections from input channels to output + * channels. + * @li data_format: Optional. An string from: "NDHWC", "NCDHW". + * Defaults to "NDHWC". Specify the data format of the input and output data. \n + *@par Outputs: - * y: A Tensor. Has the same type and data format as "out_backprop". + * y: A Tensor. Has the same type and data format as "out_backprop". \n + *@par Third-party framework compatibility - * Compatible with Tensorflow's conv3d_backprop_input + * Compatible with Tensorflow's conv3d_backprop_input. \n *@par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DBackpropInput instead. + * Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DBackpropInput instead. */ REG_OP(Conv3DBackpropInputD) .INPUT(filter, TensorType({DT_FLOAT16})) .INPUT(out_backprop, TensorType({DT_FLOAT16})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(input_size, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) @@ -1242,8 +1224,8 @@ REG_OP(LSTM) /** *@brief Computes the gradients of convolution3D with respect to the filter + *@par Inputs: - * Three inputs: * @li x: A Tensor. Must be one of the following types: float16, float32. * Currently does not support double. * 5-D with shape [batch, in_depth, in_height, in_width, in_channels] @@ -1258,26 +1240,23 @@ REG_OP(LSTM) * or [batch, out_channels, out_depth, out_height, out_width]. * Gradients with respect to the output of the convolution. \n -*@par Required Attributes: - * @li strides: A tuple/list of 5 integers. Specifies the stride of the sliding +*@par Attributes: + * @li strides: Required. A tuple/list of 5 integers. Specifies the stride of the sliding * window for each dimension of "x". The N and C dimensions must be 1. * Has the same format as "x". - * @li pads: A tuple/list of 6 integers, [front, back, top, bottom, left, right] - * pads on feature map . \n - -*@par Attributes: - * Three attributes: - * @li dilations: A tuple/list of 5 integers, The dilation factor for each + * @li pads: Required. 
A tuple/list of 6 integers, [front, back, top, bottom, left, right] + * pads on feature map. + * @li dilations: Optional. A tuple/list of 5 integers, The dilation factor for each * dimension of input. * The N, C and D dimensions must be 1. Has the same format as "x". - * @li groups: Number of blocked connections from input channels to output + * @li groups: Optional. Number of blocked connections from input channels to output * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". - * Defaults to "NDHWC". Specify the data format of the input and output data. + * @li data_format: Optional. An string from: "NDHWC", "NCDHW". + * Defaults to "NDHWC". Specify the data format of the input and output data. \n *@par Outputs: - * y: A Tensor that has the same type as "x" - * and the format is NDHWC, NCDHW or DHWCN. + * y: A Tensor that has the same type as "x" and the format is NDHWC, NCDHW or DHWCN. \n + *@par Third-party framework compatibility * Compatible with Tensorflow's conv3d_backprop_filter */ @@ -1295,8 +1274,8 @@ REG_OP(Conv3DBackpropFilter) /** *@brief Computes the gradients of convolution with respect to the filter. + *@par Inputs: - * Two inputs: * @li x: A Tensor of type float16. * 5-D with shape [batch, in_depth, in_height, in_width, in_channels] * or [batch, in_channels, in_depth, in_height, in_width]. @@ -1305,37 +1284,34 @@ REG_OP(Conv3DBackpropFilter) * or [batch, out_channels, out_depth, out_height, out_width]. * Gradients with respect to the output of the convolution. \n -*@par Required Attributes: - * @li filter_size: A tuple/list of type integers. An integer vector +*@par Attributes: + * @li filter_size: Required. A tuple/list of type integers. An integer vector * representing the tensor shape of filter, where filter is a 5-D tensor * [filter_depth, filter_height, filter_width, in_channels, out_channels], * [out_channels, filter_depth, filter_height, filter_width, in_channels] * or [out_channels, in_channels, filter_depth, filter_height, filter_width]. - * @li strides: A tuple/list of 5 integers. Specifies the stride of the sliding + * @li strides: Required. A tuple/list of 5 integers. Specifies the stride of the sliding * window for each dimension of "x". * The N and C dimensions must be 1. Has the same format as "x". - * @li pads: A tuple/list of 6 integers, [front, back, top, bottom, left, right] - * pads on feature map. \n - -*@par Attributes: - * Three attributes: - * @li dilations: A tuple/list of 5 integers, The dilation factor for each + * @li pads: Required. A tuple/list of 6 integers, [front, back, top, bottom, left, right] + * pads on feature map. + * @li dilations: Optional. A tuple/list of 5 integers, The dilation factor for each * dimension of input. * The N, C and D dimensions must be 1. Has the same format as "x". - * @li groups: Number of blocked connections from input channels to output + * @li groups: Optional. Number of blocked connections from input channels to output * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". - * Defaults to "NDHWC". Specify the data format of the input and output data. + * @li data_format: Optional. An optional string from: "NDHWC", "NCDHW". + * Defaults to "NDHWC". Specify the data format of the input and output data. \n *@par Outputs: - * y: A Tensor of type float32 and the format is NDHWC, NCDHW or DHWCN. + * y: A Tensor of type float32 and the format is NDHWC, NCDHW or DHWCN. 
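Because the 5-integer strides and 6-integer pads lists above are easy to mis-order, here is a hedged illustration of their layout for NDHWC data; the helper names are ours, not the library's:

    #include <array>
    #include <cstdint>

    // strides follow the data format, with the N and C entries fixed to 1
    // (NDHWC shown here; NCDHW would be {1, 1, d, h, w}).
    std::array<int64_t, 5> StridesNdhwc(int64_t d, int64_t h, int64_t w) {
      return {1, d, h, w, 1};
    }

    // pads are always ordered head, tail, top, bottom, left, right (D, H, W).
    std::array<int64_t, 6> PadsDhw(int64_t head, int64_t tail, int64_t top,
                                   int64_t bottom, int64_t left, int64_t right) {
      return {head, tail, top, bottom, left, right};
    }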
\n + *@par Third-party framework compatibility - * Compatible with Tensorflow's conv3d_backprop_filter + * Compatible with Tensorflow's conv3d_backprop_filter. \n + *@par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DBackpropFilter instead. + * Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DBackpropFilter instead. */ - - REG_OP(Conv3DBackpropFilterD) .INPUT(x, TensorType({DT_FLOAT16})) .INPUT(out_backprop, TensorType({DT_FLOAT16})) @@ -1350,37 +1326,32 @@ REG_OP(Conv3DBackpropFilterD) /** *@brief Computes the transpose of convolution 3d with respect to the input. + *@par Inputs: - * Three inputs: * @li input_size: A Tensor of type int32. An integer vector representing the * shape of input. * @li x: A Tensor of type float16, currently does not support int8. The format * is NDHWC or NCDHW. * @li filter: A Tensor of type float16, currently does not support int8. * The format is NDHWC, NCDHW or DHWCN. + * @li bias: Optional. An optional 1D tensor of the same type as "x". Reserved. + * @li offset_w: Optional. An optional 1D tensor for quantized deconvolution. Reserved. \n -*@par Optional input: - * Two optional inputs - * @li bias: An optional 1D tensor of the same type as "x". Reserved. - * @li offset_w: An optional 1D tensor for quantized deconvolution. Reserved . \n - -*@par Required Attributes: - * @li strides: A tuple/list of 5 integers. Specifies the stride of the sliding +*@par Attributes: + * @li strides: Required. A tuple/list of 5 integers. Specifies the stride of the sliding * window for each dimension of "x". * The N and C dimensions must be 1. Has the same format as "x". - * @li pads: A tuple/list of 6 integers - -*@par Attributes: - * Five attributes: - * @li groups: Number of blocked connections from input channels to output - * channels. - * @li dilations: A tuple/list of 5 integers, + * @li pads: Required. A tuple/list of 6 integers. + * @li dilations: Optional. A tuple/list of 5 integers, * The dilation factor for each dimension of input. * The N, C and D dimensions must be 1. Has the same format as "x". - * @li data_format: An optional string from: "NDHWC", "NCDHW". + * @li groups: Optional. Number of blocked connections from input channels to output + * channels. + * @li data_format: Optional. An string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. - * @li output_padding: The size will be added in the output shape. - * @li offset_x: Input offset_x value. Reserved. + * @li output_padding: Optional. The size will be added in the output shape. + * @li offset_x: Optional. Input offset_x value. Reserved. \n + *@par Outputs: * y: A Tensor. Has the same type and format as "x". */ @@ -1388,9 +1359,9 @@ REG_OP(Conv3DTranspose) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) .INPUT(x, TensorType({DT_FLOAT16})) .INPUT(filter, TensorType({DT_FLOAT16})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT32})) .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1, 1}) @@ -1402,46 +1373,44 @@ REG_OP(Conv3DTranspose) /** *@brief Computes the transpose of convolution 3d with respect to the input. + *@par Inputs: * @li x: A Tensor of type float16, currently does not support int8. * The format is NDHWC or NCDHW. 
* @li filter: A Tensor of type float16, currently does not support int8. * The format is NDHWC, NCDHW or DHWCN. + * @li bias: Optional. An 1D tensor of the same type as "x". Reserved. + * @li offset_w: Optional. An 1D tensor for quantized deconvolution. Reserved. \n -*@par Optional inputs: - * @li bias: An optional 1D tensor of the same type as "x". Reserved. - * @li offset_w: An optional 1D tensor for quantized deconvolution. Reserved . \n - -*@par Required Attributes: - * @li input_size: A tuple/list of type int32. - * An integer vector representing the shape of input - * @li strides: A tuple/list of 5 integers. +*@par Attributes: + * @li input_size: Required. A tuple/list of type int32. + * An integer vector representing the shape of input. + * @li strides: Required. A tuple/list of 5 integers. * Specifies the stride of the sliding window for each dimension of "x". * The N and C dimensions must be 1. Has the same format as "x". - * @li pads: A tuple/list of 6 integers . \n - -*@par Attributes: - * Five attributes: - * @li dilations: A tuple/list of 5 integers, The dilation factor for each + * @li pads: Required. A tuple/list of 6 integers. + * @li dilations: Optional. A tuple/list of 5 integers, The dilation factor for each * dimension of input. * The N, C and D dimensions must be 1. Has the same format as "x". - * @li groups: Number of blocked connections from input channels to output + * @li groups: Optional. Number of blocked connections from input channels to output * channels. - * @li data_format: An optional string from: "NDHWC", "NCDHW". + * @li data_format: Optional. An optional string from: "NDHWC", "NCDHW". * Defaults to "NDHWC". Specify the data format of the input and output data. - * @li output_padding: The size will be added in the output shape. - * @li offset_x: Input offset_x value. Reserved. + * @li output_padding: Optional. The size will be added in the output shape. + * @li offset_x: Optional. Input offset_x value. Reserved. \n + *@par Outputs: - * y: A Tensor. Has the same type and format as "x". + * y: A Tensor. Has the same type and format as "x". \n + *@par Restrictions: -* Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DTranspose instead. + * Warning: THIS FUNCTION IS DEPRECATED. Please use Conv3DTranspose instead. */ REG_OP(Conv3DTransposeD) .INPUT(x, TensorType({DT_FLOAT16})) .INPUT(filter, TensorType({DT_FLOAT16})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_FLOAT32})) .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32})) .REQUIRED_ATTR(input_size, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) @@ -1469,17 +1438,17 @@ REG_OP(Conv3DTransposeD) * @li offset_w: An optional 1D tensor for quantized inference. Reserved. 
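For the transpose (deconvolution) ops above, the output extent along one axis follows the standard relation between strides, pads, dilations and output_padding; a sketch under that usual definition:

    #include <cstdint>

    // One-axis output extent of a transposed convolution, mirroring the
    // standard deconvolution relation implied by the attributes above.
    int64_t ConvTransposeOutDim(int64_t in, int64_t filter, int64_t pad_before,
                                int64_t pad_after, int64_t stride,
                                int64_t dilation, int64_t output_padding) {
      return (in - 1) * stride - pad_before - pad_after +
             dilation * (filter - 1) + 1 + output_padding;
    }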
*\n *\n - * The following are the supported data types and data formats: -*@verbatim - | Tensor | x | filter | bias | y - ------------|---------|---------|---------|-------- - | Data Type | float16 | float16 | float16 | float16 - | |---------|---------|---------|-------- - | | int8 | int8 | int32 | int32 - ------------|---------|---------|---------|-------- - | Format | NCHW | NCHW | ND | NCHW - | | NHWC | HWCN | | NHWC -@endverbatim + * The following are the supported data types and data formats:\n + *\n + | Tensor | x | filter | bias | y\n + ------------|---------|---------|---------|--------\n + | Data Type | float16 | float16 | float16 | float16\n + | |---------|---------|---------|--------\n + | | int8 | int8 | int32 | int32\n + ------------|---------|---------|---------|--------\n + | Format | NCHW | NCHW | ND | NCHW\n + | | NHWC | HWCN | | NHWC\n + *\n * For int8, a dequant or requant operator must be followed. *\n * @@ -1504,38 +1473,38 @@ REG_OP(Conv3DTransposeD) * within the effective range of int8 [-128, 127]. Defaults to "0". *\n *\n - * The following value range restrictions must be met: -*@verbatim - | Name | Field | Scope - -------------------|----------|-------------- - | input_size | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | x (out_backprop) | H*strideH| [1, 4096] - | | W*strideW| [1, 4096] - -------------------|----------|-------------- - | filter | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | y (fmap) | H | [1, 4096] - | | W | [1, 4096] - -------------------|----------|-------------- - | Stride | H | [1, 63] - | | W | [1, 63] - -------------------|----------|-------------- - | Padding | Top | [0, 255] - | | Bottom | [0, 255] - | | Left | [0, 255] - | | Right | [0, 255] - -------------------|----------|-------------- - | Dilation | H | [1, 255] - | | W | [1, 255] - -------------------|----------|-------------- - | Offset_x | | [-128, 127] - -@endverbatim + * The following value range restrictions must be met:\n + *\n + | Name | Field | Scope\n + -------------------|----------|--------------\n + | input_size | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | x (out_backprop) | H*strideH| [1, 200000]\n + | | W*strideW| [1, 4096]\n + -------------------|----------|--------------\n + | filter | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | y (fmap) | H | [1, 200000]\n + | | W | [1, 4096]\n + -------------------|----------|--------------\n + | Stride | H | [1, 63]\n + | | W | [1, 63]\n + -------------------|----------|--------------\n + | Padding | Top | [0, 255]\n + | | Bottom | [0, 255]\n + | | Left | [0, 255]\n + | | Right | [0, 255]\n + -------------------|----------|--------------\n + | Dilation | H | [1, 255]\n + | | W | [1, 255]\n + -------------------|----------|--------------\n + | Offset_x | | [-128, 127]\n + *\n * In Ascend910, fmap or out_backprop's H and W not support 1 when * fmap_h + pad_top + pad_bottom != (filter_height - 1) * dilation_h + 1 + * and filter_width > fmap_width * If filter_h = 1 and filter_w = 1, out_backprop_w * stride_h * stride_w < 4096 *\n * @@ -1557,9 +1526,9 @@ REG_OP(Conv2DTranspose) .INPUT(input_size, TensorType({DT_INT32, DT_INT64})) .INPUT(x, TensorType({DT_FLOAT16, DT_INT8})) .INPUT(filter, TensorType({DT_FLOAT16, DT_INT8})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_INT32})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT32})) 
.OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT32})) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) .ATTR(dilations, ListInt, {1, 1, 1, 1}) @@ -1604,9 +1573,9 @@ REG_OP(Conv2DTranspose) REG_OP(Conv2DTransposeD) .INPUT(x, TensorType({DT_FLOAT16, DT_INT8})) .INPUT(filter, TensorType({DT_FLOAT16, DT_INT8})) - .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_INT32})) + .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT32})) .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8})) - .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT32})) .REQUIRED_ATTR(input_size, ListInt) .REQUIRED_ATTR(strides, ListInt) .REQUIRED_ATTR(pads, ListInt) @@ -1623,14 +1592,12 @@ REG_OP(Conv2DTransposeD) * Two inputs: * @li x: A Tensor of type float16,float32 * @li offsets: A Tensor of type float16,float32.Deformation offset parameter. -*@par Required Attributes: +*@par Attributes: * @li strides: A tuple/list of 4 integers.The stride of the sliding window for * height and width for H/W dimension. * @li pads: A tuple/list of 4 integers.Padding added to H/W dimension * of the input. * @li ksize: A tuple/list of 2 integers.kernel size. -*@par Attributes: - * Four attributes: * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension * of input. Defaults to [1, 1, 1, 1] * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x. @@ -1659,22 +1626,20 @@ REG_OP(DeformableOffsets) * @li grad: A Tensor of type float16,float32. gradients with respect to DeformableOffsets output * @li x: A Tensor of type float16,float32. * @li offsets: A Tensor of type float16,float32.Deformation offset parameter. -*@par Required Attributes: +*@par Attributes: * @li strides: A tuple/list of 4 integers.The stride of the sliding window for * height and width for H/W dimension. * @li pads: A tuple/list of 4 integers.Padding added to H/W dimension * of the input. * @li ksize: A tuple/list of 2 integers.kernel size. -*@par Attributes: - * Three attributes: * @li dilations: A tuple/list of 4 integers, The dilation factor for each dimension * of input. Defaults to [1, 1, 1, 1] * @li data_format: An optional string from: "NCHW", "NHWC". Defaults to "NCHW". Specify the data format of the input x. * @li deformable_groups: Specify the c-axis grouping number of input x. * @li modulated: Specify version of DeformableConv2D, true means v2, false means v1. *@par Outputs: - * grad_x: A Tensor of type float16, float32. Gradients with respect to input_x - * grad_offsets: A Tensor of type float16, float32. Gradients with respect to input_offsets + * @li grad_x: A Tensor of type float16, float32. Gradients with respect to input_x + * @li grad_offsets: A Tensor of type float16, float32. Gradients with respect to input_offsets */ REG_OP(DeformableOffsetsGrad) .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -1695,11 +1660,9 @@ REG_OP(DeformableOffsetsGrad) *@brief Computes the deformed dilation output with the expected input *@par Inputs: * One inputs: - * @li x: A Tensor of type int8, float16, float32 -*@par Required Attributes: - * @li dilations: A tuple/list of integers. + * x: A Tensor of type int8, float16, float32 *@par Attributes: - * Two attributes: + * @li dilations: A tuple/list of integers. * @li padding_value: default value filling in blank * @li pads: A tuple/list of integers. 
*@par Outputs:
diff --git a/third_party/fwkacllib/inc/ops/nn_detect_ops.h b/third_party/fwkacllib/inc/ops/nn_detect_ops.h
index 5fa40ad6..bd14df77 100644
--- a/third_party/fwkacllib/inc/ops/nn_detect_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_detect_ops.h
@@ -152,6 +152,42 @@ REG_OP(Iou)
     .ATTR(mode, String, "iou")
     .OP_END_FACTORY_REG(Iou)
 
+/**
+*@brief Computes GIoU. First calculate the minimum closure area of the two boxes and their IoU;
+* then compute the proportion of the closure area that does not belong to either box;
+* finally, subtract this proportion from the IoU to get GIoU . \n

+*@par Inputs:
+* Two inputs, including:
+*@li bboxes: Bounding boxes, a 2D Tensor of type float16 or float32 with
+* shape (N, 4). "N" indicates the number of bounding boxes, and the value
+* "4" refers to [x1, y1, x2, y2] or [x, y, w, h].
+*@li gtboxes: Ground-truth boxes, a 2D Tensor of type float16 or float32
+* with shape (M, 4). "M" indicates the number of ground truth boxes, and
+* the value "4" refers to [x1, y1, x2, y2] or [x, y, w, h] . \n

+*@par Attributes:
+*@li trans: An optional bool, true for 'xywh', false for 'xyxy'.
+*@li is_cross: An optional bool, controls whether the output shape is [M, N] or [1, N].
+*@li mode: Computation mode, a character string with the value range of [iou, iof] . \n

+*@par Outputs:
+* overlap: A 2D Tensor of type float16 or float32 with shape [M, N] or [1, N],
+* specifying the IoU or IoF ratio . \n

+*@attention Constraints:
+* Only computation of float16 data is supported. To avoid overflow, the input
+* length and width are scaled by 0.2 internally.
+*/
+REG_OP(GIoU)
+    .INPUT(bboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .INPUT(gtboxes, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .OUTPUT(overlap, TensorType({DT_FLOAT16, DT_FLOAT}))
+    .ATTR(trans, Bool, false)
+    .ATTR(is_cross, Bool, true)
+    .ATTR(mode, String, "iou")
+    .OP_END_FACTORY_REG(GIoU)
+
 /**
 *@brief Performs the backpropagation of ROIAlign for training scenarios . \n
 
@@ -417,7 +453,7 @@ REG_OP(PSROIPooling)
 *@brief Returns detection result . \n
 
 *@par Inputs:
-* Four inputs, including:
+* Five inputs, including:
 *@li rois: An NCHW tensor of type float16 or float32, output from operator proposal_d at the preceding layer, used as the input of operator FSRDetectionOutput.
 *@li bbox_delta: An NCHWC0 tensor of type float16 or float32, specifying the prediction offset, used to update the coordinates [x1, y1, x2, y2] of each ROI.
 *@li score: An NCHWC0 tensor of type float16 or float32, specifying the probability of each class. Class 0 is the background class.
@@ -459,7 +495,7 @@ REG_OP(FSRDetectionOutput)
 *@brief Returns detection result . \n
 
 *@par Inputs:
-* Four inputs, including:
+* Three inputs, including:
 *@li bbox_delta: An ND tensor of type float16 or float32, specifying the box loc predictions, used as the input of operator SSDDetectionOutput.
 *@li score: An ND tensor of type float16 or float32, specifying the box confidences data, used as the input of operator SSDDetectionOutput.
 *@li anchors: An ND tensor of type float16 or float32, output from operator PriorBoxD, used as the input of operator SSDDetectionOutput.
@@ -474,7 +510,6 @@ REG_OP(FSRDetectionOutput)
 *@li code_type: An optional int32, specify the code type. Defaults to 1(only supports 2). The corner is 1, center_size is 2, corner_size is 3
 *@li keep_top_k: An optional int32, specify the topk value after nms. Defaults to -1
 *@li confidence_threshold: An optional float32, specify the topk filter threshold.
Only consider detections with confidence greater than the threshold -*@li kernel_name: An optional string, specifying the operator name. Defaults to "ssd_detection_output". *@par Outputs: *@li out_boxnum: A tensor of type int32, specifying the number of output boxes. *@li y: A tensor of type float16 or float32 with shape [batch,keep_top_k, 8], describing the information of each output box. @@ -989,26 +1024,26 @@ REG_OP(SPP) * feature map . \n *@attention Constraints: -*@li For the feature map input: -(1) If pooled_h = pooled_w = 2, the feature map size must not exceed 50. -(2) If pooled_h = pooled_w = 3, the feature map size must not exceed 60. -(3) If pooled_h = pooled_w = 4, the feature map size must not exceed 70. -(4) If pooled_h = pooled_w = 5, the feature map size must not exceed 70. -(5) If pooled_h = pooled_w = 6, the feature map size must not exceed 80. -(6) If pooled_h = pooled_w = 7, the feature map size must not exceed 80. -(7) If pooled_h = pooled_w = 8, the feature map size must not exceed 80. -(8) If pooled_h = pooled_w = 9, the feature map size must not exceed 70. -(9) If pooled_h = pooled_w = 10, the feature map size must not exceed 70. -(10) If pooled_h = pooled_w = 11, the feature map size must not exceed 70. -(11) If pooled_h = pooled_w = 12, the feature map size must not exceed 70. -(12) If pooled_h = pooled_w = 13, the feature map size must not exceed 70. -(13) If pooled_h = pooled_w = 14, the feature map size must not exceed 70. -(14) If pooled_h = pooled_w = 15, the feature map size must not exceed 70. -(15) If pooled_h = pooled_w = 16, the feature map size must not exceed 70. -(16) If pooled_h = pooled_w = 17, the feature map size must not exceed 50. -(17) If pooled_h = pooled_w = 18, the feature map size must not exceed 40. -(18) If pooled_h = pooled_w = 19, the feature map size must not exceed 40. -(19) If pooled_h = pooled_w = 20, the feature map size must not exceed 40. +* For the feature map input: +*@li If pooled_h = pooled_w = 2, the feature map size must not exceed 50. +*@li If pooled_h = pooled_w = 3, the feature map size must not exceed 60. +*@li If pooled_h = pooled_w = 4, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 5, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 6, the feature map size must not exceed 80. +*@li If pooled_h = pooled_w = 7, the feature map size must not exceed 80. +*@li If pooled_h = pooled_w = 8, the feature map size must not exceed 80. +*@li If pooled_h = pooled_w = 9, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 10, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 11, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 12, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 13, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 14, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 15, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 16, the feature map size must not exceed 70. +*@li If pooled_h = pooled_w = 17, the feature map size must not exceed 50. +*@li If pooled_h = pooled_w = 18, the feature map size must not exceed 40. +*@li If pooled_h = pooled_w = 19, the feature map size must not exceed 40. +*@li If pooled_h = pooled_w = 20, the feature map size must not exceed 40. *@par Third-party framework compatibility * It is a custom operator. It has no corresponding operator in Caffe. 
*/
@@ -1222,9 +1257,7 @@ REG_OP(RpnProposalsD)
 * @li box_filter: bool, mark of box_filter. Defaults to "true"
 * @li core_max_num: int, max number of core. Defaults to "8"
 *@par Outputs:
-* @li sorted_rois: A Tensor. Must be float16. N-D with shape [N, 4].
-* @li sorted_scores: A Tensor. Must be float16. N-D with shape [N, 1].
-* @li sorted_classes: A Tensor. Must be float16. N-D with shape [N, 1].
+*sorted_box: A Tensor. Must be float16. N-D with shape [N, 1].
 */
 REG_OP(RpnProposalPostProcessing)
     .INPUT(sorted_proposal, TensorType({DT_FLOAT16}))
@@ -1382,7 +1415,7 @@ REG_OP(BatchMultiClassNonMaxSuppression)
 * @li shape_hw: A 1D Tensor of type int32 . \n
 
 * @par Attributes:
-* @li reversed_box: An optional bool, specifying the last two dims is "4,num" or
+* reversed_box: An optional bool, specifying whether the last two dims are "4,num" or
 * "num,4", "true" for "4,num", "false" for "num,4". Defaults to "false" . \n
 
 * @par Outputs:
@@ -1429,9 +1462,9 @@ REG_OP(NormalizeBBox)
 * @li anchors: A Tensor. Must be int32.
 *
 *@par Attributes:
-* @li scales: optional, listfloat, .
+* @li scales: optional, listfloat.
 * @li decode_clip: optional, float, threshold of decode process.
-* @li reversed_boxes: optional, bool,.
+* @li reversed_boxes: optional, bool.
 *
 *@par Outputs:
 * y: A Tensor. Must have the same type as box_predictions.
@@ -1446,16 +1479,16 @@ REG_OP(DecodeBboxV2)
     .OP_END_FACTORY_REG(DecodeBboxV2)
 
 /**
-*@brief Computes sort function.
+*@brief Sorts the input tensor and returns the sorted values and their indices.
 *
 *@par Inputs:
 *Inputs include:
-* x: A Tensor. Dtype support: flaot16, flaot, int16, int8,
+* x: A Tensor. Dtype support: float16, float, int16, int8,
     uint8, int32, int64.
-*
+
 *@par Attributes:
-* @li axis: optional, int.
-* @li descending: optional,bool.
+* @li axis: An optional attribute that indicates the sorting axis.
+* @li descending: An optional attribute that indicates descending sort or not.
 *
 *@par Outputs:
 * @li y1: A Tensor. Must have the same type as x.
@@ -1568,16 +1601,18 @@ deciding when to remove boxes based on score . \n
 the last dim representing (batch_id,class_id,index_id) . \n
 
 *@par Attributes:
-*center_point_box:Integer indicate the format of the box data.
+*@li center_point_box: Integer indicating the format of the box data.
 The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2]
 where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair
 of box corners and the coordinates can be provided as normalized
 (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models.
 1 - the box data is supplied as [x_center, y_center, width, height].
 Mostly used for Pytorch models. \n
+*@li max_boxes_size: An optional attribute integer representing the real maximum
+*number of boxes to be selected by non max suppression . \n
 
 *@par Outputs:
-*@li selected_indices: A 2-D integer tensor of shape [M] representing the
+*selected_indices: A 2-D integer tensor of shape [M] representing the
 selected indices from the boxes tensor, where M <= max_output_size. \n
 
 *@attention Constraints:
@@ -1603,7 +1638,7 @@ REG_OP(NonMaxSuppressionV7)
 *@brief Obtains the ROI feature matrix from the feature map list. It is a customized fused operator for mmdetection. \n
 
 *@par Inputs:
-* Three inputs, including:
+* Two inputs, including:
 *@li features: A 5HD Tensor list of type float32 or float16.
 *@li rois: ROI position. A 2D Tensor of float32 or float16 with shape (N, 5). "N" indicates the number of ROIs,
 * the value "5" indicates the indexes of images where the ROIs are located, "x0", "y0", "x1", and "y1".
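Editor's note: the GIoU operator added earlier in this file describes its computation only in prose. The following is a minimal scalar C++ sketch of those same steps for a single box pair, assuming 'xyxy' boxes and ignoring the kernel's internal scaling and batching; the function name is illustrative, not the kernel's interface:

    // Scalar GIoU for boxes in [x1, y1, x2, y2] form (reference sketch only).
    #include <algorithm>

    float GiouScalar(const float b[4], const float g[4]) {
      auto area = [](const float r[4]) { return (r[2] - r[0]) * (r[3] - r[1]); };
      // Intersection and union of the two boxes.
      const float iw = std::max(0.0f, std::min(b[2], g[2]) - std::max(b[0], g[0]));
      const float ih = std::max(0.0f, std::min(b[3], g[3]) - std::max(b[1], g[1]));
      const float inter = iw * ih;
      const float uni = area(b) + area(g) - inter;
      // Minimum closure (smallest enclosing) box.
      const float cw = std::max(b[2], g[2]) - std::min(b[0], g[0]);
      const float ch = std::max(b[3], g[3]) - std::min(b[1], g[1]);
      const float closure = cw * ch;
      // GIoU = IoU - (closure area not belonging to either box) / closure area.
      return inter / uni - (closure - uni) / closure;
    }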
@@ -1760,7 +1795,7 @@ REG_OP(AnchorResponseFlags) * "N" indicates the number of ROIs. \n *@par Attributes: -*@li performance_mode: select performance mode, "high_precision" or "high_performance". +*performance_mode: select performance mode, "high_precision" or "high_performance". * select "high_precision" when input type is float32, the output tensor precision * will be smaller than 0.0001, select "high_performance" when input type is float32, * the ops will be best performance, but precision will be only smaller than 0.005. @@ -1795,12 +1830,12 @@ REG_OP(YoloBoxesEncode) *@li num_gts: A Tensor. Support int32. real k. shape (1, ) *@par Attributes: -*@li output_dim: float. IOU threshold for positive bboxes. -*@li group_size: float. minimum iou for a bbox to be considered as a positive bbox -*@li spatial_scale: bool. whether to assign all bboxes with the same highest overlap with some gt to that gt. +*@li pos_iou_thr: float. IOU threshold for positive bboxes. +*@li min_pos_iou: float. minimum iou for a bbox to be considered as a positive bbox +*@li gt_max_assign_all: bool. whether to assign all bboxes with the same highest overlap with some gt to that gt. *@par Outputs: -*@li assigned_gt_inds_pos: A Tensor. Support float16/float32. shape (n, ). +* assigned_gt_inds_pos: A Tensor. Support float16/float32. shape (n, ). */ REG_OP(GridAssignPositive) .INPUT(assigned_gt_inds, TensorType({ DT_FLOAT, DT_FLOAT16 })) @@ -1816,6 +1851,40 @@ REG_OP(GridAssignPositive) .REQUIRED_ATTR(min_pos_iou, Float) .REQUIRED_ATTR(gt_max_assign_all, Bool) .OP_END_FACTORY_REG(GridAssignPositive) + +/** +*@brief GIoUGrad . \n + +*@par Inputs: +*@li dy : data of grad increment, a 1D Tensor of type float16 or float32 with +* shape (N,). +*@li bboxes: Bounding boxes, a 2D Tensor of type float16 or float32 with +* shape (4, N). "N" indicates the number of bounding boxes, and the value +* "4" refers to [x1, y1, x2, y2] or [x, y, w, h]. +*@li gtboxes: Ground-truth boxes, a 2D Tensor of type float16 or float32 +* with shape (4, M). "M" indicates the number of ground truth boxes, and +* the value "4" refers to [x1, y1, x2, y2] or [x, y, w, h] . \n + +*@par Attributes: +*@li trans: An optional attr, true for 'xywh', false for 'xyxy', only support true now. +*@li is_cross: An optional attr, if false M equals N, only support false now. +*@li mode: An optional attr, a character string with the value range of ['iou', 'iof'], +* only support 'iou' now. \n + +*@par Outputs: +*@li dbboxes: A 2D Tensor of type float16 or float32 with shape [4, N]. +*@li dgtboxes: A 2D Tensor of type float16 or float32 with shape [4, M]. +*/ +REG_OP(GIoUGrad) + .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(bboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(gtboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dbboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .OUTPUT(dgtboxes, TensorType({DT_FLOAT16, DT_FLOAT})) + .ATTR(trans, Bool, false) + .ATTR(is_cross, Bool, true) + .ATTR(mode, String, "iou") + .OP_END_FACTORY_REG(GIoUGrad) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_NN_DETECT_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/nn_norm_ops.h b/third_party/fwkacllib/inc/ops/nn_norm_ops.h index b44c0780..9ce7abfd 100644 --- a/third_party/fwkacllib/inc/ops/nn_norm_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_norm_ops.h @@ -54,15 +54,16 @@ REG_OP(LogSoftmaxGrad) *@par Inputs: *Two inputs, including: * @li features: A Tensor. Must be one of the following types: half, float32, double. -* A "batch_size * num_classes" matrix. 
+*A "batch_size * num_classes" matrix. * @li labels: A Tensor. Must be one of the following types: 'int32', 'int64'. -* batch_size vector with values in [0, num_classes). -* This is the label for the given minibatch entry. +*batch_size vector with values in [0, num_classes). +*This is the label for the given minibatch entry. \n *@par Outputs: -*loss: A Tensor for per example loss (a "batch_size" vector). Has the same type as "features". -*backprop: A Tensor for the backpropagated gradients (a batch_size * num_classes matrix). Has the same type as "features" . \n +*@li loss: A Tensor for per example loss (a "batch_size" vector). Has the same type as "features". +*@li backprop: A Tensor for the backpropagated gradients (a batch_size * num_classes matrix). +Has the same type as "features" . \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator SparseSoftmaxCrossEntropyWithLogits. @@ -84,8 +85,8 @@ REG_OP(SparseSoftmaxCrossEntropyWithLogits) * @li labels: A Tensor of the same type as "features". A "batch_size * num_classes" matrix . \n *@par Outputs: -*loss: A Tensor for per example loss (a "batch_size" vector). Has the same type as "features". -*backprop: A Tensor for the backpropagated gradients (a batch_size * num_classes matrix). Has the same type as "features" . \n +* @li loss: A Tensor for per example loss (a "batch_size" vector). Has the same type as "features". +* @li backprop: A Tensor for the backpropagated gradients (a batch_size * num_classes matrix). Has the same type as "features" . \n *@par Third-party framework compatibility *Compatible with the TensorFlow operator SoftmaxCrossEntropyWithLogits. @@ -127,12 +128,13 @@ REG_OP(SoftmaxGrad) *@brief Computes the sigmoid cross entropy loss of "predict" and "target" . \n *@par Inputs: -* Two inputs, including: +* Three inputs, including: *@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value. -*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value . \n +*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value . +*@li dout:A multi-dimensional Tensor of float16 or float32,specifying the gradient transferred from the upper layer. \n *@par Outputs: -*loss: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict" . \n +*gradient: Sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict" . \n *@par Third-party framework compatibility * Compatible with the scenario where "reduction" is set to "none"of PyTorch operator SigmoidCrossEntropyWithLogitsGrad. @@ -148,13 +150,12 @@ REG_OP(SigmoidCrossEntropyWithLogitsGrad) *@brief Performs the backpropagation of SigmoidCrossEntropyWithLogits for training scenarios . \n *@par Inputs: -* Three inputs, including: +* Two inputs, including: *@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value. -*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value. -*@li dout: A multi-dimensional Tensor of float16 or float32, specifying the gradient transferred from the upper layer . \n +*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value. \n *@par Outputs: -*gradient: Return gradient. Has the same dimensions and type as "predict" . \n +*loss: Return loss. Has the same dimensions and type as "predict" . 
\n *@par Third-party framework compatibility * Compatible with the scenario where "reduction" is set to "none"of PyTorch operator SigmoidCrossEntropyWithLogits. @@ -572,7 +573,7 @@ REG_OP(LayerNorm) *@par Inputs: *One input, including: -* @li x: A Tensor. Must be one of the following types: float16, float32 . \n +* x: A Tensor. Must be one of the following types: float16, float32 . \n *@par Attributes: * @li p: Specify L_p norm, the type is float. @@ -581,7 +582,7 @@ REG_OP(LayerNorm) *@par Outputs: *One outputs, including: -* @li y: shape and dtype of output, should be same shape and type as input. +* y: shape and dtype of output, should be same shape and type as input. */ REG_OP(Renorm) .INPUT(x, TensorType::BasicType()) @@ -811,7 +812,7 @@ REG_OP(LayerNormBetaGammaBackpropV2) * shape of "keep_prob" should be (1,) or [1,]. * Has the same type as "x" . \n -*@par Output: +*@par Outputs: *y: A mutable Tensor. Has the same type as "x". */ REG_OP(DropOutDoMask) @@ -839,7 +840,7 @@ REG_OP(DropOutDoMask) * shape of "keep_prob" should be (1,) or [1,]. * Has the same type as "x" . \n -*@par Output: +*@par Outputs: *y: A mutable Tensor. Has the same type as "x". *@par Restrictions: *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. @@ -1010,7 +1011,7 @@ REG_OP(LRNGrad) *@li grads: A Tensor. Has the same type as acts. *@par Attributes: - *@li blank_label: An optional attribute. Defaults to 0. + *blank_label: An optional attribute. Defaults to 0. *@par Third-party framework compatibility * Compatible with TensorFlow RNNTLoss operator. @@ -1198,13 +1199,11 @@ REG_OP(INInferV2D) * @li epsilon: An attribute of type Float. \n * @par Outputs: -*Three outputs, including: +* Three outputs, including: * @li y: A Tensor. Has the same type as "x". \n * @li mean: A Tensor. Has the same type as "x". \n * @li variance: A Tensor. Has the same type as "x". \n -* @par Third-party framework compatibility -* Can be used by onnx InstanceNormalization */ REG_OP(InstanceNorm) .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -1218,24 +1217,22 @@ REG_OP(InstanceNorm) .OP_END_FACTORY_REG(InstanceNorm) /** -*@brief InstanceNormGrad operator interface implementation. +* @brief InstanceNormGrad operator interface implementation. -*@par Inputs: -*Five inputs, including: +* @par Inputs: +* Five inputs, including: * @li dy: A Tensor. Must be one of the following types: float16, float32. * @li x: A Tensor. Must be one of the following types: float16, float32. * @li variance: A Tensor. Must be one of the following types: float16, float32. * @li mean: A Tensor. Must be one of the following types: float16, float32. * @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n -*@par Outputs: -*Three outputs, including: +* @par Outputs: +* Three outputs, including: * @li pd_x: A Tensor. Must be one of the following types: float16, float32. * @li pd_gamma: A Tensor. Must be one of the following types: float16, float32. * @li pd_beta: A Tensor. Must be one of the following types: float16, float32. -*@par Restrictions: -*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. */ REG_OP(InstanceNormGrad) .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16})) @@ -1248,58 +1245,6 @@ REG_OP(InstanceNormGrad) .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16})) .OP_END_FACTORY_REG(InstanceNormGrad) -/** -*@brief InstanceNormXBackprop operator interface implementation. - -*@par Inputs: -*Five inputs, including: -* @li dy: A Tensor. Must be one of the following types: float16, float32. -* @li x: A Tensor. 
Must be one of the following types: float16, float32.
-* @li variance: A Tensor. Must be one of the following types: float16, float32.
-* @li mean: A Tensor. Must be one of the following types: float16, float32.
-* @li gamma: A Tensor. Must be one of the following types: float16, float32 . \n
-
-*@par Outputs:
-*Two outputs, including:
-* @li pd_x: A Tensor. Must be one of the following types: float16, float32.
-* @li res_for_gamma: A Tensor. Must be one of the following types: float32.
-
-*@par Restrictions:
-*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
-*/
-REG_OP(InstanceNormXBackprop)
-    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .OUTPUT(res_for_gamma, TensorType({DT_FLOAT}))
-    .OP_END_FACTORY_REG(InstanceNormXBackprop)
-
-/**
-*@brief InstanceNormBetaGammaBackprop operator interface implementation.
-
-*@par Inputs:
-*Two inputs, including:
-* @li dy: A Tensor. Must be one of the following types: float16, float32.
-* @li res_for_gamma: A Tensor. Must be one of the following types: float32.\n
-
-*@par Outputs:
-*Two outputs, including:
-* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32.
-* @li pd_beta: A Tensor. Must be one of the following types: float16, float32.
-
-*@par Restrictions:
-*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
-*/
-REG_OP(InstanceNormBetaGammaBackprop)
-    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .INPUT(res_for_gamma, TensorType({DT_FLOAT}))
-    .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16}))
-    .OP_END_FACTORY_REG(InstanceNormBetaGammaBackprop)
-
 /**
 * @brief Computes Kl_div_loss_grad or Kl_div_loss_backward. \n
 
@@ -1340,10 +1285,10 @@ REG_OP(KlDivLossGrad)
 * @li label: A Tensor. Has the same type as "grads". Required. \n
 
 * @par Attributes:
-* @li reduction: An optional attribute of type String. Defaults to "mean". \n
+* reduction: An optional attribute of type String. Defaults to "mean". \n
 
 * @par Outputs:
-* @li y: A Tensor. Has the same type as "x". \n
+* y: A Tensor. Has the same type as "x". \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator L1LossGrad.
@@ -1368,7 +1313,7 @@ REG_OP(L1LossGrad)
 * @li reduction: An optional string. Defaults to "mean". \n
 
 * @par Outputs:
-* @li y: An ND tensor tensor with the same shape and type as "predict". \n
+* y: An ND tensor with the same shape and type as "predict". \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator LpLoss.
@@ -1390,10 +1335,10 @@ REG_OP(LpLoss)
 * @li dout: An ND tensor of type float16, float32. \n
 
 * @par Attributes:
-* @li reduction: An optional string.Defaults to "mean". \n
+* reduction: An optional string. Defaults to "mean". \n
 
 * @par Outputs:
-* @li y: An ND tensor tensor with the same shape and type as "predict". \n
+* y: An ND tensor with the same shape and type as "predict". \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator MseLossGrad.
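Editor's note: MseLossGrad, documented above, multiplies the upstream gradient by the derivative of the mean-squared-error loss. A minimal scalar C++ sketch under reduction="mean" follows; the function name and flat-vector layout are assumptions for illustration, not the kernel's actual interface:

    // Reference sketch: grad_i = dout_i * 2 * (predict_i - label_i) / n.
    #include <cstddef>
    #include <vector>

    std::vector<float> MseLossGradMean(const std::vector<float>& predict,
                                       const std::vector<float>& label,
                                       const std::vector<float>& dout) {
      const float n = static_cast<float>(predict.size());
      std::vector<float> grad(predict.size());
      for (std::size_t i = 0; i < predict.size(); ++i) {
        grad[i] = dout[i] * 2.0f * (predict[i] - label[i]) / n;
      }
      return grad;
    }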
@@ -1414,10 +1359,10 @@ REG_OP(MseLossGrad)
 * @li label: An ND Tensor of dtype float16 or float32.\n
 *
 * @par Attributes:
-* @li reduction:An optional str from sum, none, mean, Defaults to "mean".\n
+* reduction: An optional str from sum, none, mean. Defaults to "mean".\n
 *
 * @par Outputs:
-* @li y: when reduction=sum/mean, y is scale. when reduction=none, y has
+* y: when reduction=sum/mean, y is a scalar; when reduction=none, y has
 * same type and shape as "predict".\n
 */
 REG_OP(MseLoss)
@@ -1445,7 +1390,7 @@ REG_OP(MseLoss)
 * Must be one of the following: "none", "mean", "sum". \n
 
 * @par Outputs:
-* @li gradient: A Tensor. Has the same type as "predict". \n
+* gradient: A Tensor. Has the same type as "predict". \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator SmoothL1LossBackward.
@@ -1480,7 +1425,7 @@ REG_OP(SmoothL1LossGradV2)
 * the output, 'sum': the output will be summed. Default: 'mean'. \n
 
 * @par Outputs:
-* @li loss: Indicates the loss between the predictive value and target value.
+* loss: Indicates the loss between the predictive value and target value.
 * Has the same dimensions as "predict". \n
 
 * @par Third-party framework compatibility
@@ -1498,12 +1443,12 @@ REG_OP(SmoothL1LossV2)
 * @brief Computes Centralization. result = x - mean(x, axes)
 
 * @par Inputs:
-* @li x: An ND tensor of type float16, float32.
+* x: An ND tensor of type float16, float32.
 * @par Attributes:
-* @li axes: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType.
+* axes: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType.
 * Must be in the range [-rank(x), rank(x)).
 * @par Outputs:
-* @li y: A Tensor. Has the same type as "x". \n
+* y: A Tensor. Has the same type as "x". \n
 
 * @par Third-party framework compatibility
 * custom operator \n
@@ -1521,7 +1466,7 @@ REG_OP(Centralization)
 *@par Inputs:
 *One inputs, including:
-* @li x: A tensor . Must be one of the following types:
+* x: A tensor. Must be one of the following types:
 * float16, float32, int32, uint32, int8, uint8. \n
 
 *@par Attributes:
@@ -1546,14 +1491,14 @@ REG_OP(Roll)
 logistic loss between input_x and input_y (containing 1 or -1). \n
 
 *@par Inputs:
- *One inputs, including:
+ *Two inputs, including:
 * @li input_x: A tensor. Must be one of the following types:
 * float16, float32. \n
 * @li input_y: A tensor. Must be one of the following types:
 * float16, float32. \n
 
 *@par Attributes:
- *@li lambd: An optional string.Defaults to "mean". \n
+ *reduction: An optional string. Defaults to "mean". \n
 
 *@par Outputs:
 *output_z: while reduction == "none", A Tensor with the same type and shape of input_x's. \n
@@ -1580,10 +1525,10 @@ REG_OP(SoftMarginLoss)
 * @li pos_weight: An optional ND tensor of type float16, float32. \n
 
 * @par Attributes:
-* @li reduction: An optional string.Defaults to "mean". \n
+* reduction: An optional string. Defaults to "mean". \n
 
 * @par Outputs:
-* @li gradient: An ND tensor tensor with the same shape and type as "predict". \n
+* gradient: An ND tensor with the same shape and type as "predict". \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator SigmoidCrossEntropyWithLogitsGrad.
@@ -1603,24 +1548,14 @@ REG_OP(SigmoidCrossEntropyWithLogitsGradV2)
 
 * @par Inputs:
 * Two inputs, including:
- * @li input_x: A tensor. Must be one of the following types:
- * float16, float32. \n
- *
- * @par Inputs:
- * @li target: A tensor. Must be one of the following types:
- * float16, float32. \n
 + * @li input_x: A tensor. Must be one of the following types: float16, float32.
 + * @li target: A tensor. Must be one of the following types: float16, float32. \n
 
 * @par Attributes:
 * Four attributes, including:
- * @li log_input: An optional bool. Defaults to "True" \n
- *
- * @par Attributes:
- * @li full: An optional bool. Defaults to "False" \n
- *
- * @par Attributes:
- * @li eps: An optional float. Defaults to "1e-8" \n
- *
- * @par Attributes:
+ * @li log_input: An optional bool. Defaults to "True"
+ * @li full: An optional bool. Defaults to "False"
+ * @li eps: An optional float. Defaults to "1e-8"
 * @li reduction: An optional string. Defaults to "mean" \n
 
 * @par Outputs:
@@ -1641,14 +1576,14 @@ REG_OP(PoissonNllLoss)
 /**
*@brief rnn_gen_mask
 * @par Inputs:
- * @li seq_length: A ND Tensor of type int32. Recoed the current length of each batch.\n
+ * seq_length: A ND Tensor of type int32. Records the current length of each batch.\n
 *
 * @par Attributes:
 * @li num_step: A required int.\n
 * @li hidden_size: A required int. \n
 *
 *
- * @par Output:
+ * @par Outputs:
 * y: A mutable Tensor of type float16, with the shape of [num_step, batch_size, hidden_size]. \n
 *
 */
@@ -1666,18 +1601,16 @@ REG_OP(RnnGenMask)
 * @par Inputs:
 * Two inputs, including:
 * @li x: A tensor. Must be one of the following types:
-* float16, float32. \n
-*
-* @par Inputs:
+* float16, float32.
 * @li target: A tensor. Must be the following types:
 * int32. \n
 
 * @par Attributes:
-* @li reduction: An optional string. Defaults to "mean" \n
+* reduction: An optional string. Defaults to "mean" \n
 
 * @par Outputs:
-* y: A Tensor has same element type as input x. \n
-* is_target: A Tensor has same element type as input target. \n
+* @li y: A Tensor with the same element type as input x. \n
+* @li is_target: A Tensor with the same element type as input target. \n
 
 * @par Third-party framework compatibility
 * Compatible with the Pytorch operator MultiLabelMarginLoss. \n
diff --git a/third_party/fwkacllib/inc/ops/nn_ops.h b/third_party/fwkacllib/inc/ops/nn_ops.h
index 49fd02fa..5b1a4dd0 100644
--- a/third_party/fwkacllib/inc/ops/nn_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_ops.h
@@ -106,16 +106,16 @@ REG_OP(FusedBatchNormV2)
     .OP_END_FACTORY_REG(FusedBatchNormV2)
 
 /**
- * @brief: Large amount of data sort.First operator of TopK.
+ * @brief Large amount of data sort. First operator of TopK.
  * @par Inputs:
  * two input, including:
  * @li input_data: A Tensor. Data to be sorted. Support float16
  * @li input_index: A Tensor. Range(0, 2048). Datatype and format is same as input_data.
  * @par Attributes:
- * @li k_num: Int.Number to be sorted.
+ * k_num: Int. Number to be sorted.
  * @par Outputs:
- * 1 output, including:
- * @li output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel.
+ * One output, including:
+ * output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel.
  */
 REG_OP(SegmentSort)
     .INPUT(input_data, TensorType({DT_FLOAT16}))
@@ -127,13 +127,13 @@ REG_OP(SegmentSort)
 /**
  * @brief: Large amount of data sort. Second operator of TopK.
  * @par Inputs:
- * two input, including:
- * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16
+ * One input, including:
+ * input_proposal: A Tensor. Proposal sorted for each channel. Support float16
  * @par Attributes:
- * @li k_num: Int.Number to be sorted.
+ * k_num: Int. Number to be sorted.
 * @par Outputs:
- * 1 output, including:
- * @li output_proposal: A Tensor.
Datatype and format is same as input_data. Proposal sorted for each channel. + * One output, including: + * output_proposal: A Tensor. Datatype and format is same as input_data. Proposal sorted for each channel. */ REG_OP(MultiMerge) .INPUT(input_proposal, TensorType({DT_FLOAT16})) @@ -142,14 +142,14 @@ REG_OP(MultiMerge) .OP_END_FACTORY_REG(MultiMerge) /** - * @brief: Large amount of data sort.Third operator of TopK. + * @brief Large amount of data sort.Third operator of TopK. * @par Inputs: - * two input, including: - * @li input_proposal: A Tensor. Proposal sorted for each channel. Support float16 + * One input, including: + * input_proposal: A Tensor. Proposal sorted for each channel. Support float16 * @par Attributes: - * @li k_num: Int.Number to be sorted. + * k_num: Int.Number to be sorted. * @par Outputs: - * 2 output, including: + * Two output, including: * @li output_data: A Tensor. Datatype and format is same as input_data. Data sorted. * @li output_index: A Tensor. int32. Data index. */ diff --git a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h index 80a21333..72363d18 100644 --- a/third_party/fwkacllib/inc/ops/nn_pooling_ops.h +++ b/third_party/fwkacllib/inc/ops/nn_pooling_ops.h @@ -29,7 +29,7 @@ namespace ge { /** *@brief Performs pooling on the input. *@par Inputs: -*@li x: An NCHW tensor of type float16, float32, int8. +* x: An NCHW tensor of type float16, float32, int8. *@par Attributes: *@li mode: An optional int32, specifying the pooling algorithm, either "0" (max pooling) or "1" (avg pooling). Defaults to "0". *@li global_pooling: An optional bool. Defaults to "false". @@ -50,6 +50,7 @@ namespace ge { *dilation[2]: An optional int32, specifying the left dilation. Defaults to "1". *dilation[3]: An optional int32, specifying the right dilation. Defaults to "1". *@li ceil_mode: An optional int32, either "0" (ceil mode) or "1" (floor mode). Defaults to "0". +*@li data_format: An optional string, Specify the data format of the input and output data. With the default format "NCHW". *@par Outputs: *y: An NCHW tensor of type float16, float32, int32. *@attention Constraints: @@ -204,7 +205,7 @@ REG_OP(AvgPool3D) *y: The average pooled output tensor . \n *@attention Constraints: -*@li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] +*"ksize" is in the range [1, 255]. "strides" is in the range [1, 63] *@par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPool3D. @@ -281,10 +282,10 @@ REG_OP(AvgPool3DGrad) * @li data_format: A string, format of input data . \n * @par Outputs: -* @output: The average pooled output tensor . \n +* output: The average pooled output tensor . \n * @attention Constraints: -* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] +* "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] * @par Third-party framework compatibility * Compatible with the TensorFlow operator AvgPool3DGradD. @@ -430,6 +431,47 @@ REG_OP(MaxPool3D) .ATTR(data_format, String, "NDHWC") .OP_END_FACTORY_REG(MaxPool3D) +/** +* @brief Performs max pooling3d on both max values and indices. +* +* @par Inputs: +* One input: +* x: An 6D tensor. Supported type: float16. Format as NDC1HWC0. +* @par Attributes: +* @li ksize: A required list of int32 values, +* specifying the size of the window for each dimension of the input tensor. +* No default value. 
+* @li strides: A required list of int32 values, +* specifying the stride of the sliding window for each dimension of +* the input tensor. No default value. +* @li pads: A required 3*2-dimension-list of int32 values. +* specifying the pad of three dimension of input, implement with 0. +* @li dilation: dilation of kernel. default value is {1,1,1,1,1}. +* @li ceil_mode: default value is false. +* @li data_format: the format of torch input, default value is "NCDHW". +* @li argmax_type: the function of this field is to determine the type of +* output argmax, "bitmask" is the default value, the argmax will return +* a img2col bitmask. "index_int32" and "index_int64" represent the torch +* output indices. +* @par Outputs: +* y: An 6D tensor. the maxpool3d output(max value), format as NDoC1HoWoC0. +* @par Outputs: +* argmax: A 5D uint16 tensor. the indice output. +* format as NC1HWC0, actually it represent N, Do, C1*ksize, Ho*Wo//16, 16. +*/ +REG_OP(MaxPool3DWithArgmax) + .INPUT(x, TensorType::RealNumberType()) + .OUTPUT(y, TensorType::RealNumberType()) + .OUTPUT(argmax, TensorType::IndexNumberType()) + .REQUIRED_ATTR(ksize, ListInt) + .REQUIRED_ATTR(strides, ListInt) + .REQUIRED_ATTR(pads, ListInt) + .ATTR(dilation, ListInt, {1, 1, 1, 1, 1}) + .ATTR(ceil_mode, Bool, false) + .ATTR(data_format, String, "NCDHW") + .ATTR(argmax_type, String, "bitmask") + .OP_END_FACTORY_REG(MaxPool3DWithArgmax) + /** *@brief Applies a 2D adaptive max pooling over an input signal conposed of several input planes. \n * The output is of size H x W, for any input size. @@ -522,8 +564,7 @@ REG_OP(MaxPool3DGradGrad) * y: A mutable tensor. Has the same shape and type as "x1" . \n * @attention Constraints: -* @li Computing gradients of global pooling is not supported, which means -* "ksize < x1". +* @li ksize is limited by buffer with full tiling. * @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63] * @par Third-party framework compatibility @@ -568,7 +609,7 @@ REG_OP(MaxPoolGrad) * @li Other dimensions of ksize and strides is 1 . \n * @par Outputs: -* @li y: Has the same type and format as input "x1" . \n +* y: Has the same type and format as input "x1" . \n * @par Third-party framework compatibility * @li Compatible with the TensorFlow operator MaxPoolGradGrad. @@ -588,7 +629,7 @@ REG_OP(MaxPoolGradGrad) *@brief Performs max_pool_ext2 on the input . \n *@par Inputs: -* Two inputs: +* Three inputs: *@li x: An NC1HWC0 Tensor of type float16. *@li strides: A required type of int32 values, specifying the stride of the sliding window for each dimension of the input tensor. No default value. *@li ksize: A required type of int32 values, specifying the size of the window for each dimension of the input tensor. No default value. @@ -635,7 +676,8 @@ REG_OP(MaxPoolV2) *@li strides: A required list of int8, int16, int32, or int64 values, * specifying the stride of the sliding window for each dimension of * the input tensor. No default value. -*@li padding: A required string. No default value . \n +*@li padding: A required string. No default value . +*@li Targmax:An optional int with default value 7 . \n *@par Outputs: *@li y: A Tensor. Has the same type and format as input "x". @@ -645,7 +687,7 @@ REG_OP(MaxPoolV2) * ksize[1] * ksize[2] <= 255. *@li "stride is a list that has length 4: strides[0] = 1 or strides[3] = 1, * strides[1] <= 63, strides[0] >= 1, strides[2] <= 63, strides[2] >= 1. -*@li "padding" is either "SAME" or "VALID" . \n +*@li "padding" is either "SAME" or "VALID" . 
*@par Third-party framework compatibility * Compatible with the TensorFlow operator MaxPoolWithArgmax. @@ -710,14 +752,15 @@ REG_OP(MaxPoolGradWithArgmax) *@brief Performs transform mask to argmax . \n *@par Inputs: -* Two input: -*x: An NC1HWC0 Tensor of type float16. -*mask: An NC1HWC0 Tensor of type uint16 . \n +* Two inputs: +*@li x: An NC1HWC0 Tensor of type float16. +*@li mask: An NC1HWC0 Tensor of type uint16 . \n *@par Attributes: *@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for each dimension of the input tensor. No default value. *@li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for each dimension of the input tensor. No default value. -*@li padding: A required string. No default value . \n +*@li padding: A required string. No default value . +*@li originshape:A required list of int8, int16, int32, or int64 values, No default value. \n *@par Outputs: *argmax: An NC1HWC0 Tensor of type int32 . \n @@ -754,7 +797,7 @@ REG_OP(Mask2Argmax) * @li strides: A required list, specifying the stride of the sliding window. * @li padding: A required string, window sliding mode. Either SAME or VALID. * @par Outputs: -* @li y:Result tensor. Supported type: float, double, int32, +* y:Result tensor. Supported type: float, double, int32, * uint8, int16, int8, int64, uint16, half, uint32, uint64 * @attention Constraints: @@ -767,7 +810,7 @@ REG_OP(Mask2Argmax) * (shape_max_pool[2] * shape_max_pool[3] + 31) // 16, 16), else failed . \n * @par Third-party framework compatibility -* @li Compatible with the TensorFlow operator MaxPoolGradGradWithArgmax. +* Compatible with the TensorFlow operator MaxPoolGradGradWithArgmax. */ REG_OP(MaxPoolGradGradWithArgmax) .INPUT(x, TensorType::RealNumberType()) @@ -931,11 +974,11 @@ REG_OP(AvgPoolV2GradD) .OP_END_FACTORY_REG(AvgPoolV2GradD) /** -*@brief :upsample the layer +*@brief upsample the layer, similar to the nearest-neighbor difference scaling algorithm. *@par Inputs: * one input, including: -*@li x: A tensor of type float16 or float32. +* x: A tensor of type float16 or float32. *@par Attributes: *@li scale: A optional float32, scale factor of x. Defaults to "1.0". *@li stride_h: An optional int32, broadcast the axis of h. Defaults to "2". @@ -1419,7 +1462,7 @@ REG_OP(MaxPoolV3) * the floor function will be used. Default False \n * @par Outputs: -* y: A mutable tensor. Has the same shape and type as "x1" . \n +* out_grad: A mutable tensor. Has the same shape and type as "x1" . \n * @attention Constraints: * @li Computing gradients of global pooling is not supported, which means @@ -1447,8 +1490,8 @@ REG_OP(MaxPoolV3Grad) *@brief Performs Dilation2D on the input . \n *@par Inputs: -*x: A tensor of shape is 4d, format is support NHWC. -*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. \n +*@li x: A tensor of shape is 4d, format is support NHWC. +*@li filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. \n *@par Attributes: *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1. @@ -1480,9 +1523,9 @@ REG_OP(Dilation2D) *@brief Performs Dilation2DBackpropFilter on the input. \n *@par Inputs: -*x: A tensor of shape is 4d, format is support NHWC. -*filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x. 
-*out_backprop: Has the same type and format as input x and the c dimension is same with x. \n
+*@li x: A tensor of shape is 4d, format is support NHWC.
+*@li filter: A tensor of shape is 3d, the type is same with x, and the c dimension is same with x.
+*@li out_backprop: Has the same type and format as input x and the c dimension is same with x. \n
 
 *@par Attributes:
 *@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimension are 1.
diff --git a/third_party/fwkacllib/inc/ops/nn_training_ops.h b/third_party/fwkacllib/inc/ops/nn_training_ops.h
index 75e91aee..9dd502cd 100644
--- a/third_party/fwkacllib/inc/ops/nn_training_ops.h
+++ b/third_party/fwkacllib/inc/ops/nn_training_ops.h
@@ -289,7 +289,8 @@ REG_OP(SparseApplyAdagradV2D)
 * Should be from a Variable().
 *@li lr: A scalar. Has the same type as "var".
 *@li grad: A tensor for the gradient. Has the same type as "var".
-*
+*@li momentum: Momentum. Must be a scalar.
+
 *@par Attributes:
 *@li use_nesterov: An optional bool. Defaults to "False".
 * If "True", the tensor passed to compute grad will be
@@ -701,7 +702,7 @@ REG_OP(ApplyPowerSignD)
 /**
 *@brief Updates "var" as FOBOS algorithm with fixed learning rate.
 * prox_v = var - alpha * delta
-* var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+* var = sign(prox_v)/(1+alpha * l2) * max{|prox_v|-alpha * l1,0}
 *
 *@attention Constraints:
 * the input tensors must have the same shape.
@@ -2128,10 +2129,12 @@ REG_OP(FusedMulApplyMomentumExtern)
 * otherwise the behavior is undefined, but may exhibit less contention.
 *
 *@par Outputs:
-* var: A mutable tensor. Has the same type as input "var".
+* @li var: A mutable tensor. Has the same type as input "var".
+* @li accum: A mutable tensor. Has the same type as input "accum".
 *
 *@attention Constraints:
 * The input tensors must have the same shape.
 *
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator ResourceApplyKerasMomentum.
diff --git a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
index ca1c24eb..01ff77cb 100644
--- a/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
+++ b/third_party/fwkacllib/inc/ops/nonlinear_fuc_ops.h
@@ -28,8 +28,8 @@ namespace ge {
 *@brief Computes the gelu of "x" . \n
 
 *@par Inputs:
-*Two inputs, including:
-* @li x: A Tensor. Must be one of the following types: float16, float32
+*One input, including:
+*x: A Tensor. Must be one of the following types: float16, float32
 
 *@par Outputs:
 *y: A Tensor. Has the same type as "x".
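Editor's note: for the Gelu operator documented above, the underlying function is GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))). The one-element C++ sketch below is only the reference definition; the Ascend kernel may use a tanh-based approximation instead:

    // Reference sketch: exact GELU for a single element.
    #include <cmath>

    float GeluScalar(float x) {
      return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
    }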
@@ -66,8 +66,8 @@ REG_OP(GeluGrad)
 *@brief Computes the fast_gelu of "x" . \n
 
 *@par Inputs:
-*Two inputs, including:
-* @li x: A Tensor. Must be one of the following types: float16, float32
+*One input, including:
+*x: A Tensor. Must be one of the following types: float16, float32
 
 *@par Outputs:
 *y: A Tensor. Has the same type as "x".
@@ -83,7 +83,7 @@ REG_OP(FastGelu)
 *@brief Computes the gradient for the fast_gelu of "x" . \n
 
 *@par Inputs:
-*Three inputs, including:
+*Two inputs, including:
 * @li dy: A Tensor. Must be one of the following types: float16, float32
 * @li x: A Tensor of the same type as "dy" . \n
 
@@ -169,7 +169,7 @@ REG_OP(Relu)
 * x: A Tensor of type RealNumberType . \n
 
 * @par Outputs:
-* y: A Tensor of type RealNumberType . \n
+* y: A Tensor with the same type as x . \n
 
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow operator Relu6.
@@ -209,8 +209,12 @@ REG_OP(Relu6D)
 * backprops = gradients * (features > 0) * (features < 6) . \n
 
 * @par Inputs:
-* @li features: A Tensor of type RealNumberType.
-* @li gradients: A Tensor of type RealNumberType . \n
+* @li gradients: A Tensor of type RealNumberType. The backpropagated
+  gradients to the corresponding Relu6 operation.
+* @li features: A Tensor with the same type as gradients. The features passed
+  as input to the corresponding Relu6 operation, or its output;
+  using either one produces the same result. \n
+
 
 * @par Outputs:
 * backprops: A Tensor of type RealNumberType . \n
@@ -228,7 +232,7 @@ REG_OP(Relu6Grad)
 *Applies the element-wise function:
 * Computes the backward for the elu: if x>0, 1; otherwise elu() + alpha .
 *@par Inputs:
-*One inputs, including:
+*Two inputs, including:
 * @li grads: A tensor. Must be one of the following types:
 * float16, float32.
 * @li activations: A tensor. Must be one of the following types:
@@ -238,7 +242,7 @@ REG_OP(Relu6Grad)
 *y: A Tensor with the same type and shape of grads's.
 *
 *@par Attributes:
-*@li alpha: scalar parameter, default value = 1.0
+*alpha: scalar parameter, default value = 1.0
 */
 REG_OP(EluGradV2)
     .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
@@ -539,13 +543,9 @@ REG_OP(Elu)
 *x: A float16, float32, for the input data type . \n
 
 *@par Attributes:
-*alpha1: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
-
-*@par Attributes:
-*alpha2: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
-
-*@par Attributes:
-*alpha3: A float32. Defines at which positive value the ELU saturates. Defaults to "1.0" . \n
+*@li alpha1: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" .
+*@li alpha2: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" .
+*@li alpha3: A float32. Defines at which positive value the ELU saturates. Defaults to "1.0" . \n
 
 *@par Outputs:
 *y: A float16, float32, for the normalized result . \n
@@ -706,8 +706,8 @@ REG_OP(Mish)
 * @li x: A Tensor. Must be one of the following types: float16, float32
 * @li tanhx: A Tensor. shape, datatype and format is same as x
 * @par Outputs:
- * 1 output, including:
- * @li x_grad: A Tensor. shape, datatype and format is same as x
+ * One output, including:
+ * x_grad: A Tensor. shape, datatype and format is same as x
 */
 
 REG_OP(MishGrad)
@@ -721,20 +721,20 @@ REG_OP(MishGrad)
 
 * @brief PyTorch hardtanh_backward operator.
* * @par Inputs: - * 2 inputs, including: + * Two inputs, including: * @li result, minimum tensor of the linear region range, * datatype: float16/float32, format:ND/5HD. * @li grad, maximum tensor of the linear region range, * datatype:float16/float32, format:ND/5HD. \n * @par Attributes: - * 2 attributes, including: + * Two attributes, including: * @li min_val, minimum value of the linear region range, datatype:float. * @li max_val, maximum value of the linear region range, datatype:float. \n * @par Outputs: - * 1 output, including: - * @li y, hardtanh_backward output tensor, datatype and format is same as + * One output, including: + * y, hardtanh_backward output tensor, datatype and format is same as * input result. \n * @attention Constraints: @@ -756,7 +756,7 @@ REG_OP(HardtanhGrad) * @par Inputs: * One inputs, including: -* @li x: A mutable Tensor. Must be one of the following types: +* x: A mutable Tensor. Must be one of the following types: * float16, float32. \n * @par Attributes: @@ -765,7 +765,7 @@ REG_OP(HardtanhGrad) * @li threshold: An optional float. Defaults to "20.0" \n * @par Outputs: -* @li y: A mutable Tensor. Has the same type as "x" \n +* y: A mutable Tensor. Has the same type as "x" \n * @par Third-party framework compatibility * Compatible with the Pytorch operator Softplus. @@ -792,7 +792,7 @@ REG_OP(SoftplusV2) * @li threshold: An optional float. Defaults to "20.0" \n * @par Outputs: -* @li output_backprops: A mutable Tensor. Has the same type as "input_gradients" \n +* output_backprops: A mutable Tensor. Has the same type as "input_gradients" \n * @par Third-party framework compatibility * Compatible with the Pytorch operator SoftplusGrad. @@ -809,13 +809,16 @@ REG_OP(SoftplusV2Grad) * @brief ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor) * where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise. * - * @par inputs + * @par Inputs: * one input including: - * @li x: input A Tensor. Must be one of the following types: float32, float16 + * x: input A Tensor. Must be one of the following types: float32, float16 * - * @par output + * @par Attributes: + * alpha: An optional float. Defaults to 1.0. \n + + * @par Outputs: * one output including: - * @li y:A Tensor of the same type as x + * y:A Tensor of the same type as x * */ REG_OP(ThresholdedRelu) @@ -829,14 +832,14 @@ REG_OP(ThresholdedRelu) * @par Inputs: * One inputs, including: -* @li input_x: A tensor. Must be one of the following types: +* input_x: A tensor. Must be one of the following types: * float16, float32. \n * @par Attributes: -* @li lambd: An optional float. Defaults to 0.5. \n +* lambd: An optional float. Defaults to 0.5. \n * @par Outputs: -* y: A Tensor with the same dtype and shape of input_x's. \n +* output_y: A Tensor with the same dtype and shape of input_x's. \n * @par Third-party framework compatibility * Compatible with the Pytorch operator Hardshrink. \n @@ -863,7 +866,7 @@ REG_OP(HardShrink) *backprops: A Tensor with the same type and shape of features's. \n * *@par Attributes: -*@li lambd: An optional float.Defaults to 0.5. \n +*lambd: An optional float.Defaults to 0.5. \n * *@par Third-party framework compatibility *Compatible with the Pytorch operator Hardshrink_backward. \n @@ -880,7 +883,7 @@ REG_OP(HardShrink) * @par Inputs: * One inputs, including: -* @li input_x: A tensor. Must be one of the following types: +* input_x: A tensor. Must be one of the following types: * float16, float32, int32. 
\n
 * @par Attributes:
-* @li lambd: An optional float. Defaults to 0.5. \n
+* lambd: An optional float. Defaults to 0.5. \n
 
 * @par Outputs:
 * y: A Tensor with the same dtype and shape of input_x's. \n
@@ -933,7 +936,7 @@ REG_OP(SoftShrink)
 * @li input_x: A tensor of the same dtype as "input_grad". \n
 
 * @par Attributes:
-* @li lambd: An optional float. Defaults to 0.5. \n
+* lambd: An optional float. Defaults to 0.5. \n
 
 * @par Outputs:
 * y: A Tensor of the same dtype and shape as "input_grad". \n
@@ -976,12 +979,12 @@ REG_OP(LogSigmoidGrad)
 
 *@par Inputs:
 *One inputs, including:
-* @li x: A tensor. Must be one of the following types:
+* x: A tensor. Must be one of the following types:
 * float16, float32. \n
 
 *@par Outputs:
 *One outputs, including:
-* @li y: A tensor with the same type and shape of x's. \n
+* y: A tensor with the same type and shape of x's. \n
 
 *@par Third-party framework compatibility
 *Compatible with the Pytorch operator LogSigmoid. \n
@@ -1003,7 +1006,7 @@ REG_OP(LogSigmoid)
 
 *@par Outputs:
 *One outputs, including:
-* @li y: A tensor with the same type and shape of x's. \n
+* y: A tensor with the same type and shape of x's. \n
 
 * @par Attributes:
 * @li alpha: An optional float. Defaults to 0.16666666. \n
diff --git a/third_party/fwkacllib/inc/ops/pad_ops.h b/third_party/fwkacllib/inc/ops/pad_ops.h
index 6854c866..9d0e7a62 100644
--- a/third_party/fwkacllib/inc/ops/pad_ops.h
+++ b/third_party/fwkacllib/inc/ops/pad_ops.h
@@ -33,8 +33,8 @@ namespace ge {
 *@li value: A 0D scalar. Specifies the value to fill the returned tensor.
 * Must be one of the following types:
-* float16, float32, double, int32, uint8, int16, int8, complex64, int64,
-* qint8, quint8, qint32, uint16, complex128, uint32, uint64.
+* float16, float32, double, int32, uint8, int16, int8, complex64, int64, bool,
+* qint8, quint8, qint32, qint16, quint16, uint16, complex128, uint32, uint64.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "value".
@@ -46,8 +46,14 @@ namespace ge {
 */
 REG_OP(Fill)
     .INPUT(dims, TensorType::IndexNumberType())
-    .INPUT(value, TensorType::BasicType())
-    .OUTPUT(y, TensorType::BasicType())
+    .INPUT(value, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
+                              DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
+                              DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
+                              DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
+    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
+                           DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
+                           DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
+                           DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
     .OP_END_FACTORY_REG(Fill)
 
 /**
@@ -213,11 +219,11 @@ REG_OP(PadV2)
 *@brief Pads a tensor . \n
 
 *@par Inputs:
-*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n
-*constant_values: A Tensor. Must have the same type as input.
+*@li x: A Tensor. Must be one of the following types: float16, float32, int32 . \n
+*@li constant_values: A Tensor. Must have the same type as input.
 
 *@par Attributes:
-*paddings: An optional "vector>". Defaults to "{}".
+*paddings: A required attribute.
* For each dimension D of input, paddings[D, 0] indicates how many
* values to add before the contents of tensor in that dimension,
* and paddings[D, 1] indicates how many values to add after the
@@ -461,7 +467,7 @@ REG_OP(FillV2)
* @li dims: A required listInt to specify the shape that the value fills.

* @par Outputs:
-* @li y: A Tensor. Has the shape specify by attr shape, and full of the value specify by attr value.
+* y: A Tensor. Has the shape specified by attr shape, and is full of the value specified by attr value.

* @par Third-party framework compatibility
* Compatible with the ONNX operator ConstantOfShape.
diff --git a/third_party/fwkacllib/inc/ops/parsing_ops.h b/third_party/fwkacllib/inc/ops/parsing_ops.h
index b625180a..e578997c 100644
--- a/third_party/fwkacllib/inc/ops/parsing_ops.h
+++ b/third_party/fwkacllib/inc/ops/parsing_ops.h
@@ -54,27 +54,26 @@ REG_OP(StringToNumber)
/**
*@brief Convert serialized tensorflow.TensorProto prototype to Tensor.
*@brief Parse an Example prototype.
-*@par Input:
-*serialized: A Tensor of type string.
-*dense_defaults: DYNAMIC INPUT Tensor type as string, float, int64. \n
+*@par Inputs:
+*@li serialized: A Tensor of type string.
+*@li dense_defaults: A dynamic input Tensor of type string, float or int64. \n

*@par Attributes:
-*num_sparse: type int num of inputs sparse_indices , sparse_values, sparse_shapes
-*out_type: output type
-*sparse_keys: ListString
-*sparse_types: types of sparse_values
-*dense_keys: ListString
-*dense_shapes: output of dense_defaults shape
-*dense_types: output of dense_defaults type \n
+*@li num_sparse: an int, the number of the sparse_indices, sparse_values and sparse_shapes outputs.
+*@li sparse_keys: ListString.
+*@li sparse_types: the types of sparse_values.
+*@li dense_keys: ListString.
+*@li Tdense: the types of dense_defaults.
+*@li dense_shapes: the shapes of dense_defaults. \n

*@par Outputs:
-*sparse_indices: A Tensor of type string.
-*sparse_values: Has the same type as sparse_types.
-*sparse_shapes: A Tensor of type int64
-*dense_values: Has the same type as dense_defaults.
+*@li sparse_indices: A Tensor of type int64.
+*@li sparse_values: Has the types specified by sparse_types.
+*@li sparse_shapes: A Tensor of type int64.
+*@li dense_values: Has the same type as dense_defaults.

*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
-**/
+*/
REG_OP(ParseSingleExample)
    .INPUT(serialized, TensorType({DT_STRING}))
    .DYNAMIC_INPUT(dense_defaults, TensorType({DT_STRING,DT_FLOAT,DT_INT64}))
@@ -92,16 +91,16 @@ REG_OP(ParseSingleExample)
/**
*@brief Decodes raw file into tensor . \n
-*@par Input:
+*@par Inputs:
*bytes: A Tensor of type string.

*@par Attributes:
-*little_endian: bool ture
-*out_type: output type
+*@li little_endian: bool, true if the input bytes are in little-endian order.
+*@li out_type: the output data type.

*@par Outputs:
*Output: A Tensor
-**/
+*/
REG_OP(DecodeRaw)
    .INPUT(bytes, TensorType({DT_STRING}))
    .OUTPUT(output, TensorType({DT_BOOL,DT_FLOAT16,DT_DOUBLE,DT_FLOAT,
@@ -147,18 +146,20 @@ REG_OP(ParseTensor)
*@par Inputs:
*Inputs include:
-*records: Each string is a record/row in the csv and all records should have the
+*@li records: Each string is a record/row in the csv and all records should have the
*same format. \n
-*record_defaults: One tensor per column of the input record, with either a
+*@li record_defaults: One tensor per column of the input record, with either a
*scalar default value for that column or an empty vector if the column is
*required. \n

*@par Attributes:
-*OUT_TYPE: The numeric type to interpret each string in string_tensor as .
\n -*field_delim: char delimiter to separate fields in a record. \n -*use_quote_delim: If false, treats double quotation marks as regular characters +*@li OUT_TYPE: The numeric type to interpret each string in string_tensor as . \n +*@li field_delim: char delimiter to separate fields in a record. \n +*@li use_quote_delim: If false, treats double quotation marks as regular characters *inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). \n -*na_value: Additional string to recognize as NA/NaN. \n +*@li na_value: Additional string to recognize as NA/NaN. \n +*@li select_cols: Optional sorted list of column indices to select. If specified, +only this subset of columns will be parsed and returned. *@par Outputs: *output: A Tensor. Has the same type as x . \n @@ -186,25 +187,25 @@ REG_OP(DecodeCSV) /** *@brief Convert serialized tensorflow.TensorProto prototype to Tensor. *@brief Parse an Example prototype. -*@par Input: -*serialized: A Tensor of type string. \n -*name:A Tensor of type string. \n -*sparse_keys: Dynamic input tensor of string. \n -*dense_keys: Dynamic input tensor of string \n -*dense_defaults: Dynamic input tensor type as string, float, int64. \n +*@par Inputs: +*@li serialized: A Tensor of type string. \n +*@li name:A Tensor of type string. \n +*@li sparse_keys: Dynamic input tensor of string. \n +*@li dense_keys: Dynamic input tensor of string \n +*@li dense_defaults: Dynamic input tensor type as string, float, int64. \n *@par Attributes: -*Nsparse: Number of sparse_keys, sparse_indices and sparse_shapes \n -*Ndense: Number of dense_keys \n -*sparse_types: types of sparse_values \n -*Tdense: Type of dense_defaults dense_defaults and dense_values \n -*dense_shapes: output of dense_defaults shape \n +*@li Nsparse: Number of sparse_keys, sparse_indices and sparse_shapes \n +*@li Ndense: Number of dense_keys \n +*@li sparse_types: types of sparse_values \n +*@li Tdense: Type of dense_defaults dense_defaults and dense_values \n +*@li dense_shapes: output of dense_defaults shape \n *@par Outputs: -*sparse_indices: A Tensor of type string. \n -*sparse_values: Has the same type as sparse_types. \n -*sparse_shapes: A Tensor of type int64 \n -*dense_values: Has the same type as dense_defaults. \n +*@li sparse_indices: A Tensor of type string. \n +*@li sparse_values: Has the same type as sparse_types. \n +*@li sparse_shapes: A Tensor of type int64 \n +*@li dense_values: Has the same type as dense_defaults. \n *@par Third-party framework compatibility \n *@li compatible with tensorflow StringToNumber operator. \n */ @@ -228,37 +229,37 @@ REG_OP(ParseExample) /** *@brief Transforms a scalar brain.SequenceExample proto (as strings) into typed *tensors. -*@par Input: -*serialized: A Tensor of type string. \n -*feature_list_dense_missing_assumed_empty:A Tensor of type string. \n -*context_sparse_keys: Dynamic input tensor of string. \n -*context_dense_keys: Dynamic input tensor of string \n -*feature_list_sparse_keys: Dynamic input tensor of string \n -*feature_list_dense_keys: Dynamic input tensor of string \n -*context_dense_defaults: Dynamic input tensor of string, float, int64 \n -*debug_name: A Tensor of type string. \n +*@par Inputs: +*@li serialized: A Tensor of type string. \n +*@li feature_list_dense_missing_assumed_empty:A Tensor of type string. \n +*@li context_sparse_keys: Dynamic input tensor of string. 
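// A rough illustration of the record split that DecodeCSV's field_delim
// attribute controls (plain C++ sketch; quoting per use_quote_delim and
// na_value handling are deliberately omitted):
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> SplitRecord(const std::string& record, char field_delim = ',') {
  std::vector<std::string> fields;
  std::string field;
  std::istringstream in(record);
  while (std::getline(in, field, field_delim)) fields.push_back(field);  // one entry per column
  return fields;
}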
\n +*@li context_dense_keys: Dynamic input tensor of string \n +*@li feature_list_sparse_keys: Dynamic input tensor of string \n +*@li feature_list_dense_keys: Dynamic input tensor of string \n +*@li context_dense_defaults: Dynamic input tensor of string, float, int64 \n +*@li debug_name: A Tensor of type string. \n *@par Attributes: -*Ncontext_sparse: Number of context_sparse_keys, context_sparse_indices and context_sparse_shapes \n -*Ncontext_dense: Number of context_dense_keys \n -*Nfeature_list_sparse: Number of feature_list_sparse_keys \n -*Nfeature_list_dense: Number of feature_list_dense_keys \n -*context_sparse_types: Types of context_sparse_values \n -*Tcontext_dense: Number of dense_keys \n -*feature_list_dense_types: Types of feature_list_dense_values \n -*context_dense_shapes: Shape of context_dense \n -*feature_list_sparse_types: Type of feature_list_sparse_values \n -*feature_list_dense_shapes: Shape of feature_list_dense \n +*@li Ncontext_sparse: Number of context_sparse_keys, context_sparse_indices and context_sparse_shapes \n +*@li Ncontext_dense: Number of context_dense_keys \n +*@li Nfeature_list_sparse: Number of feature_list_sparse_keys \n +*@li Nfeature_list_dense: Number of feature_list_dense_keys \n +*@li context_sparse_types: Types of context_sparse_values \n +*@li Tcontext_dense: Number of dense_keys \n +*@li feature_list_dense_types: Types of feature_list_dense_values \n +*@li context_dense_shapes: Shape of context_dense \n +*@li feature_list_sparse_types: Type of feature_list_sparse_values \n +*@li feature_list_dense_shapes: Shape of feature_list_dense \n *@par Outputs: -*context_sparse_indices: Dynamic output tensor of type int64. \n -*context_sparse_values: Dynamic output tensor of type string, float, int64. \n -*context_sparse_shapes: Dynamic output tensor of type int64 \n -*context_dense_values: Dynamic output tensor of type string, float, int64. \n -*feature_list_sparse_indices: Dynamic output tensor of type int64. \n -*feature_list_sparse_values: Dynamic output tensor of type string, float, int64. \n -*feature_list_sparse_shapes: Dynamic output tensor of type int64 \n -*feature_list_dense_values: Dynamic output tensor of type string, float, int64. \n +*@li context_sparse_indices: Dynamic output tensor of type int64. \n +*@li context_sparse_values: Dynamic output tensor of type string, float, int64. \n +*@li context_sparse_shapes: Dynamic output tensor of type int64 \n +*@li context_dense_values: Dynamic output tensor of type string, float, int64. \n +*@li feature_list_sparse_indices: Dynamic output tensor of type int64. \n +*@li feature_list_sparse_values: Dynamic output tensor of type string, float, int64. \n +*@li feature_list_sparse_shapes: Dynamic output tensor of type int64 \n +*@li feature_list_dense_values: Dynamic output tensor of type string, float, int64. \n *@par Third-party framework compatibility \n *@li compatible with tensorflow StringToNumber operator. \n */ diff --git a/third_party/fwkacllib/inc/ops/quantize_ops.h b/third_party/fwkacllib/inc/ops/quantize_ops.h index 69d5e67e..0636833c 100644 --- a/third_party/fwkacllib/inc/ops/quantize_ops.h +++ b/third_party/fwkacllib/inc/ops/quantize_ops.h @@ -63,10 +63,11 @@ REG_OP(Dequantize) /** *@brief Quantizes the input . \n *@par Inputs: -*x: shape and dtype of input_x. \n -*scales: shape and dtype of input_scales. \n -*zero_points: shape and dtype of input_zero_points \n +*@li x: shape and dtype of input_x. \n +*@li scales: shape and dtype of input_scales. 
\n +*@li zero_points: shape and dtype of input_zero_points \n *@par Attributes: +*@li dtype: required, type. *@li axis: the processed dim. \n *@par Outputs: *y: shape and dtype of output_y, should be same shape as input, dtype is same as the quantified type . \n @@ -91,7 +92,8 @@ REG_OP(Quantize) *@li offset: A required float16, specifying the offset. *@li sqrt_mode: A optional bool, specifying whether to perform square root on "scale", either "True" or "False". Defaults to "False". *@li round_mode: An optional string, specifying the float16 to int8 cast type. -* The value range is [Round, Floor, Ceiling, Truncate]. Defaults to "Round" . \n +* The value range is [Round, Floor, Ceil, Truncate]. Defaults to "Round" . +*@li dst_type: A optional int32, specifying the output data type. Defaults to "DT_INT8" . \n *@par Outputs: *y: The quantized output tensor of type int8 and with format NC1HWC0 . \n diff --git a/third_party/fwkacllib/inc/ops/ragged_array_ops.h b/third_party/fwkacllib/inc/ops/ragged_array_ops.h index 20484623..5af2dd74 100644 --- a/third_party/fwkacllib/inc/ops/ragged_array_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_array_ops.h @@ -37,13 +37,18 @@ namespace ge { *deprecated name. *@li indices: Indices in the outermost dimension of `params` of the values that should be *gathered. + +*@par Attributes: +*@li PARAMS_RAGGED_RANK:The ragged rank of the params_nested_splits. +*@li Tsplits:A type of output_nested_splits. *@li OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain *this number of `row_splits` tensors. This value should equal *`indices.shape.ndims + params.ragged_rank - 1` . \n *@par Outputs: -*y:A Returns The `nested_row_splits` tensors that define the row-partitioning for the -*returned RaggedTensor.The `flat_values` for the returned RaggedTensor . \n +*@li output_nested_splits:A Returns The `nested_row_splits` tensors that define the row-partitioning for the +*returned RaggedTensor.The `flat_values` for the returned RaggedTensor . +*@li output_dense_values:The `flat_values` for the returned RaggedTensor. \n *@par Third-party framework compatibility * Compatible with tensorflow RaggedGather operator. diff --git a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h index 020e3da4..ceaa64e4 100644 --- a/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_conversion_ops.h @@ -61,7 +61,6 @@ REG_OP(RaggedTensorToSparse) *@brief Create a dense tensor from a ragged tensor, possibly altering its shape . \n *@par Inputs: -*Six inputs, including: *@li shape:A `Tensor`. Must be one of the following types: `int64`, `int32`. *@li values:A 1D tensor representing the values of the ragged tensor. *@li default_value:A `Tensor`. Must have the same type as `values`. @@ -78,7 +77,7 @@ The types of the row partition tensors. At present, these can be: is preceeded by "FIRST_DIM_SIZE" . \n *@par Outputs: -*@li result: A `Tensor`. Has the same type as `values`. +*result: A `Tensor`. Has the same type as `values`. */ REG_OP(RaggedTensorToTensor) .INPUT(shape, TensorType({DT_INT32, DT_INT64})) diff --git a/third_party/fwkacllib/inc/ops/ragged_math_ops.h b/third_party/fwkacllib/inc/ops/ragged_math_ops.h index 258b0ca1..4376437f 100644 --- a/third_party/fwkacllib/inc/ops/ragged_math_ops.h +++ b/third_party/fwkacllib/inc/ops/ragged_math_ops.h @@ -35,7 +35,11 @@ namespace ge { *@li deltas: The deltas of each range . 
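// How the starts/limits/deltas triples of RaggedRange relate to the two outputs
// described next, as a standalone sketch (plain C++, illustrative names):
#include <cstdint>
#include <vector>

void RaggedRangeRef(const std::vector<float>& starts, const std::vector<float>& limits,
                    const std::vector<float>& deltas,
                    std::vector<int64_t>& rt_nested_splits,  // row i spans [splits[i], splits[i+1])
                    std::vector<float>& rt_dense_values) {   // all ranges, concatenated
  rt_nested_splits.assign(1, 0);
  for (size_t i = 0; i < starts.size(); ++i) {
    for (float v = starts[i]; deltas[i] > 0 ? v < limits[i] : v > limits[i]; v += deltas[i]) {
      rt_dense_values.push_back(v);
    }
    rt_nested_splits.push_back(static_cast<int64_t>(rt_dense_values.size()));
  }
}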
\n *@par Outputs: -*y:A Returns The `row_splits` for the returned `RaggedTensor`.The `flat_values` for the returned `RaggedTensor` . \n +*@li rt_dense_values:The `flat_values` for the returned `RaggedTensor`. +*@li rt_nested_splits:The `row_splits` for the returned `RaggedTensor`. \n + +*@par Attributes: +*Tsplits:A type of rt_nested_splits. *@attention Constraints: *The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. diff --git a/third_party/fwkacllib/inc/ops/random_ops.h b/third_party/fwkacllib/inc/ops/random_ops.h index b65a68f1..66f9b65f 100644 --- a/third_party/fwkacllib/inc/ops/random_ops.h +++ b/third_party/fwkacllib/inc/ops/random_ops.h @@ -147,6 +147,32 @@ REG_OP(RandomGamma) .ATTR(seed2, Int, 0) .OP_END_FACTORY_REG(RandomGamma) +/** +*@brief Returns the random permutation of integers from 0 to n-1. \n + +*@par Attributes: +*@li n: An required int. +*@li dtype: An optional str. Defaults to int64 . +*@li layout: An optional int. Defaults to 0 . \n + +*@par Outputs: +*out: A required Tensor. Must be one of the following types: + float16, float32, float32, int8, uint8, int16, int32, int64. \n + +*@attention Constraints: +*The implementation for Randperm on Ascend uses AICPU, with bad performance. + +*@par Third-party framework compatibility +*@li compatible with Pytorch Randperm operator. +*/ +REG_OP(Randperm) + .OUTPUT(out, TensorType({DT_INT64, DT_INT32, DT_INT16, + DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT32, DT_DOUBLE})) + .REQUIRED_ATTR(n, Int) + .ATTR(layout, Int, 0) + .ATTR(dtype, Type, DT_INT64) + .OP_END_FACTORY_REG(Randperm) + /** *@brief Outputs random values from the Poisson distribution(s) described by rate . \n @@ -157,11 +183,12 @@ REG_OP(RandomGamma) *@par Attributes: *@li dtype: An optional type from: half, float32, float64, int32, int64. Defaults to int64. -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero, +the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: -*y: A Tensor of type dtype . \n +*y: A Tensor of type dtype float16, float, double, int32, int64. \n *@attention Constraints: *The implementation for RandomPoisson on Ascend uses AICPU, with bad performance. @@ -188,11 +215,13 @@ REG_OP(RandomPoisson) *x: A Tensor. The tensor to be shuffled . \n *@par Attributes: -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero, +the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: -*y: A Tensor. Has the same type as x . \n +*y: A Tensor. Has the same type as x . A Tensor of type float16, float, +*double, int32, int64, int16, uint16, int8, uint8, int32,int64. \n *@attention Constraints: *The implementation for RandomShuffle on Ascend uses AICPU, with bad performance. @@ -220,11 +249,12 @@ REG_OP(RandomShuffle) *@par Attributes: *@li dtype: A type from: half, float16, float32, float64. The type of the output. -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0. 
If either seed or seed2 are set to be non-zero, +the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: -*y: A Tensor of type dtype . \n +*y: A Tensor of type float32, float16, double. \n *@attention Constraints: *The implementation for RandomStandardNormal on Ascend uses AICPU, with bad performance. @@ -240,6 +270,28 @@ REG_OP(RandomStandardNormal) .ATTR(seed2, Int, 0) .OP_END_FACTORY_REG(RandomStandardNormal) +/** +*@brief Output random value from separate normal distribution. \n + +*@par Inputs: +*Inputs include: +*mean: The mean is a tensor with the mean of each output element’s normal distribution . +*std: The std is a tensor with the standard deviation of each output element’s normal distribution. \n +*@par Outputs: +*y: A Tensor of type dtype . \n + +*@attention Constraints: +*The implementation for Normal on Ascend uses AICPU, with bad performance. + +*@par Third-party framework compatibility +*@li compatible with Pytorch Normal operator. +*/ +REG_OP(Normal) + .INPUT(mean, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .INPUT(std, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OP_END_FACTORY_REG(Normal) + /** *@brief Outputs random integers from a uniform distribution . \n @@ -250,8 +302,9 @@ REG_OP(RandomStandardNormal) * @li max: A Tensor. Must have the same type as minval. 0-D . \n *@par Attributes: -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero, +the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: *y: A Tensor. Has the same type as min . \n @@ -280,8 +333,9 @@ REG_OP(RandomUniformInt) *@par Attributes: *@li dtype: A type from: half, float16, float32, float64. The type of the output. -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero, +the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: *y: A Tensor of type dtype . \n @@ -308,11 +362,14 @@ REG_OP(RandomUniform) *shape: A Tensor. Must be one of the following types: int32, int64 . \n *@par Attributes: -*@li seed: An optional int. Defaults to 0. -*@li seed2: An optional int. Defaults to 0 . \n +*@li seed: An optional int. Defaults to 0.If either `seed` or `seed2` +are set to be non-zero, the random number generator is seeded by the given +seed. Otherwise, it is seeded by a random seed. +*@li seed2: An optional int. Defaults to 0 . A second seed to avoid seed collision. \n *@par Outputs: -*size: A Tensor of types: float16, float32, double . \n +*y: A Tensor of types: float16, float32, double . A tensor of the specified shape +filled with random truncated normal values. \n *@attention Constraints: *The implementation for TruncatedNormal on Ascend uses AICPU, with bad performance. @@ -505,15 +562,15 @@ REG_OP(RandomChoiceWithMask) *@par Inputs: *Inputs including: -* @li x: A required Tensor. 
Must be one of the following types: - float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n +* x: A required Tensor. Must be one of the following types: + float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n *@par Attributes: -*@li group: A required int32, specifying the number of groups to split the channel dimension into. Defaults to "1" . \n +* group: A required int32, specifying the number of groups to split the channel dimension into. Defaults to "1" . \n *@par Outputs: -*y: A required Tensor. Has same type and shape as "x". Must be one of the following types: - float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n +* y: A required Tensor. Has same type and shape as "x". Must be one of the following types: + float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n *@attention Constraints: *@li "group" must be greater than 0 and must evenly divide the channel dimension size. @@ -584,6 +641,50 @@ REG_OP(DropoutV2) .OUTPUT(seed, TensorType({ DT_FLOAT })) .REQUIRED_ATTR(p, Float) .OP_END_FACTORY_REG(DropoutV2) + +/** +* @brief The Bernoulli distribution with probability . \n + +* @par Inputs: +* @li x: A ND Tensor. Must be one of the following data types: + int8, uint8, int16, int32, int64, bool, float32, float64 . +* @li p: A ND Tensor. The probability of an element to be zeroed. + Must be one of the following data types: float32, float64. \n + +* @par Attributes: +* seed: An Integer, the seed of the random generator. Default value -1 + to use current timestamp, otherwise it should be a positive integer. + +* @par Outputs: +* y: A tensor with the same shape and type as "x". +*/ + +REG_OP(Bernoulli) + .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + .INPUT(p, TensorType({ DT_FLOAT, DT_DOUBLE })) + .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE})) + .ATTR(seed, Int, -1) + .OP_END_FACTORY_REG(Bernoulli) + +/** + * @brief: Fill the input tensor with values drawn from the uniform distribution U(from, to). \n + + * @par Inputs: + * x: A Tensor. Must be one of the following types: float16, float, double. \n + + * @par Attributes: + * @li from: The lower bound of the uniform. Defaults: 0.0 + * @li to: The upper bound of the uniform. Defaults: 1.0 \n + + * @par Outputs: + * y: A Tensor has the same type as x. \n + */ +REG_OP(Uniform) + .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE})) + .ATTR(from, Float, 0.0) + .ATTR(to, Float, 1.0) + .OP_END_FACTORY_REG(Uniform) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/reduce_ops.h b/third_party/fwkacllib/inc/ops/reduce_ops.h index 97c7b8e1..1578ba59 100644 --- a/third_party/fwkacllib/inc/ops/reduce_ops.h +++ b/third_party/fwkacllib/inc/ops/reduce_ops.h @@ -576,7 +576,7 @@ REG_OP(ReduceAll) *@li axis: A mutable Tensor. The dimensions to reduce . \n *@par Attributes: -*@li keep_dims: A bool. If true, retains reduced dimensions with length 1. Defaults to "False" . \n +*keep_dims: A bool. If true, retains reduced dimensions with length 1. Defaults to "False" . \n *@par Outputs: *y: A Tensor. Has the same type and format as input "x" . \n @@ -967,9 +967,9 @@ REG_OP(EuclideanNormD) Defaults to "0.00001" . \n *@par Outputs: -*y: A Tensor of type float16 or float32 for the normalized "x". 
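// What the Uniform operator above computes, as a standalone sketch (plain C++;
// UniformRef is an illustrative name, and x only contributes its element count):
#include <random>
#include <vector>

std::vector<float> UniformRef(size_t n, float from = 0.0f, float to = 1.0f) {
  std::mt19937 gen(std::random_device{}());
  std::uniform_real_distribution<float> dist(from, to);  // U(from, to)
  std::vector<float> y(n);
  for (auto& v : y) v = dist(gen);
  return y;
}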
-batch_mean: A Tensor of type float32 for the result mean.
-batch_ variance: A Tensor of type float32 for the result variance . \n
+*@li y: A Tensor of type float16 or float32 for the normalized "x".
+*@li batch_mean: A Tensor of type float32 for the result mean.
+*@li batch_variance: A Tensor of type float32 for the result variance. \n

*@attention Constraints:
*For Ascend 310, the result accuracy fails to reach 0.001 due to the square root instruction.
@@ -987,7 +987,7 @@ REG_OP(INInferV2)
    .OP_END_FACTORY_REG(INInferV2)

/**
-*@brief Performs reduced instance normalization . \n
+*@brief Performs the reduce step of instance normalization. \n

*@par Inputs:
*x: A Tensor of type float16 or float32. \n
@@ -1008,32 +1008,31 @@ REG_OP(INTrainingReduceV2)


/**
-*@brief Performs update instance normalization . \n
+*@brief Performs the update step of instance normalization. \n

*@par Inputs:
-* Seven inputs, including: (NC1HWC0supported)
+* Seven inputs, including:
*@li x: A Tensor of type float16 or float32.
*@li sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
*@li square_sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
*@li gamma: A Tensor of type float32, for the scaling gamma.
*@li beta: A Tensor of type float32, for the scaling beta.
*@li mean: A Tensor of type float32, for the updated mean.
-*@li variance: A Tensor of type float32, for the updated variance . \n
+*@li variance: A Tensor of type float32, for the updated variance. \n

*@par Attributes:
*@li momentum: A required float32, specifying the momentum to update mean and var.
-*@li epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero . \n
+*@li epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero. \n

*@par Outputs:
* Three outputs
*@li y: A Tensor of type float16 or float32, for normalized "x".
*@li batch_mean: A Tensor of type float32, for the updated mean.
-*@li batch_variance: A Tensor of type float32, for the updated variance . \n
+*@li batch_variance: A Tensor of type float32, for the updated variance. \n

*@attention Constraints:
-*@li This operator is a InstanceNorm fusion operator for updating the moving averages for training.
+* This operator is an InstanceNorm fusion operator for updating the moving averages for training.
* This operator is used in conjunction with INTrainingReduceV2.
-*@li For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction.
*/
REG_OP(INTrainingUpdateV2)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
@@ -1051,6 +1050,80 @@ REG_OP(INTrainingUpdateV2)
    .OP_END_FACTORY_REG(INTrainingUpdateV2)


+/**
+*@brief Performs the backpropagation of InstanceNorm. \n
+
+*@par Inputs:
+* Seven inputs, including:
+*@li dy: A Tensor of type float16 or float32.
+*@li x: A Tensor of type float16 or float32.
+*@li variance: A Tensor of type float32, for the variance of "x".
+*@li mean: A Tensor of type float32, for the mean of "x".
+*@li res_gamma: A Tensor of type float32.
+*@li res_beta: A Tensor of type float32.
+*@li gamma: A Tensor of type float32. \n
+
+*@par Outputs:
+*pd_x: A Tensor of type float16 or float32, for the gradient with respect to "x". \n
+
+*@attention Constraints:
+* The preceding layer of this operator must be INTrainingUpdateGrad.
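// How INTrainingReduceV2 and INTrainingUpdateV2 fit together for one instance
// and channel, as a standalone sketch (plain C++, not the fused kernels; names
// are illustrative and the momentum-based moving-average update is omitted):
#include <cmath>
#include <cstddef>
#include <vector>

void InstanceNormRef(const std::vector<float>& x, float gamma, float beta,
                     float epsilon, std::vector<float>& y) {
  float sum = 0.0f, square_sum = 0.0f;
  for (float v : x) { sum += v; square_sum += v * v; }   // the "reduce" step
  const float n = static_cast<float>(x.size());
  const float mean = sum / n;
  const float variance = square_sum / n - mean * mean;   // E[x^2] - (E[x])^2
  y.resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {           // the "update" step
    y[i] = gamma * (x[i] - mean) / std::sqrt(variance + epsilon) + beta;
  }
}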
\n +*/ +REG_OP(INTrainingReduceGrad) + .INPUT(dy, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(variance, TensorType({DT_FLOAT})) + .INPUT(mean, TensorType({DT_FLOAT})) + .INPUT(res_gamma, TensorType({DT_FLOAT})) + .INPUT(res_beta, TensorType({DT_FLOAT})) + .INPUT(gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_x, TensorType({DT_FLOAT16,DT_FLOAT})) + .OP_END_FACTORY_REG(INTrainingReduceGrad) + +/** +*@brief Performs the backpropagation of InstanceNorm. \n + +*@par Inputs: +* Four inputs, including: +*@li dy: A Tensor of type float16 or float32, for the gradient. +*@li x: A Tensor of type float16 or float32. +*@li variance: A Tensor of type float32, for the variance of "x". +*@li mean: A Tensor of type float32, for the mean of "x". \n + +*@par Outputs: +*@li res_gamma: A Tensor of type float32. +*@li res_beta: A Tensor of type float32. \n + +*/ +REG_OP(INTrainingUpdateGrad) + .INPUT(dy, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT})) + .INPUT(variance, TensorType({DT_FLOAT})) + .INPUT(mean, TensorType({DT_FLOAT})) + .OUTPUT(res_gamma, TensorType({DT_FLOAT})) + .OUTPUT(res_beta, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(INTrainingUpdateGrad) + +/** +*@brief Performs the backpropagation of InstanceNorm. \n + +*@par Inputs: +* Two inputs, including: +*@li res_gamma: A Tensor of type float32. +*@li res_beta: A Tensor of type float32. \n + +*@par Outputs: +*@li pd_gamma: A Tensor of type float32. +*@li pd_beta: A Tensor of type float32. \n + +*/ +REG_OP(INTrainingUpdateGradGammaBeta) + .INPUT(res_gamma, TensorType({DT_FLOAT})) + .INPUT(res_beta, TensorType({DT_FLOAT})) + .OUTPUT(pd_gamma, TensorType({DT_FLOAT})) + .OUTPUT(pd_beta, TensorType({DT_FLOAT})) + .OP_END_FACTORY_REG(INTrainingUpdateGradGammaBeta) + /** *@brief Performs reduced group normalization . \n @@ -1063,7 +1136,7 @@ REG_OP(INTrainingUpdateV2) *@par Attributes: -*@li num_groups: Int, specifying the num of groups. required, same to GNTrainingUpdate . \n +*num_groups: Int, specifying the num of groups. required, same to GNTrainingUpdate . \n *@attention Constraints: * This operator is a GroupNorm fusion operator for updating the moving averages for training. @@ -1081,7 +1154,7 @@ REG_OP(GNTrainingReduce) *@brief Performs update group normalization . \n *@par Inputs: -* Eight inputs, including: (NCHW NHWC supported) +* Seven inputs, including: (NCHW NHWC supported) *@li x: A Tensor of type float16 or float32. *@li sum: A 5D Tensor of type float32, shape is [N, G, 1, 1, 1] for NCHW, [N, 1, 1, G, 1] for NHWC @@ -1145,8 +1218,8 @@ include: *@li keep_dims:A bool, An optional bool. Defaults to False. If True, retain reduced dimensions with length 1.. *@li separator:string. -*@par output: -*@li output::A Tensor of type string.. +*@par Outputs: +*output:A Tensor of type string. */ REG_OP(ReduceJoin) .INPUT(input, TensorType({DT_STRING})) @@ -1160,7 +1233,7 @@ REG_OP(ReduceJoin) * @brief Calculates the standard deviation and average value of Tensors. * @par Inputs: -* @li x: A Tensor. Must be one of the following types: +* x: A Tensor. Must be one of the following types: * float16, float32. \n * @par Attributes: diff --git a/third_party/fwkacllib/inc/ops/resource_variable_ops.h b/third_party/fwkacllib/inc/ops/resource_variable_ops.h index 74ac83f8..156f2f34 100644 --- a/third_party/fwkacllib/inc/ops/resource_variable_ops.h +++ b/third_party/fwkacllib/inc/ops/resource_variable_ops.h @@ -33,10 +33,12 @@ namespace ge { *y:A Tensor of type resource. 
\n *@par Attributes: -* @li container: optional, string. -* @li shared_name: optional, string. -* @li dtype: required, type. -* @li shape: optional, ListInt. \n +* @li container: optional, string. the container this +variable is placed in. +* @li shared_name: optional, string.the name by which + this variable is referred to. +* @li dtype: required, type. the output of type. +* @li shape: optional, ListInt. the output of shape. \n *@see VarHandleOp. */ @@ -53,11 +55,11 @@ REG_OP(VarHandleOp) *@brief Assigns a new value to a variable. \n *@par Inputs: -*resource:Handle to the resource in which to store the variable. -*value:The value to set the new tensor to use. \n +*@li resource:Handle to the resource in which to store the variable. +*@li value:The value to set the new tensor to use. \n *@par Attributes: -* @li dtype: required, type. \n +* dtype: required, type. \n *@see AssignVariableOp. */ @@ -73,11 +75,11 @@ REG_OP(AssignVariableOp) *@brief Adds a value to the current value of a variable. \n *@par Inputs: -*resource:Handle to the resource in which to store the variable. -*value:The value by which the variable will be incremented. \n +*@li resource:Handle to the resource in which to store the variable. +*@li value:The value by which the variable will be incremented. \n *@par Attributes: -* @li dtype: required, type. \n +* dtype: required, type. \n *@see AssignAddVariableOp. */ @@ -93,11 +95,11 @@ REG_OP(AssignAddVariableOp) *@brief Subtracts a value to the current value of a variable. \n *@par Inputs: -*resource:Handle to the resource in which to store the variable. -*value:The value by which the variable will be incremented. \n +*@li resource:Handle to the resource in which to store the variable. +*@li value:The value by which the variable will be incremented. \n *@par Attributes: -* @li dtype: required, type. \n +* dtype: required, type. \n *@see AssignSubVariableOp. */ diff --git a/third_party/fwkacllib/inc/ops/rnn.h b/third_party/fwkacllib/inc/ops/rnn.h index 80546860..20828a89 100644 --- a/third_party/fwkacllib/inc/ops/rnn.h +++ b/third_party/fwkacllib/inc/ops/rnn.h @@ -127,9 +127,7 @@ REG_OP(DynamicLSTM) *@li cell_clip:An float identifying the cell clip in the op. Default to -1. *@li num_proj:An integer identifying the num projection in the op. Default to 0. *@li time_major:An bool identifying the time major in the op. Default to false. -*@li activation:An string identifying the type of activation function in the op. Default to "tanh". Only tanh is currently supported. *@li forget_bias:An float identifying the forget bias in the op. Default to 0. -*@li is_training:An bool identifying is training in the op. Default to true. *@par Outputs: *eight outputs: \n @@ -491,7 +489,6 @@ REG_OP(DynamicLSTMV2) *ten inputs: \n *@li w:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li init_c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. -*@li h:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li c:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dy:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dh:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. @@ -504,10 +501,11 @@ REG_OP(DynamicLSTMV2) *@par Outputs: -*eight outputs: \n +*four outputs: \n *@li dx:A 4D Tensor. 
Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dh_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. *@li dc_prev:A 4D Tensor. Must be one of the following types: float16, float32. The format must be FRACTAL_NZ. +*@li dgate:A 4D Tensor. Must be one of the following types: float16. The format must be FRACTAL_NZ. */ REG_OP(LSTMInputGrad) .INPUT(w, TensorType({DT_FLOAT16, DT_FLOAT})) @@ -571,13 +569,13 @@ REG_OP(DynamicLSTMGradCell) .INPUT(f, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(o, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(tanhct, TensorType({DT_FLOAT16, DT_FLOAT})) - .INPUT(mask, TensorType({DT_FLOAT16, DT_FLOAT})) .INPUT(t_state, TensorType({DT_INT32, DT_INT32})) + .INPUT(mask, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(dgate, TensorType({DT_FLOAT16, DT_FLOAT})) .OUTPUT(dct_1, TensorType({DT_FLOAT16, DT_FLOAT})) - .ATTR(forget_bias, Float, 1) - .ATTR(activation, String, "") - .ATTR(direction, String, "Forward") + .ATTR(forget_bias, Float, 1.0) + .ATTR(activation, String, "tanh") + .ATTR(direction, String, "UNIDIRECTIONAL") .ATTR(gate_order, String, "ijfo") .OP_END_FACTORY_REG(DynamicLSTMGradCell) @@ -1070,7 +1068,7 @@ REG_OP(GRUV2HiddenGradCell) * If "False", "grad_weight" will not be scale by word_frequency. \n * @par Outputs: -* @li grad_weight: A mutable output Tensor of new word grad has the same type as "grads". \n +* y: A mutable output Tensor of new word grad has the same type as "grads". \n * @par Third-party framework compatibility * Compatible with the Pytorch operator EmbeddingDenseGrad. @@ -1222,7 +1220,7 @@ REG_OP(CommonGRU) * is equivalent to the size of indices. This matches the CSR format.. \n * @par Outputs: -* @li grad_weight: A mutable output Tensor of new word grad has the same type as "grads". \n +* y: A mutable output Tensor of new word grad has the same type as "grads". \n * @par Third-party framework compatibility * Compatible with the Pytorch operator EmbeddingBag. diff --git a/third_party/fwkacllib/inc/ops/rpn_ops.h b/third_party/fwkacllib/inc/ops/rpn_ops.h index 089af326..850b3e5a 100644 --- a/third_party/fwkacllib/inc/ops/rpn_ops.h +++ b/third_party/fwkacllib/inc/ops/rpn_ops.h @@ -28,12 +28,12 @@ namespace ge { * iou_threshold with higher scoring box according to their * intersection-over-union (IoU) . \n -*@par Input: -* @li box_scores: 2-D tensor with shape of [N, 8], including proposal boxes and +* @par Inputs: +* box_scores: 2-D tensor with shape of [N, 8], including proposal boxes and * corresponding confidence scores . \n * @par Attributes: -* @li iou_threshold: An optional float. The threshold for deciding whether boxes +* iou_threshold: An optional float. The threshold for deciding whether boxes * overlap too much with respect to IOU . \n * @par Outputs: diff --git a/third_party/fwkacllib/inc/ops/sdca_ops.h b/third_party/fwkacllib/inc/ops/sdca_ops.h index 34c6a268..601b360b 100644 --- a/third_party/fwkacllib/inc/ops/sdca_ops.h +++ b/third_party/fwkacllib/inc/ops/sdca_ops.h @@ -45,7 +45,13 @@ namespace ge { *corresponding weights in sparse_weights. This field maybe omitted for the dense approach.It's a dynamic input. *@li sparse_weights: a list of vectors where each value is the weight associated with a sparse feature group. *@li dense_weights: a list of vectors where the values are the weights associated with a dense feature group.It's a dynamic input. -*@li example_state_data: a list of vectors containing the example state data. 
+*@li example_state_data: a list of vectors containing the example state data. \n
+
+*@par Attributes:
+*@li adaptive: bool, defaults to false.
+*@li num_sparse_features: the number of sparse feature groups.
+*@li num_sparse_features_with_values: the number of sparse_feature_values inputs.
+*@li num_dense_features: the number of dense feature groups.
*@li loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, squared and hinge losses.
*@li l1: Symmetric l1 regularization strength.
*@li l2: Symmetric l2 regularization strength.
@@ -53,10 +59,10 @@ namespace ge {
*@li num_inner_iterations: Number of iterations per mini-batch . \n

*@par Outputs:
-*y: A Returns a list of vectors containing the updated example state
-*data.a list of vectors where each value is the delta
-*weights associated with a sparse feature group.a list of vectors where the values are the delta
-*weights associated with a dense feature group . \n
+*@li out_example_state_data: a list of vectors containing the updated example state data.
+*@li out_delta_sparse_weights: a list of vectors where each value is the delta
+*weights associated with a sparse feature group.
+*@li out_delta_dense_weights: a list of vectors where the values are the delta
+*weights associated with a dense feature group. \n

*@par Third-party framework compatibility
* Compatible with tensorflow SdcaOptimizerV2 operator.
diff --git a/third_party/fwkacllib/inc/ops/selection_ops.h b/third_party/fwkacllib/inc/ops/selection_ops.h
index 1c26e033..43f72ef3 100644
--- a/third_party/fwkacllib/inc/ops/selection_ops.h
+++ b/third_party/fwkacllib/inc/ops/selection_ops.h
@@ -258,7 +258,7 @@ REG_OP(GatherV2D)

 REG_OP(GatherElements)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
-    .INPUT(index, TensorType({DT_INT64}))
+    .INPUT(index, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
    .ATTR(dim, Int, 0)
    .OP_END_FACTORY_REG(GatherElements)
@@ -508,7 +508,7 @@ REG_OP(UnsortedSegmentSum)

*@par Inputs:
*One inputs, including:
-* @li assist: A tensor. Must be one of the following types:
+* assist: A tensor. Must be one of the following types:
* float16, float32. \n

* @par Attributes:
@@ -970,10 +970,11 @@ REG_OP(TopKV2)
* for matrices) . \n

* @par Attributes:
-* @li sorted: An optional bool. Defaults to true.
+* @li sorted: Defaults to true.
* If true, the resulting "k" elements will be sorted by the values in descending
* order.
-* @li T: Indicator of indices type . \n
+* @li largest: If true, the resulting "k" elements will be sorted by the values in descending order.
+* @li dim: 0-D. The dimension along which to look for the top "k" elements (along each row for matrices). \n

* @par Outputs:
* @li values: A Tensor, specifying the sorted data. Has the same type as
@@ -982,7 +983,7 @@ REG_OP(TopKV2)
* @see TopK()

* @par Third-party framework compatibility
-* @li Compatible with the TensorFlow operator TopKV2.
+* Compatible with the TensorFlow operator TopKV2.
*/
REG_OP(TopK)
    .INPUT(x, TensorType::RealNumberType())
@@ -1085,7 +1086,6 @@ REG_OP(InTopKD)
* @brief Says whether the targets are in the top "k" predictions . \n

* @par Inputs:
-* Two inputs, including:
* @li x1: A 2D Tensor of type float32. A "batch_size * classes" tensor.
* @li x2: A 1D Tensor of type IndexNumberType. A batch_size tensor of class ids.
* @li k: A 1D Tensor of the same type as "x2".
@@ -1618,12 +1618,12 @@ REG_OP(UnsortedSegmentMinD)
* y: A Tensor of type RealNumberType . \n

* @attention Constraints:
-* @li segment_ids must be non-negative tensor.
+* segment_ids must be a non-negative tensor.
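// The UnsortedSegmentMax semantics referenced above, as a standalone sketch
// (plain C++; UnsortedSegmentMaxRef is an illustrative name, and segments that
// receive no element are left at the lowest representable value):
#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

std::vector<float> UnsortedSegmentMaxRef(const std::vector<float>& x,
                                         const std::vector<int32_t>& segment_ids,
                                         int32_t num_segments) {
  std::vector<float> y(num_segments, std::numeric_limits<float>::lowest());
  for (size_t i = 0; i < x.size(); ++i) {
    y[segment_ids[i]] = std::max(y[segment_ids[i]], x[i]);  // ids must be non-negative
  }
  return y;
}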
* @see UnsortedSegmentSum(), UnsortedSegmentProd(), * @par Third-party framework compatibility -* @li Compatible with the TensorFlow operator UnsortedSegmentMax. +* Compatible with the TensorFlow operator UnsortedSegmentMax. */ REG_OP(UnsortedSegmentMax) .INPUT(x, TensorType::RealNumberType()) @@ -1875,15 +1875,15 @@ REG_OP(Crop) *@par Inputs: *One inputs, including: -* @li x: A tensor . Must be one of the following types: +* x: A tensor . Must be one of the following types: * float16, float32, int32, uint32, int8, uint8. \n *@par Attributes: -* @li axis: Axis along which to cummin. \n +* axis: Axis along which to cummin. \n *@par Outputs: -* y: A Tensor with the same type and shape of x's. \n -* indices: A Tensor with the int32 type and the same shape of x's. \n +* @li y: A Tensor with the same type and shape of x's. +* @li indices: A Tensor with the int32 type and the same shape of x's. \n *@par Third-party framework compatibility *Compatible with the Pytorch operator Cummin. \n @@ -1968,17 +1968,14 @@ REG_OP(WriteSelect) .OP_END_FACTORY_REG(WriteSelect) /** -*@brief Read data by stride . \n +*@brief Read data by stride. *@par Inputs: -*One input: -*x: A Tensor. Must be one of the following types: float16, int8 . \n +*x: A Tensor. Must be one of the following types: float16, int8. \n *@par Attributes: -*@li axis: A required int32, specifying the index of axis to read by stride . \n - -*@par Attributes: -*@li stride: A required int32, specifying the value of reading stride . \n +*@li axis: A required int32, specifying the index of axis to read by stride. \n +*@li stride: A required int32, specifying the value of reading stride. \n *@par Outputs: *y: A Tensor of the same type as "x". @@ -1991,16 +1988,14 @@ REG_OP(StridedRead) .OP_END_FACTORY_REG(StridedRead) /** -*@brief: Write data by stride . \n +*@brief Write data by stride. *@par Inputs: -*x: A Tensor. Must be one of the following types: float16, int8 . \n - -*@par Attributes: -*@li axis: A required int32, specifying the index of axis to write by stride . \n +*x: A Tensor. Must be one of the following types: float16, int8. \n *@par Attributes: -*@li stride: A required int32, specifying the value of writing stride . \n +*@li axis: A required int32, specifying the index of axis to write by stride. \n +*@li stride: A required int32, specifying the value of writing stride. \n *@par Outputs: *y: A Tensor. Has the same type as "x". @@ -2076,10 +2071,10 @@ REG_OP(CumulativeLogsumexpD) * @li updates: A Tensor of the same type as "var". \n * @par Attributes: -* @li axis: An required int to specify the axis to perform indices add. \n +* axis: An required int to specify the axis to perform indices add. \n * @par Outputs: -* @li var: A Tensor. Same as input "var". +* var: A Tensor. Same as input "var". * @par Third-party framework compatibility * Compatible with the Pytorch operator index_add_. @@ -2104,7 +2099,7 @@ REG_OP(InplaceIndexAdd) * @li value: A Tensor of dtype float16 or float32 or int64 or int32 or int8. * @par Outputs: -* @li y: A tensor. Must be one of the following dtypes: +* y: A tensor. Must be one of the following dtypes: * float16, float32, int64, int32, int8. */ REG_OP(MaskedFill) @@ -2123,7 +2118,7 @@ REG_OP(MaskedFill) * @li mask: A Tensor of dtype is bool. \n * @par Outputs: -* @li y: A tensor with the same type as x. \n +* y: A tensor with the same type as x. \n * @par Third-party framework compatibility * Compatible with the Numpy operator select. 
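// A minimal reference sketch of the MaskedSelectV2 semantics above (plain
// standalone C++, not the GE API; MaskedSelectRef is an illustrative name, and
// x and mask are assumed to already share one flattened shape):
#include <vector>

std::vector<float> MaskedSelectRef(const std::vector<float>& x,
                                   const std::vector<bool>& mask) {
  std::vector<float> y;
  for (size_t i = 0; i < x.size(); ++i) {
    if (mask[i]) y.push_back(x[i]);  // keep x[i] wherever mask[i] is true
  }
  return y;
}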
@@ -2134,13 +2129,50 @@ REG_OP(MaskedSelectV2) .INPUT(mask, TensorType({DT_BOOL})) .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) .OP_END_FACTORY_REG(MaskedSelectV2) + +/** +* @brief Choose the value of X with value according to mask. + +* @par Inputs: +* two inputs, including: +* @li x: A Tensor of dtype is float16 or float32 or float64 or int64 or int32 or int16 or int8 or uint8. +* @li mask: A Tensor of dtype is bool. \n + +* @par Outputs: +* @li y: A tensor with the same type as x. \n + +*/ +REG_OP(MaskedSelect) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .INPUT(mask, TensorType({DT_BOOL})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .OP_END_FACTORY_REG(MaskedSelect) + +/** +* @brief update the value of X with value according to mask. + +* @par Inputs: +* three inputs, including: +* @li x: A Tensor of dtype is float16 or float32 or float64 or int64 or int32 or int16 or int8 or uint8. +* @li mask: A Tensor of dtype is bool. +* @li updates: A tensor with the same type as x. \n + +* @par Outputs: +* @li y: A tensor with the same type as x. \n +*/ +REG_OP(MaskedScatter) + .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .INPUT(mask, TensorType({DT_BOOL})) + .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64})) + .OP_END_FACTORY_REG(MaskedScatter) /** * @brief Slice a tensor at its last dim, e.x. a[..., begin:end:stride]. \n * @par Inputs: * One inputs, including: -* @li x: A Tensor. Must be one of the following types: float16, float32, int16, int32. +* x: A Tensor. Must be one of the following types: float16, float32, int16, int32. * @par Attributes: * @li start: An attribute of type Int, start index of last dim. \n @@ -2148,7 +2180,7 @@ REG_OP(MaskedSelectV2) * @li stride: An attribute of type Int, stride of slice. \n * @par Outputs: -* @li y: A Tensor. Has the same type as "x". \n +* y: A Tensor. Has the same type as "x". \n * @par Third-party framework compatibility * No compatibility @@ -2162,39 +2194,36 @@ REG_OP(SliceLastDim) .OP_END_FACTORY_REG(SliceLastDim) /** -* @brief Extracts a strided slice of a tensor. Roughly speaking, this op \n -* extracts a slice of size (end-begin)/stride from the given input tensor. \n -* Starting at the location specified by begin the slice continues by \n +* @brief Extracts a strided slice of a tensor. Roughly speaking, this op +* extracts a slice of size (end-begin)/stride from the given input tensor. +* Starting at the location specified by begin the slice continues by * adding stride to the index until all dimensions are not less than end. \n * * @par Inputs: -* Four inputs, including: -* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, \n -* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16, \n -* complex128, float16, uint32, uint64, complex64, complex128. \n +* Five inputs, including: +* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, +* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16, +* complex128, float16, uint32, uint64, complex64, complex128. * @li begin: A Tensor of type int32 or int64, for the index of the first value to select. 
-* * @li end: A Tensor of type int32 or int64, for the index of the last value to select. -* * @li axes: A Tensor of type int32 or int64, indicate axis to be select. -* -* @li strides: A Tensor of type int32 or int64, for the increment. +* @li strides: A Tensor of type int32 or int64, for the increment. \n * * @par Attributes: -* @li begin_mask: A Tensor of type int32. \n -* A bitmask where a bit "i" being "1" means to ignore the begin \n +* @li begin_mask: A Tensor of type int32. +* A bitmask where a bit "i" being "1" means to ignore the begin * value and instead use the largest interval possible. -* @li end_mask: A Tensor of type int32. \n +* @li end_mask: A Tensor of type int32. * Analogous to "begin_mask". -* @li ellipsis_mask: A Tensor of type int32. \n -* A bitmask where bit "i" being "1" means the "i"th position \n +* @li ellipsis_mask: A Tensor of type int32. +* A bitmask where bit "i" being "1" means the "i"th position * is actually an ellipsis. -* @li new_axis_mask: A Tensor of type int32. \n -* A bitmask where bit "i" being "1" means the "i"th \n +* @li new_axis_mask: A Tensor of type int32. +* A bitmask where bit "i" being "1" means the "i"th * specification creates a new shape 1 dimension. -* @li shrink_axis_mask: A Tensor of type int32. \n -* A bitmask where bit "i" implies that the "i"th \n -* specification should shrink the dimensionality. +* @li shrink_axis_mask: A Tensor of type int32. +* A bitmask where bit "i" implies that the "i"th +* specification should shrink the dimensionality. \n * * @par Outputs: * y: A Tensor. Has the same type as "x". @@ -2231,7 +2260,7 @@ REG_OP(StridedSliceV2) * float16, float32, int32. \n * @par Attributes: -* @li dim: A required int. Used to select the dimension of this tensor. \n +* dim: A required int. Used to select the dimension of this tensor. \n *@par Outputs: *y: A Tensor with the same type and shape of input_x's. \n @@ -2307,6 +2336,34 @@ REG_OP(MaskedFillRange) .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32})) .REQUIRED_ATTR(axis, Int) .OP_END_FACTORY_REG(MaskedFillRange) + +/** +* @brief After a set of sorted data and a new set of data are re-sorted, get the first k data. \n +* +* @par Inputs: +* Six inputs, including: +* @li topk_pq_distance: A sorted Tensor, Will be updated after calculation. Must be one of the following types: float32, float16. +* @li topk_pq_index: A Tensor of type int32, index corresponding to topk_pq_distance. +* @li topk_pq_ivf: A Tensor of type int32 , the bucket number corresponding to topk_pq_distance. +* @li pq_distance: A Tensor of type float32 or float16, the new data set will be reordered with topk_pq_distance and updated to topk_pq_distance. +* @li pq_index: A Tensor of type int32, index corresponding to pq_distance. +* @li pq_ivf: A scalar of type int32 , the bucket number corresponding to pq_distance. \n +* +* @par Attributes: +* @li order: A string, indicates the sorting method of topk_pq_distance. \n +* +* @par Restrictions: +* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use. 
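// How the StridedSliceV2 bitmask attributes above are read, as a standalone
// sketch (plain C++; IgnoresBegin is an illustrative name):
#include <cstdint>

// bit i of begin_mask set => ignore begin[i] and use the widest start for dim i
bool IgnoresBegin(int32_t begin_mask, int dim) {
  return ((begin_mask >> dim) & 1) != 0;
}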
+*/ +REG_OP(InplaceTopKDistance) + .INPUT(topk_pq_distance, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(topk_pq_index, TensorType({DT_INT32})) + .INPUT(topk_pq_ivf, TensorType({DT_INT32})) + .INPUT(pq_distance, TensorType({DT_FLOAT16, DT_FLOAT})) + .INPUT(pq_index, TensorType({DT_INT32})) + .INPUT(pq_ivf, TensorType({DT_INT32})) + .ATTR(order, String, "asc") + .OP_END_FACTORY_REG(InplaceTopKDistance) } // namespace ge #endif // OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_ diff --git a/third_party/fwkacllib/inc/ops/sparse_ops.h b/third_party/fwkacllib/inc/ops/sparse_ops.h index a1fc9ee6..8eb7b521 100644 --- a/third_party/fwkacllib/inc/ops/sparse_ops.h +++ b/third_party/fwkacllib/inc/ops/sparse_ops.h @@ -281,9 +281,9 @@ REG_OP(SparseSliceGrad) * @li size: A 1D Tensor of type int64. The size of the slice . \n *@par Outputs: -*y_indices: A Tensor of type int64. -*y_values: A Tensor. Has the same type as "values". -*y_values: A Tensor of type int64 . \n +*@li y_indices: A Tensor of type int64. +*@li y_values: A Tensor. Has the same type as "values". +*@li y_shape: A Tensor of type int64 . \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator SparseSlice. @@ -313,8 +313,8 @@ REG_OP(SparseSlice) * @li sum_indices: A 2D Tensor of type int64. The indices of the sum SparseTensor, with size [nnz(sum), ndims] . \n *@par Outputs: -*x1_val_grad: A Tensor. Has the same type as "backprop_val_grad". -*x2_val_grad: A Tensor. Has the same type as "backprop_val_grad" . \n +*@li x1_val_grad: A Tensor. Has the same type as "backprop_val_grad". +*@li x2_val_grad: A Tensor. Has the same type as "backprop_val_grad" . \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator SparseAddGrad. @@ -363,7 +363,7 @@ REG_OP(SparseFillEmptyRowsGrad) *@par Inputs: * @li x1_indices: A 2D Tensor of type int32 or int64. -* @li The indices of the matrix "SparseTensor", with size [nnz, 2]. +*The indices of the matrix "SparseTensor", with size [nnz, 2]. * @li x1_values: A 1D Tensor. The values of the SparseTensor, with size [nnz]. * @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor, with size [2]. * @li x2: A dense matrix Tensor of the same type as "x1_values". 2D . \n @@ -373,9 +373,9 @@ REG_OP(SparseFillEmptyRowsGrad) *@par Attributes: *@li adjoint_a: An optional bool. Defaults to "False".Use the adjoint of A in the matrix multiply. -*@li If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A). +*If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A). *@li adjoint_b: An optional bool. Defaults to "False".Use the adjoint of B in the matrix multiply. -*@li If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B) . \n +*If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B) . \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator SparseTensorDenseMatMul. @@ -400,9 +400,13 @@ REG_OP(SparseTensorDenseMatMul) * @li indices: A 0D, 1D, or 2D Tensor of type int32 or int64. * @li output_shape: A 1D Tensor of the same type as "sparse_indices". The shape of the dense output tensor. * @li values: A 1D Tensor. Values corresponding to each row of "sparse_indices", -* @li or a scalar value to be used for all sparse indices. +or a scalar value to be used for all sparse indices. * @li default_value: A Tensor of the same type as "sparse_values" . 
\n +*@par Attributes: +*validate_indices: If true, indices are checked to make sure they are sorted in +lexicographic order and that there are no repeats. \n + *@par Outputs: *y: A Tensor. Has the same type as "values" . \n @@ -427,7 +431,6 @@ REG_OP(SparseToDense) *Concatenation is with respect to the dense versions of these sparse tensors . \n *@par Inputs: -*3 or 5 inputs,contains: * @li indices:A list of at least 2 `Tensor` objects with type `int64`.2-D. *Indices of each input `SparseTensor`.It's a dynamic input. * @li values:A list with the same length as `indices` of `Tensor` objects with the same type. @@ -700,7 +703,6 @@ REG_OP(SparseReduceMaxSparse) *@brief Computes the sum of elements across dimensions of a SparseTensor . \n *@par Inputs: -*4 or 5 inputs, including: * @li x_indices: A 2D Tensor of type int64. *"N x R" matrix with the indices of non-empty values in a *SparseTensor, possibly not in canonical ordering. @@ -711,13 +713,11 @@ REG_OP(SparseReduceMaxSparse) *A length-"K" vector containing the reduction axes . \n *@par Attributes: -* keep_dims: An optional bool. Defaults to "False". +*keep_dims: An optional bool. Defaults to "False". *If true, retains reduced dimensions with length 1 . \n *@par Outputs: -* @li y_indices: A Tensor of type int64. -* @li y_values: A Tensor. Has the same type as "input_values". -* @li y_shape: A Tensor of type int64 . \n +*y: A Tensor. Has the same type as "x_values". \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator SparseReduceSum. @@ -818,7 +818,6 @@ REG_OP(SparseSplit) *@brief Generates sparse cross from a list of sparse and dense tensors . \n *@par Inputs: -*8 or 10 inputs, including: * @li indices: A list of 2D Tensor objects of type int64. * Indices of each input SparseTensor.It's a dynamic input. * @li values: A list of 1D Tensor objects of type int64 or string. @@ -899,9 +898,8 @@ REG_OP(AddManySparseToTensorsMap) *@brief Reads SparseTensors from a "SparseTensorsMap" and concatenate them . \n *@par Inputs: -*2 or 4 inputs, including: * handles: A 1D Tensor of type int64. -* The "N" serialized SparseTensor objects . \n +*The "N" serialized SparseTensor objects . \n *@par Attributes: * @li dtype: A tf.DType. The "dtype" of the SparseTensor objects stored in the "SparseTensorsMap". @@ -911,9 +909,9 @@ REG_OP(AddManySparseToTensorsMap) *The shared name for the "SparseTensorsMap" read by this op . \n *@par Outputs: -* @li indices: A Tensor of type int64. -* @li values: A Tensor of type "dtype". -* @li shape: A Tensor of type int64 . \n +* @li indices: A Tensor of type int64.2-D. The `indices` of the minibatch `SparseTensor`. +* @li values: A Tensor of type "dtype". 1-D. The `values` of the minibatch `SparseTensor`. +* @li shape: A Tensor of type int64 . 1-D. The `shape` of the minibatch `SparseTensor`. \n *@par Third-party framework compatibility * Compatible with the TensorFlow operator TakeManySparseFromTensorsMap. @@ -989,8 +987,7 @@ REG_OP(SerializeManySparse) *@brief Deserializes SparseTensor objects . \n *@par Inputs: -*Two inputs, including: -* serialized_sparse: A Tensor. The serialized SparseTensor objects. +*serialized_sparse: A Tensor. The serialized SparseTensor objects. *The last dimension must have 3 columns . 
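// The SparseToDense scatter described above, specialized to the 2-D case as a
// standalone sketch (plain C++; SparseToDenseRef is an illustrative name, and
// indices are assumed already validated per validate_indices):
#include <cstdint>
#include <utility>
#include <vector>

std::vector<float> SparseToDenseRef(const std::vector<std::pair<int64_t, int64_t>>& indices,
                                    const std::vector<float>& values,
                                    int64_t rows, int64_t cols, float default_value) {
  std::vector<float> dense(static_cast<size_t>(rows * cols), default_value);  // start from default_value
  for (size_t i = 0; i < indices.size(); ++i) {
    dense[static_cast<size_t>(indices[i].first * cols + indices[i].second)] = values[i];
  }
  return dense;
}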
 *@par Attributes:
diff --git a/third_party/fwkacllib/inc/ops/spectral_ops.h b/third_party/fwkacllib/inc/ops/spectral_ops.h
index 34ccb398..ab9e1dec 100644
--- a/third_party/fwkacllib/inc/ops/spectral_ops.h
+++ b/third_party/fwkacllib/inc/ops/spectral_ops.h
@@ -31,10 +31,10 @@ namespace ge {
 inner-most dimension of `x`. \n

 *@par Inputs:
-*@li x: A Tensor. Must be the following types: complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: complex64, complex128. \n

 *@par Outputs:
-*@li y: A complex tensor of the same rank as `x`. \n
+*y: A complex tensor of the same rank as `x`. \n

 *@par Third-party framework compatibility
 * Compatible with TensorFlow IFFT operator.
@@ -52,7 +52,7 @@ REG_OP(IFFT)
 *@li fft_length: An int32 tensor of shape [1]. The FFT length . \n

 *@par Outputs:
-*@li y: A complex64 tensor of the same rank as `input`. The inner-most
+*y: A complex64 tensor of the same rank as `input`. The inner-most
 dimension of `input` is replaced with the `fft_length / 2 + 1` unique
 frequency components of its 1D Fourier transform . \n

@@ -73,7 +73,7 @@ REG_OP(RFFT)
 *@li fft_length: An int32 tensor of shape [1]. The FFT length. \n

 *@par Outputs:
-*@li y: A float32 tensor of the same rank as `input`. The inner-most
+*y: A float32 tensor of the same rank as `input`. The inner-most
 dimension of `input` is replaced with the `fft_length` samples of its inverse
 1D Fourier transform. \n

@@ -91,10 +91,10 @@ REG_OP(IRFFT)
 *@brief 2D fast Fourier transform. \n

 *@par Inputs:
-*@li x: A complex64 tensor.
+*x: A complex64 tensor.

 *@par Outputs:
-*@li y: A complex64 tensor of the same shape as `input`. The inner-most 2
+*y: A complex64 tensor of the same shape as `input`. The inner-most 2
 dimensions of `input` are replaced with their 2D Fourier transform. \n

 *@par Third-party framework compatibility
@@ -110,10 +110,10 @@ REG_OP(FFT2D)
 innermost dimension of the input. \n

 *@par Inputs:
-*@li x: A Tensor. Must be the following types: complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: complex64, complex128. \n

 *@par Outputs:
-*@li y: A complex tensor with the same shape as input. The innermost dimension
+*y: A complex tensor with the same shape as input. The innermost dimension
 of the input is replaced by its 1-dimensional Fourier transform. \n

 *@par Third-party framework compatibility
@@ -129,10 +129,10 @@ REG_OP(FFT)
 innermost dimension of the input. \n

 *@par Inputs:
-*@li x: A Tensor. Must be the following types: complex64, complex128. \n
+*x: A Tensor. Must be one of the following types: complex64, complex128. \n

 *@par Outputs:
-*@li y: A complex tensor with the same shape as input. The innermost dimension
+*y: A complex tensor with the same shape as input. The innermost dimension
 of the input is replaced by its inverse two-dimensional Fourier transform. \n

 *@par Third-party framework compatibility
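// A small compile-time check of the RFFT/IRFFT length rule quoted above: a
// real 1-D signal transformed with fft_length keeps fft_length / 2 + 1 unique
// complex bins. The helper name is illustrative.
#include <cstdint>
constexpr uint32_t FftOutputBins(uint32_t fft_length) { return fft_length / 2U + 1U; }
static_assert(FftOutputBins(8U) == 5U, "an 8-point RFFT keeps 5 unique frequency bins");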
diff --git a/third_party/fwkacllib/inc/ops/split_combination_ops.h b/third_party/fwkacllib/inc/ops/split_combination_ops.h
index fe25a46f..98d4d111 100644
--- a/third_party/fwkacllib/inc/ops/split_combination_ops.h
+++ b/third_party/fwkacllib/inc/ops/split_combination_ops.h
@@ -161,14 +161,11 @@ REG_OP(SplitVD)
 /**
 *@brief Concatenates a list of N tensors along the first dimension.
 *@par Inputs:
-* Two inputs, including:
-* @li values: A list of Tensors. Must be one of the following types: int8, int16, int32,
+* One input, including:
+* values: A list of Tensors. Must be one of the following types: int8, int16, int32,
 * int64, uint8, uint16, uint32, uint64, float16, float32.
 * Tensors to be concatenated. All must have size 1 in the first dimension and same shape.
-* It's a dynamic input.
-* @li shape: A Tensor of the same type as "x".
-* The final shape of the result. Should be equal to the shapes of any input
-* but with the number of input values in the first dimension . \n
+* It's a dynamic input. \n

 *@par Attributes:
 * @li shape: A required list of ints.
diff --git a/third_party/fwkacllib/inc/ops/state_ops.h b/third_party/fwkacllib/inc/ops/state_ops.h
index 3c8e32b6..d1ec00b5 100644
--- a/third_party/fwkacllib/inc/ops/state_ops.h
+++ b/third_party/fwkacllib/inc/ops/state_ops.h
@@ -104,7 +104,7 @@ REG_OP(DestroyTemporaryVariable)
 *@brief Checks whether a tensor has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized . \n

 *@par Inputs:
-*x: A tensor . \n
+*x: A Tensor of type float16, float32, double, bool, int8, uint8, uint16, int16, int32, uint32, uint64, int64. \n

 *@par Outputs:
 *y: A tensor, indicating whether "x" has been initialized . \n
diff --git a/third_party/fwkacllib/inc/ops/stateful_random_ops.h b/third_party/fwkacllib/inc/ops/stateful_random_ops.h
index c2f65c6a..f4eb763c 100644
--- a/third_party/fwkacllib/inc/ops/stateful_random_ops.h
+++ b/third_party/fwkacllib/inc/ops/stateful_random_ops.h
@@ -32,7 +32,10 @@ namespace ge {
 *@par Inputs:
 *This op may use some OS-provided source of non-determinism (e.g. an RNG),
 *so each execution will give different results. Inputs included:
-*@li shape: The shape of the output tensor . \n
+*shape: The shape of the output tensor . \n
+
+*@par Attributes:
+*dtype: Required. The data type of the output tensor. \n

 *@par Outputs:
 *y:A Returns Non-deterministic integer values with specified shape . \n
@@ -54,13 +57,10 @@ REG_OP(NonDeterministicInts)
 *counter is an unspecified implementation detail . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li delta: The amount of advancement . \n

-*@par Outputs:
-*y:A Returns the created operation . \n
-
 *@par Third-party framework compatibility
 * Compatible with tensorflow RngSkip operator.
 */
@@ -81,11 +81,16 @@ power of two. The bias is small for values of `maxval - minval` significantly
 smaller than the range of the output (either `2^32` or `2^64`) . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor.
-*@li minval: Minimum value (inclusive, scalar).
-*@li maxval: Maximum value (exclusive, scalar) . \n
+*@li counts: A 0/1-D Tensor or Python value. The counts of the binomial
+distribution. Must be broadcastable with the leftmost dimension defined by `shape`.
+*@li probs: A 0/1-D Tensor or Python value. The probability of success for the
+binomial distribution. Must be broadcastable with the leftmost dimension defined by `shape`. \n
+
+*@par Attributes:
+*dtype: Required. The data type of the output tensor. \n

 *@par Outputs:
 *y:A Returns Random values with specified shape . \n
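// The modulo bias the note above warns about, in one line: mapping a raw
// 32-bit word into [minval, maxval) with % is only exactly uniform when the
// span divides 2^32. The function name and types are illustrative; minval <
// maxval is assumed.
#include <cstdint>
uint32_t BiasedUniformInt(uint32_t raw_word, uint32_t minval, uint32_t maxval) {
  return minval + raw_word % (maxval - minval);  // biased unless the span is a power of two
}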
@@ -109,7 +114,7 @@ REG_OP(StatefulRandomBinomial)
 *The generated values will have mean 0 and standard deviation 1 . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor . \n
@@ -134,7 +139,7 @@ REG_OP(StatefulStandardNormalV2)
 *deviations from the mean are dropped and re-picked . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor . \n
@@ -158,7 +163,7 @@
 The generated values follow a uniform distribution in the range `[0, 1)`. The
 lower bound 0 is included in the range, while the upper bound 1 is excluded.

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor . \n
@@ -181,7 +186,7 @@ REG_OP(StatefulUniform)
 The generated values are uniform integers covering the whole range of `dtype` . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor . \n
@@ -209,7 +214,7 @@ power of two. The bias is small for values of `maxval - minval` significantly
 smaller than the range of the output (either `2^32` or `2^64`) . \n

 *@par Inputs:
-*@li resource: The handle of the resource variable that stores the state of the RNG.
+*@li x: The handle of the resource variable that stores the state of the RNG.
 *@li algorithm: The RNG algorithm.
 *@li shape: The shape of the output tensor.
 *@li minval: Minimum value (inclusive, scalar).
 *@li maxval: Maximum value (exclusive, scalar) . \n
diff --git a/third_party/fwkacllib/inc/ops/string_ops.h b/third_party/fwkacllib/inc/ops/string_ops.h
index f9cc2549..a78d63a1 100644
--- a/third_party/fwkacllib/inc/ops/string_ops.h
+++ b/third_party/fwkacllib/inc/ops/string_ops.h
@@ -295,7 +295,7 @@ REG_OP(StringSplit)

 *@par Inputs:
 include:
-*@li input:A Tensor of type string. The text to be processed. \n
+*input: A Tensor of type string. The text to be processed. \n

 *@par Attributes:
 *@li pattern:A string. The regular expression to match the input.
@@ -303,8 +303,8 @@ include:
 *@li replace_global:An optional bool. Defaults to True. If True, the replacement
 is global, otherwise the replacement is done only on the first match.

-*@par output:
-*@li output::A Tensor of type string.
+*@par Outputs:
+*output: A Tensor of type string.
 */
 REG_OP(StaticRegexReplace)
     .INPUT(input, TensorType({DT_STRING}))
@@ -322,13 +322,13 @@ REG_OP(StaticRegexReplace)

 *@par Inputs:
 include:
-*@li input:A Tensor of type string. The text to be processed. \n
+*input: A Tensor of type string. The text to be processed. \n

 *@par Attributes:
-*@li pattern:A string. The regular expression to match the input.
+*pattern: A string. The regular expression to match the input.

-*@par output:
-*@li output::A bool tensor with the same shape as `input`.
+*@par Outputs:
+*output: A bool tensor with the same shape as `input`.
 */
 REG_OP(StaticRegexFullMatch)
     .INPUT(input, TensorType({DT_STRING}))
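// Host-side analogue of the StaticRegexReplace defaults documented above
// (illustrative only): std::regex_replace substitutes every match, which is
// what replace_global = true specifies.
#include <regex>
#include <string>
std::string ReplaceAll(const std::string &input, const std::string &pattern,
                       const std::string &rewrite) {
  return std::regex_replace(input, std::regex(pattern), rewrite);
}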
@@ -347,10 +347,10 @@ include:
 *@li num_segments:A Tensor. Must be one of the following types: int32, int64. A scalar.

 *@par Attributes:
-*@li separator:An optional string. Defaults to "". The separator to use when joining.
+*separator: An optional string. Defaults to "". The separator to use when joining.

-*@par output:
-*@li output::A Tensor of type string..
+*@par Outputs:
+*output: A Tensor of type string.
 */
 REG_OP(UnsortedSegmentJoin)
     .INPUT(input, TensorType({DT_STRING}))
@@ -366,13 +366,13 @@ REG_OP(UnsortedSegmentJoin)

 *@par Inputs:
 include:
-*@li input:A Tensor of type string. The text to be processed.
+*input: A Tensor of type string. The text to be processed.

 *@par Attributes:
-*@li encoding:An optional string. Defaults to "".
+*encoding: An optional string. Defaults to "".

-*@par output:
-*@li output::A Tensor of type string..
+*@par Outputs:
+*output: A Tensor of type string.
 */
 REG_OP(StringLower)
     .INPUT(input, TensorType({DT_STRING}))
@@ -386,13 +386,13 @@ REG_OP(StringLower)

 *@par Inputs:
 include:
-*@li input:A Tensor of type string. The text to be processed.
+*input: A Tensor of type string. The text to be processed.

 *@par Attributes:
-*@li encoding:An optional string. Defaults to "".
+*encoding: An optional string. Defaults to "".

-*@par output:
-*@li output::A Tensor of type string..
+*@par Outputs:
+*output: A Tensor of type string.
 */
 REG_OP(StringUpper)
     .INPUT(input, TensorType({DT_STRING}))
@@ -901,10 +901,10 @@ REG_OP(DecodeBase64)
 *@brief StringNormalization performs string operations for basic cleaning . \n

 *@par Inputs:
-*@li input: only accepts [C] or [1, C] UTF-8 strings tensor . \n
+*input: only accepts [C] or [1, C] UTF-8 strings tensor . \n

 *@par Outputs:
-*@li output: UTF-8 strings tensor after cleaning . \n
+*output: UTF-8 strings tensor after cleaning . \n

 *@par Attributes:
 *@li stopwords : list of strings (default is empty).
@@ -919,13 +919,13 @@ case-sensitive. Default is false.
 *string enum that cases output to be lowercased/uppercases/unchanged. Valid
 values are "LOWER", "UPPER", "NONE". Default is "NONE".

-*@li local : string (default is "en_US").
+*@li locale: string (default is "C").
 *Environment dependent string that denotes the locale according to which output
-strings needs to be upper/lowercased.Default en_US or platform specific equivalent
-as decided by the implementation . \n
+strings need to be upper/lowercased. Default C or platform-specific equivalent
+as decided by the implementation. \n

 *@attention Constraints:
-*@li input can be either a 1-D or 2-D tensor, the shape of 2-D tensor must be [1, C].
+*input can be either a 1-D or 2-D tensor, the shape of 2-D tensor must be [1, C].
 */
 REG_OP(StringNormalizer)
     .INPUT(input, TensorType({DT_STRING}))
     .OUTPUT(output, TensorType({DT_STRING}))
     .ATTR(stopwords, ListString, {})
     .ATTR(is_case_sensitive, Bool, false)
     .ATTR(case_change_action, String, "NONE")
-    .ATTR(local, String, "en_US")
+    .ATTR(locale, String, "C")
     .OP_END_FACTORY_REG(StringNormalizer)
 }  // namespace ge
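// Hedged sketch of overriding the renamed `locale` attribute when building
// the op. REG_OP(.ATTR) conventionally generates a set_attr_<name> setter;
// treat the exact setter name, the include path, and the locale string here
// as assumptions rather than confirmed API.
#include "ops/string_ops.h"
void ConfigureNormalizer() {
  ge::op::StringNormalizer norm("norm");
  norm.set_attr_case_change_action("LOWER");
  norm.set_attr_locale("en_US.UTF-8");  // the attribute default is now "C"
}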
diff --git a/third_party/fwkacllib/inc/ops/transformation_ops.h b/third_party/fwkacllib/inc/ops/transformation_ops.h
index 4a46e35f..f403fe12 100644
--- a/third_party/fwkacllib/inc/ops/transformation_ops.h
+++ b/third_party/fwkacllib/inc/ops/transformation_ops.h
@@ -29,15 +29,15 @@ namespace ge {

 *@par Inputs:
 *The input handle must have the resource type. Inputs include:
-*@li x:A list of Tensor objects. One or more tensors from which
+*x: A list of Tensor objects. One or more tensors from which
 the enqueued tensors should be taken . \n

 *@par Outputs:
-*@li y:A list of Tensor objects. One or more tensors from which
+*y: A list of Tensor objects. One or more tensors from which
 the enqueued tensors should be taken . \n

 *@par Attributes:
-*@li type: An optional ge::DataType. It refers to the target data type of outputs . \n
+*type: An optional ge::DataType. It refers to the target data type of outputs . \n

 *@par Third-party framework compatibility
 *Compatible with tensorflow QueueIsClosed operator.
@@ -723,11 +723,12 @@ REG_OP(CompressFcOp)
 *@brief Performs Col2im for each batch entry. \n

 *@par Inputs:
-*@li input_x: The Col Tensor. 5-D, shape: `(n, c1, kernel_h*kernel_w, ho*wo, c0)`.
-where ho/wo is do = (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//stride_d + 1 \n
+*@li x: The Col Tensor. 4-D, shape: `(n, c, kernel_h*kernel_w, ho*wo)`,
+where ho/wo is computed as (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//stride_d + 1, with d standing for h or w.
+*@li output_size: The img shape Tensor. 1-D, shape: `(2)`, value: (output_h, output_w). \n

 *@par Outputs:
-*@li output_y: The img Tensor. 5-D, shape: `(n, c1, output_h, output_w, c0)`. \n
+*y: The img Tensor. 4-D, shape: `(n, c, output_h, output_w)`. \n

 *@par Attributes:
 *@li kernel_shape: ListInt, value: `(kernel_h, kernel_w)`, the shape of kernel in convolution.
@@ -909,7 +910,7 @@ output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this o
 *@li either pool_strings or pool_int64s attributes must be present but not both.
 */
-REG_OP(TfidVectorizer)
+REG_OP(TfIdfVectorizer)
     .INPUT(input, TensorType({DT_INT32, DT_INT64, DT_STRING}))
     .OUTPUT(output, TensorType({DT_FLOAT}))
     .REQUIRED_ATTR(max_gram_length, Int)
@@ -921,7 +922,7 @@ REG_OP(TfidVectorizer)
     .ATTR(pool_int64s, ListInt, {})
     .ATTR(pool_strings, ListString, {})
     .ATTR(weights, ListFloat, {})
-    .OP_END_FACTORY_REG(TfidVectorizer)
+    .OP_END_FACTORY_REG(TfIdfVectorizer)
 }  // namespace ge

 #endif  // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
diff --git a/third_party/fwkacllib/inc/runtime/base.h b/third_party/fwkacllib/inc/runtime/base.h
index 7fc1cdea..70e42dc9 100644
--- a/third_party/fwkacllib/inc/runtime/base.h
+++ b/third_party/fwkacllib/inc/runtime/base.h
@@ -20,7 +20,7 @@
 #include 
 #include "toolchain/prof_callback.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -357,7 +357,7 @@ RTS_API rtError_t rtLabelCreateExV2(rtLabel_t *label, rtModel_t model, rtStream_
 */
 RTS_API rtError_t rtGetTaskIdAndStreamID(uint32_t *taskId, uint32_t *streamId);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/config.h b/third_party/fwkacllib/inc/runtime/config.h
index a244c793..76836e7b 100644
--- a/third_party/fwkacllib/inc/runtime/config.h
+++ b/third_party/fwkacllib/inc/runtime/config.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -43,6 +43,7 @@ typedef enum tagRtChipType {
     CHIP_LHISI,
     CHIP_DC,
     CHIP_CLOUD_V2,
+    CHIP_NO_DEVICE,
     CHIP_END,
 } rtChipType_t;

@@ -53,11 +54,11 @@ typedef enum tagRtAicpuScheType {
 } rtAicpuScheType;

 typedef enum tagRtDeviceCapabilityType {
-    RT_SCHEDULE_SOFTWARE = 0, // SoftWare Schedule
-    RT_SCHEDULE_SOFTWARE_OPT,
-    RT_SCHEDULE_HARDWARE, // HWTS Schedule
-    RT_AICPU_BLOCKING_OP_NOT_SUPPORT,
-    RT_AICPU_BLOCKING_OP_SUPPORT, // 1910/1980/1951 ts support AICPU blocking operation
+    RT_SCHEDULE_SOFTWARE = 0,  // Software Schedule
+    RT_SCHEDULE_SOFTWARE_OPT,
+    RT_SCHEDULE_HARDWARE,  // HWTS Schedule
+    RT_AICPU_BLOCKING_OP_NOT_SUPPORT,
+    RT_AICPU_BLOCKING_OP_SUPPORT,  // 1910/1980/1951 ts support AICPU blocking operation
 } rtDeviceCapabilityType;

 typedef enum tagRtVersion {
@@ -235,7 +236,7 @@ RTS_API rtError_t rtSetOpWaitTimeOut(uint32_t timeout);
 */
 RTS_API rtError_t rtSetOpExecuteTimeOut(uint32_t timeout);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
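/*
 * Why the guard change matters, in miniature: with COMPILE_OMG_PACKAGE gone
 * from the condition, every C++ translation unit now wraps these declarations
 * in a C-linkage block. A sketch of what the simplified guard expands to
 * (rtError_t and RTS_API come from base.h):
 */
#if defined(__cplusplus)
extern "C" {
#endif
RTS_API rtError_t rtSetOpExecuteTimeOut(uint32_t timeout);  /* unmangled C symbol */
#if defined(__cplusplus)
}
#endif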
diff --git a/third_party/fwkacllib/inc/runtime/context.h b/third_party/fwkacllib/inc/runtime/context.h
index e95d4c89..c597a657 100644
--- a/third_party/fwkacllib/inc/runtime/context.h
+++ b/third_party/fwkacllib/inc/runtime/context.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -157,7 +157,7 @@ RTS_API rtError_t rtGetGroupCount(uint32_t *count);
 */
 RTS_API rtError_t rtSetCtxINFMode(bool mode);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/dev.h b/third_party/fwkacllib/inc/runtime/dev.h
index 18d837eb..4a9a5817 100644
--- a/third_party/fwkacllib/inc/runtime/dev.h
+++ b/third_party/fwkacllib/inc/runtime/dev.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -80,15 +80,15 @@ typedef enum tagMemoryInfo {
 } rtMemoryInfo_t;

 typedef enum tagRtDeviceModuleType {
-    RT_MODULE_TYPE_SYSTEM = 0,
-    RT_MODULE_TYPE_AICPU,
-    RT_MODULE_TYPE_CCPU,
-    RT_MODULE_TYPE_DCPU,
-    RT_MODULE_TYPE_AICORE,
-    RT_MODULE_TYPE_TSCPU,
-    RT_MODULE_TYPE_PCIE,
-    RT_MODULE_TYPE_VECTOR_CORE
-} tagRtDeviceModuleType_t;
+    RT_MODULE_TYPE_SYSTEM = 0,   /**< system info */
+    RT_MODULE_TYPE_AICPU,        /**< aicpu info */
+    RT_MODULE_TYPE_CCPU,         /**< ccpu info */
+    RT_MODULE_TYPE_DCPU,         /**< dcpu info */
+    RT_MODULE_TYPE_AICORE,       /**< AI CORE info */
+    RT_MODULE_TYPE_TSCPU,        /**< tscpu info */
+    RT_MODULE_TYPE_PCIE,         /**< PCIE info */
+    RT_MODULE_TYPE_VECTOR_CORE,  /**< VECTOR CORE info */
+} rtDeviceModuleType_t;

 /**
  * @ingroup dvrt_dev
@@ -380,7 +380,7 @@ RTS_API rtError_t rtSetDeviceWithoutTsd(int32_t device);
 */
 RTS_API rtError_t rtDeviceResetWithoutTsd(int32_t device);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/dvfsprofile.h b/third_party/fwkacllib/inc/runtime/dvfsprofile.h
index 6e451695..33e2f4c1 100644
--- a/third_party/fwkacllib/inc/runtime/dvfsprofile.h
+++ b/third_party/fwkacllib/inc/runtime/dvfsprofile.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -56,7 +56,7 @@ RTS_API rtError_t rtUnsetDvfsProfile();
 */
 RTS_API rtError_t rtGetDvfsProfile(DvfsProfileMode *pmode);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/event.h b/third_party/fwkacllib/inc/runtime/event.h
index 1cd1a198..81b635c3 100644
--- a/third_party/fwkacllib/inc/runtime/event.h
+++ b/third_party/fwkacllib/inc/runtime/event.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -41,16 +41,6 @@ typedef enum rtEventWaitStatus {
 #define RT_EVENT_DDSYNC_NS 0x01U
 #define RT_EVENT_STREAM_MARK 0x02U
 #define RT_EVENT_DDSYNC 0x04U
 #define RT_EVENT_TIME_LINE 0x08U

-#define RT_EVENT_DDSYNC_NS 0x01U
-#define RT_EVENT_STREAM_MARK 0x02U
-#define RT_EVENT_DDSYNC 0x04U
-#define RT_EVENT_TIME_LINE 0x08U
-
-#define RT_EVENT_DDSYNC_NS 0x01U
-#define RT_EVENT_STREAM_MARK 0x02U
-#define RT_EVENT_DDSYNC 0x04U
-#define RT_EVENT_TIME_LINE 0x08U
-
 /**
  * @ingroup dvrt_event
  * @brief create event instance
@@ -282,7 +272,7 @@ RTS_API rtError_t rtNotifyGetAddrOffset(rtNotify_t notify, uint64_t *devAddrOffs
 */
 RTS_API rtError_t rtSetIpcNotifyPid(const char *name, int32_t pid[], int num);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/kernel.h b/third_party/fwkacllib/inc/runtime/kernel.h
index 9b0221c7..c1b9bd6d 100644
--- a/third_party/fwkacllib/inc/runtime/kernel.h
+++ b/third_party/fwkacllib/inc/runtime/kernel.h
@@ -20,7 +20,7 @@
 #include "base.h"
 #include "stream.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -647,7 +647,7 @@ RTS_API rtError_t rtStartMDCProfiler(void **addr, uint32_t length);
 */
 RTS_API rtError_t rtStopMDCProfiler(void *addr);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/mem.h b/third_party/fwkacllib/inc/runtime/mem.h
index bace4bc6..b049e762 100644
--- a/third_party/fwkacllib/inc/runtime/mem.h
+++ b/third_party/fwkacllib/inc/runtime/mem.h
@@ -24,7 +24,7 @@
 #include "config.h"
 #include "stream.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -547,7 +547,7 @@ RTS_API rtError_t rtSetIpcMemPid(const char *name, int32_t pid[], int num);
 */
 RTS_API rtError_t rtRDMADBSend(uint32_t dbIndex, uint64_t dbInfo, rtStream_t stream);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/rt_ffts.h b/third_party/fwkacllib/inc/runtime/rt_ffts.h
old mode 100755
new mode 100644
index 720da7cd..f2809218
--- a/third_party/fwkacllib/inc/runtime/rt_ffts.h
+++ b/third_party/fwkacllib/inc/runtime/rt_ffts.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) Huawei Technologies Co. , Ltd. 2021. All rights reserved.
+ * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
 * Description: ffts interface
 */

@@ -8,7 +8,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -19,8 +19,8 @@ extern "C" {
 #define RT_FFTS_MANUAL_SRC_DEPEND_TBL_LEN 32U

 typedef enum tagFftsType {
-    RT_FFTS_TYPE_AUTO_THREAD = 2,    // ffts auto thread mode, same as ffts define
-    RT_FFTS_TYPE_MANUAL_THREAD = 3,  // ffts manual thread mode, same as ffts define
+    RT_FFTS_TYPE_AUTO_THREAD = 2,   // ffts auto thread mode, same as ffts define
+    RT_FFTS_TYPE_MANUAL_THREAD = 3, // ffts manual thread mode, same as ffts define
 } rtFftsType_t;

 typedef enum tagFftsSubTaskType {
@@ -37,7 +37,7 @@ typedef enum tagFftsSubTaskType {
 } rtFftsSubTaskType_t;

 typedef struct tagManualThreadDmuInfo {
-    uint64_t dataAddr;  // device mem
+    uint64_t dataAddr; // device mem
     uint16_t numOuter;
     uint16_t numInner;
     uint32_t strideOuter;
@@ -50,44 +50,43 @@ typedef struct tagManualThreadDependency {
 } rtManualThreadDependency_t;

 typedef struct tagManualThreadAicAivInfo {
-    uint64_t taskParamAddr;  // device mem
+    uint64_t taskParamAddr; // device mem
     uint16_t taskParamOffset;
     // when satMode=1 and FP16 computation with none INF inputs overflows/underflows, results will be +/-INF of FP16
-    // when satMode=0 and FP16 computation with none INF inputs overflows/underflows
-    // results will be saturated to +/- MAX of FP16
+    // when satMode=0 and FP16 computation with none INF inputs overflows/underflows,
+    // results will be saturated to +/-MAX of FP16
     uint8_t satMode;
-    uint8_t scheduleMode;  // 0:normal mode, 1:batch mode, 2:sync mode, 3: reserved
-    uint8_t iCachePrefetchCnt;  // units is 2K
-    uint8_t prefetchEnableBitmap;  // 8 bit bitmap 1 0 1 0
-    uint8_t prefetchOnceBitmap;  // 8 bit bitmap 1 0 1 0
-    uint16_t prefetchOnceDmuNum;  // prefetch_once_dmu_descriptor_index in ffts
-    // num: thread0_prefetch_dmu_descriptor_index - prefetch_once_dmu_descriptor_index
-    uint16_t threadPrefetchDmuIdx[RT_FFTS_MAX_MANUAL_THREAD_NUM];  // max valid is threadDim
+    uint8_t scheduleMode; // 0:normal mode, 1:batch mode, 2:sync mode, 3:reserved
+    uint8_t iCachePrefetchCnt; // units is 2K
+    uint8_t prefetchEnableBitmap; // 8 bit bitmap 1 0 1 0
+    uint8_t prefetchOnceBitmap; // 8 bit bitmap 1 0 1 0
+    uint16_t prefetchOnceDmuNum; // prefetch_once_dmu_descriptor_index in ffts
+    // num: thread0_prefetch_dmu_descriptor_index - prefetch_once_dmu_descriptor_index
+    uint16_t threadPrefetchDmuIdx[RT_FFTS_MAX_MANUAL_THREAD_NUM]; // max valid is threadDim
     uint16_t threadBlkDim[RT_FFTS_MAX_MANUAL_THREAD_NUM];
     const char *threadTaskFuncStub[RT_FFTS_MAX_MANUAL_THREAD_NUM];
-    rtManualThreadDmuInfo_t *prefetchList;  // dmu desc 0-64k, length is the last threadPrefetchDmuIdx[threadDim - 1]
+    rtManualThreadDmuInfo_t *prefetchList; // dmu desc 0-64k, length is the last threadPrefetchDmuIdx[threadDim-1]
     rtManualThreadDependency_t srcDepTbl[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
 } rtManualThreadAicAivInfo_t;

 typedef struct tagAutoThreadPrefetch {
-    uint64_t dataAddr;  // device mem
+    uint64_t dataAddr; // device mem
     uint32_t dataAddrOffset;
     uint32_t nonTailDataLen;
     uint32_t tailDataLen;
 } rtAutoThreadPrefetch_t;

 typedef struct tagAutoThreadAicAivInfo {
-    uint64_t taskParamAddr;  // device mem
+    uint64_t taskParamAddr; // device mem
     uint16_t taskParamOffset;
     // when satMode=1 and FP16 computation with none INF inputs overflows/underflows, results will be +/-INF of FP16
-    // when satMode=0 and FP16 computation with none INF inputs overflows/underflows
-    // results will be saturated to +/- MAX of FP16
+    // when satMode=0 and FP16 computation with none INF inputs overflows/underflows, results will be saturated to +/-MAX of FP16
     uint8_t satMode;
-    uint8_t scheduleMode;  // 0:normal mode, 1:batch mode, 2:sync mode, 3: reserved
-    uint8_t iCachePrefetchCnt;  // units is 2K
-    uint8_t prefetchEnableBitmap;  // 8 bit bitmap
-    uint8_t prefetchOnceBitmap;  // 8 bit bitmap
+    uint8_t scheduleMode; // 0:normal mode, 1:batch mode, 2:sync mode, 3:reserved
+    uint8_t iCachePrefetchCnt; // units is 2K
+    uint8_t prefetchEnableBitmap; // 8 bit bitmap
+    uint8_t prefetchOnceBitmap; // 8 bit bitmap

     uint16_t tailBlkDim;
     uint16_t nonTailBlkDim;
@@ -95,13 +94,13 @@ typedef struct tagAutoThreadAicAivInfo {
     const char *nonTailTaskFuncStub;
     const char *tailTaskFuncStub;

-    // for prefetch, valid num is prefetchEnableBitmap bit count
-    // if prefetchEnableBitmap = '00010011', need prefetch number is 3, srcPrefetch is only 0, 1, 2 is valid
+    // for prefetch, valid num is prefetchEnableBitmap bit count.
+    // if prefetchEnableBitmap = '00010011', 3 prefetches are needed and only srcPrefetch[0], [1], [2] are valid
     rtAutoThreadPrefetch_t srcPrefetch[RT_FFTS_MAX_TICKET_CACHE_PER_SUBTASK];
 } rtAutoThreadAicAivInfo_t;

 typedef struct tagAutoThreadCacheInfo {
-    uint64_t dataAddr;  // device mem
+    uint64_t dataAddr; // device mem
     uint32_t dataAddrOffset;
     uint32_t nonTailDataLen;
     uint32_t tailDataLen;
@@ -109,7 +108,7 @@ typedef struct tagAutoThreadCacheInfo {
 } rtAutoThreadCacheInfo_t;

 typedef struct tagManualThreadCacheInfo {
-    rtManualThreadDmuInfo_t *dmuList;  // 0-64k
+    rtManualThreadDmuInfo_t *dmuList; // 0-64k
     uint16_t dmuNum;
     uint16_t sliceDmuIdx[RT_FFTS_MAX_MANUAL_THREAD_NUM];
     uint16_t ticketCacheRefCntTbl[RT_FFTS_MAX_MANUAL_THREAD_NUM];
@@ -152,11 +151,11 @@ typedef struct tagFftsSubTaskInfo {
 } rtFftsSubTaskInfo_t;

 typedef struct tagFftsDescInfo {
-    uint8_t tm;  // thread subtask kickstart mode, 0:order, 1:disorder
-    uint8_t di;  // discard invalidate
-    uint8_t dw;  // discard write back
-    uint8_t df;  // discard flush
-    uint8_t dataSplitUnit;  // split source or ticket cache by 2~dataSplitUnit MB
+    uint8_t tm; // thread subtask kickstart mode, 0:order, 1:disorder
+    uint8_t di; // discard invalidate
+    uint8_t dw; // discard write back
+    uint8_t df; // discard flush
+    uint8_t dataSplitUnit; // split source or ticket cache by 2^dataSplitUnit MB
     uint8_t prefetchOstNum;
     uint8_t cacheMaintainOstNum;
     uint8_t aicPrefetchUpper;
@@ -166,20 +165,20 @@ typedef struct tagFftsDescInfo {
 } rtFftsDescInfo_t;

 typedef struct tagFftsTaskInfo {
-    rtFftsType_t fftsType;
+    rtFftsType_t fftsType;
     uint16_t subTaskNum;
     uint16_t tickCacheNum;
     rtFftsDescInfo_t fftsDesc;
     // sub task desc, real num is subTaskNum
     rtFftsSubTaskInfo_t subTask[RT_FFTS_MAX_SUB_TASK_NUM];

-    // ticket cache, real number is ticketCacheNum
+    // ticket cache, real number is tickCacheNum.
     rtTicketCache_t ticketCache[RT_FFTS_MAX_TICKET_CACHE_NUM];
 } rtFftsTaskInfo_t;

 RTS_API rtError_t rtFftsTaskLaunch(rtFftsTaskInfo_t *fftsTaskInfo, rtStream_t stream);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
-#endif  //__CCE_RUNTIME_FFTS_H
+#endif  // __CCE_RUNTIME_FFTS_H
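// The prefetch comments above tie the number of valid srcPrefetch entries to
// the set bits of prefetchEnableBitmap; a popcount makes that concrete. The
// helper name is illustrative; std::bitset does the counting.
#include <bitset>
#include <cstdint>
inline unsigned ValidPrefetchNum(uint8_t prefetchEnableBitmap) {
  return static_cast<unsigned>(std::bitset<8>(prefetchEnableBitmap).count());
}
// e.g. ValidPrefetchNum(0b00010011) == 3, so srcPrefetch[0..2] are the valid entries.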
diff --git a/third_party/fwkacllib/inc/runtime/rt_model.h b/third_party/fwkacllib/inc/runtime/rt_model.h
index a7618b45..d4af72c5 100644
--- a/third_party/fwkacllib/inc/runtime/rt_model.h
+++ b/third_party/fwkacllib/inc/runtime/rt_model.h
@@ -19,7 +19,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -490,7 +490,7 @@ RTS_API rtError_t rtDebugRegister(rtModel_t model, uint32_t flag, const void *ad
 */
 RTS_API rtError_t rtDebugUnRegister(rtModel_t model);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/runtime/rt_stars.h b/third_party/fwkacllib/inc/runtime/rt_stars.h
index 188656b1..016c352a 100644
--- a/third_party/fwkacllib/inc/runtime/rt_stars.h
+++ b/third_party/fwkacllib/inc/runtime/rt_stars.h
@@ -8,7 +8,7 @@

 #include "base.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -23,6 +23,7 @@ extern "C" {
 */
 RTS_API rtError_t rtStarsTaskLaunch(const void *taskSqe, uint32_t sqeLen, rtStream_t stream);

+
 /**
  * @ingroup rt_stars
  * @brief create cdq instance.
@@ -76,10 +77,11 @@ RTS_API rtError_t rtCdqEnQueue(const char *queName, uint32_t cdqeIndex, void *da
 * @param [in] stream launch task on the stream
 * @return RT_ERROR_NONE for ok, others failed
 */
-RTS_API rtError_t rtCdqEnQueuePtrMode(const char *queName, uint32_t cdqeIndex, const void *prtAddr,
+RTS_API rtError_t rtCdqEnQueuePtrMode(const char *queName, uint32_t cdqeIndex, const void *ptrAddr,
     rtStream_t stream);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
+
 }
 #endif
 #endif  // __CCE_RUNTIME_STARS_H
diff --git a/third_party/fwkacllib/inc/runtime/stream.h b/third_party/fwkacllib/inc/runtime/stream.h
index f9981514..3a078e99 100644
--- a/third_party/fwkacllib/inc/runtime/stream.h
+++ b/third_party/fwkacllib/inc/runtime/stream.h
@@ -20,7 +20,7 @@
 #include "base.h"
 #include "event.h"

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 extern "C" {
 #endif

@@ -211,7 +211,7 @@ RTS_API rtError_t rtDebugRegisterForStream(rtStream_t stream, uint32_t flag, con
 */
 RTS_API rtError_t rtDebugUnRegisterForStream(rtStream_t stream);

-#if defined(__cplusplus) && !defined(COMPILE_OMG_PACKAGE)
+#if defined(__cplusplus)
 }
 #endif
diff --git a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
index 07b32149..9350f9d4 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_acl_api.h
@@ -84,6 +84,7 @@
 #endif

 #include 
+#include 

 namespace Msprofiler {
 namespace Api {
@@ -105,6 +106,38 @@ extern "C" {

 MSVP_PROF_API uint64_t ProfGetOpExecutionTime(const void *data, uint32_t len, uint32_t index);

+typedef int Status;
+typedef struct aclprofSubscribeConfig aclprofSubscribeConfig1;
+///
+/// @ingroup AscendCL
+/// @brief subscribe profiling data of graph
+/// @param [in] graphId: the graph id subscribed
+/// @param [in] profSubscribeConfig: pointer to config of model subscribe
+/// @return Status result of function
+///
+Status aclgrphProfGraphSubscribe(const uint32_t graphId,
+                                 const aclprofSubscribeConfig1 *profSubscribeConfig);
+
+///
+/// @ingroup AscendCL
+/// @brief unsubscribe profiling data of graph
+/// @param [in] graphId: the graph id subscribed
+/// @return Status result of function
+///
+Status aclgrphProfGraphUnSubscribe(const uint32_t graphId);
+
+/**
+ * @ingroup AscendCL
+ * @brief get graph id from subscription data
+ *
+ * @param opInfo [IN] pointer to subscription data
+ * @param opInfoLen [IN] memory size of subscription data
+ * @param index [IN] index of the op record in the subscription data
+ *
+ * @retval graph id of subscription data
+ * @retval 0 for failed
+ */
+size_t aclprofGetGraphId(const void *opInfo, size_t opInfoLen, uint32_t index);
 #ifdef __cplusplus
 }
 #endif
diff --git a/third_party/fwkacllib/inc/toolchain/prof_callback.h b/third_party/fwkacllib/inc/toolchain/prof_callback.h
index 5073cfb1..36b55216 100644
--- a/third_party/fwkacllib/inc/toolchain/prof_callback.h
+++ b/third_party/fwkacllib/inc/toolchain/prof_callback.h
@@ -54,6 +54,17 @@ struct ReporterData {
     unsigned char *data;  // the data content
 };

+/**
+ * @name  HashData
+ * @brief struct of data to hash
+ */
+struct HashData {
+    int deviceId;         // the index of device
+    size_t dataLen;       // the length of data
+    unsigned char *data;  // the data content
+    uint64_t hashId;      // the id of hashed data
+};
+
 /**
  * @name  MsprofReporterModuleId
  * @brief module id of data to report
@@ -75,6 +86,7 @@ enum MsprofReporterCallbackType {
     MSPROF_REPORTER_INIT,          // init reporter
     MSPROF_REPORTER_UNINIT,        // uninit reporter
     MSPROF_REPORTER_DATA_MAX_LEN,  // data max length for calling report callback
+    MSPROF_REPORTER_HASH           // hash data to id
 };

 /**
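// Hedged usage sketch of the graph-subscription API declared above. The
// config pointer and graph id are placeholders, and treating 0 as the success
// value of Status is an assumption; real callers would also consume the
// subscribed records through the profiling reporter.
void SubscribeGraph(uint32_t graph_id, const aclprofSubscribeConfig1 *config) {
  Status ret = aclgrphProfGraphSubscribe(graph_id, config);
  if (ret != 0) {
    return;  // subscription failed
  }
  // ... consume records; aclprofGetGraphId(data, len, index) recovers the
  // graph id carried by each op record ...
  (void)aclgrphProfGraphUnSubscribe(graph_id);
}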