@@ -528,9 +528,19 @@ Status GeGenerator::GenerateModel(const Graph &graph, const string &file_name_pr
   return SUCCESS;
 }
-Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
-                                  const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
-                                  bool is_offline) {
+namespace {
+bool IsNeedConnectInputOpForSingleOp(GeTensorDesc &tensor_desc) {
+  bool is_need = true;
+  // A reserved format together with an undefined dtype marks an optional input in the single-op scene.
+  if (tensor_desc.GetFormat() == FORMAT_RESERVED && tensor_desc.GetDataType() == DT_UNDEFINED) {
+    is_need = false;
+  }
+  return is_need;
+}
+}
+Status GeGenerator::CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
+                                     const vector<GeTensor> &outputs) {
   GE_CHECK_NOTNULL_EXEC(op_desc, return PARAM_INVALID);
   if (!inputs.empty() && (inputs.size() != op_desc->GetAllInputsSize())) {
     GELOGE(PARAM_INVALID, "Tensor size: %zu, Inputs size: %zu", inputs.size(), op_desc->GetAllInputsSize());
@@ -540,7 +550,17 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
     GELOGE(PARAM_INVALID, "Tensor size: %zu, Outputs size: %zu", outputs.size(), op_desc->GetOutputsSize());
     return PARAM_INVALID;
   }
+  return SUCCESS;
+}
+Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
+                                  const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
+                                  bool is_offline) {
+  if (CheckForSingleOp(op_desc, inputs, outputs) != SUCCESS) {
+    GELOGE(PARAM_INVALID, "Input parameters are invalid when building single op!");
+    return PARAM_INVALID;
+  }
   OmgContext &omg_context = (impl_ == nullptr) ? domi::GetContext() : impl_->omg_context_;
   omg_context.is_dynamic_input = ContainsDynamicInpus(*op_desc);
@@ -575,12 +595,18 @@ Status GeGenerator::BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &in
   if (inputs.empty()) {
     for (const auto &input_desc : op_desc->GetAllInputsDescPtr()) {
       GE_CHECK_NOTNULL_EXEC(input_desc, return INTERNAL_ERROR);
+      if (!IsNeedConnectInputOpForSingleOp(*input_desc)) {
+        continue;
+      }
       GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, *input_desc, arg_index, false));
       arg_index++;
     }
   } else {
     for (const auto &in_desc : inputs) {
       GeTensorDesc input_desc = in_desc.GetTensorDesc();
+      if (!IsNeedConnectInputOpForSingleOp(input_desc)) {
+        continue;
+      }
       GE_CHK_STATUS_RET_NOLOG(AddInputs(compute_graph, op_node, input_desc, arg_index, true));
       arg_index++;
     }
@@ -545,7 +545,8 @@ bool CanReuseBySize(const map<string, uint64_t> &reusable_block_counts, const Me
 }
 bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t out_index, std::string &peer_name,
-                                                   uint32_t &peer_input_index, bool &no_need_assign_memory) {
+                                                   uint32_t &peer_input_index,
+                                                   bool &no_need_assign_memory, bool &reset_zero_copy_flag) {
   if (n == nullptr || n->GetAllOutDataAnchors().size() <= 0) {
     return false;
   }
@@ -571,6 +572,13 @@ bool BlockMemAssigner::IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t ou
                   return false;);
   // If GetBool fail, is_input_continuous is false.
+  bool is_input_continuous_no_padding = false;
+  (void)ge::AttrUtils::GetBool(peer_in_node_desc, ATTR_NAME_NOPADDING_CONTINUOUS_INPUT,
+                               is_input_continuous_no_padding);
+  if (is_input_continuous_no_padding) {
+    reset_zero_copy_flag = true;
+    return false;
+  }
   (void)ge::AttrUtils::GetBool(peer_in_node_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);
   GE_IF_BOOL_EXEC(is_input_continuous && CheckIsZeroMemNodeType(peer_node->GetType()),
@@ -1249,10 +1257,11 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
     std::string peer_name;
     uint32_t peer_input_index = 0;
     bool out_node_set_continuous_input = false;
+    bool reset_zero_copy_flag = false;
     bool no_need_assign_memory = ((size == 0) || CheckIsZeroMemNodeType(node->GetType()));
     if (!no_need_assign_memory) {
       out_node_set_continuous_input =
-          IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index, no_need_assign_memory);
+          IsOutNodeSetContinuousInput(node, i, peer_name, peer_input_index, no_need_assign_memory, reset_zero_copy_flag);
       GE_IF_BOOL_EXEC(!no_need_assign_memory,
                       no_need_assign_memory = IsAtomicOutputMemory(node, i, is_atomic, out_node_set_continuous_input););
     }
@@ -1269,6 +1278,9 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
     MemoryBlock *mem_block = ApplyOutMemory(node, i, ranges, is_op_reuse_mem_, out_node_set_continuous_input);
     if (mem_block != nullptr) {
+      GE_IF_BOOL_EXEC(reset_zero_copy_flag,
+                      mem_block->is_zero_copy_ = false;
+                      GELOGI("Node[%s] output[%u] need assign memory before reassign.", op_desc->GetName().c_str(), i););
       node_out_blocks_[node->GetName()].emplace_back(mem_block);
       if (out_node_set_continuous_input) {
         node_continuous_input_blocks_[peer_name][peer_input_index] = mem_block;
@@ -390,7 +390,7 @@ class BlockMemAssigner : public MemAssigner {
   bool IsZeroCopyBlock(const NodePtr &node, bool continuous);
   bool IsOutNodeSetContinuousInput(const NodePtr &n, uint32_t out_index, std::string &peer_name,
-                                   uint32_t &peer_input_index, bool &no_need_assign_memory);
+                                   uint32_t &peer_input_index, bool &no_need_assign_memory, bool &reset_zero_copy_flag);
   ///
   /// @ingroup GE
@@ -479,13 +479,15 @@ vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param
   ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_TYPE_LIST, workspace_memory_type);
   for (size_t i = 0; i < v_workspace_bytes.size(); ++i) {
     // Temporary solution, the aicpu workspace of multiple images cannot be shared.
-    if (has_workspace_reuse && i < workspace_reuse_flag.size() && !workspace_reuse_flag[i]) {
+    if (has_workspace_reuse && i < workspace_reuse_flag.size()
+        && !workspace_reuse_flag[i] && !model_param.is_single_op) {
       void *mem_addr = model_param.aicpu_mem_mall->Acquire(v_workspace_offset[i], v_workspace_bytes[i]);
       v_workspace_data_addr.push_back(mem_addr);
       GELOGI(
           "[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] aicpu workspace[%zu] offset[%ld] bytes[%ld] "
           "memaddr[%p]",
           model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i], mem_addr);
+      continue;
     } else if (has_mem_type_workspace && workspace_memory_type[i] == RT_MEMORY_P2P_DDR) {
       int64_t p2p_workspace_offset = v_workspace_offset[i];
       int64_t p2p_workspace_bytes = v_workspace_bytes[i];
@@ -56,6 +56,7 @@ struct RuntimeParam {
   uint32_t label_num = 0;
   uint64_t session_id = 0;
   uint32_t graph_id = 0;
+  bool is_single_op = false;
   std::unique_ptr<TsMemMall> ts_mem_mall;
   std::unique_ptr<TsMemMall> aicpu_mem_mall;
@@ -226,16 +226,11 @@ bool SingleOpParser::Validate(const SingleOpDesc &op_desc) {
   }
   int index = 0;
-  for (auto &tensor_desc : op_desc.input_desc) {
-    if (tensor_desc.type == DT_UNDEFINED) {
-      ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "index"}, {"input", std::to_string(index)});
-      GELOGE(false, "Input's dataType is invalid when the index is %d", index);
-      return false;
-    }
-    if (tensor_desc.format == FORMAT_RESERVED) {
-      ErrorManager::GetInstance().ATCReportErrMessage("E10028", {"input", "index"}, {"input", std::to_string(index)});
-      GELOGE(PARAM_INVALID, "Input's format is invalid when the index is %d", index);
+  for (auto &tensor_desc : op_desc.output_desc) {
+    if ((tensor_desc.type == DT_UNDEFINED && tensor_desc.format != FORMAT_RESERVED) ||
+        (tensor_desc.type != DT_UNDEFINED && tensor_desc.format == FORMAT_RESERVED)) {
+      ErrorManager::GetInstance().ATCReportErrMessage("E10027", {"input", "index"}, {"output", std::to_string(index)});
+      GELOGE(PARAM_INVALID, "Output's dataType or format is invalid when the index is %d", index);
       return false;
     }
     ++index;
@@ -45,6 +45,7 @@ std::vector<std::vector<void *>> BuildTaskUtils::GetAddresses(const OpDescPtr &o
   runtime_para.logic_var_base = kLogicVarBase;
   runtime_para.var_base = kVarBase;
   runtime_para.session_id = kSessionId;
+  runtime_para.is_single_op = true;
   ret.emplace_back(ModelUtils::GetInputDataAddrs(runtime_para, op_desc));
   ret.emplace_back(ModelUtils::GetOutputDataAddrs(runtime_para, op_desc));
@@ -53,7 +53,7 @@ class GeGenerator {
   Status GenerateOfflineModel(const Graph &graph, const std::string &file_name_prefix,
                               const std::vector<GeTensor> &inputs = std::vector<GeTensor>());
-  Status GenerateOnlineModel(const Graph &graph, const vector<GeTensor> &inputs, ge::ModelBufferData& model);
+  Status GenerateOnlineModel(const Graph &graph, const vector<GeTensor> &inputs, ge::ModelBufferData &model);
   Status GenerateInfershapeGraph(const Graph &graph);
@@ -77,16 +77,16 @@ class GeGenerator {
   /// @param [in] engine_type: specific engine.
   /// @param [out] model_buff: model buff of single op.
   /// @return SUCCESS or FAILED
-  Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs,
-                            const vector<GeTensor> &outputs, OpEngineType engine_type,
-                            ModelBufferData &model_buff);
+  Status BuildSingleOpModel(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
+                            OpEngineType engine_type, ModelBufferData &model_buff);
  private:
-  Status GenerateModel(const Graph &graph, const string &file_name_prefix,
-                       const vector<GeTensor> &inputs, ge::ModelBufferData& model, bool is_offline = true);
+  Status GenerateModel(const Graph &graph, const string &file_name_prefix, const vector<GeTensor> &inputs,
+                       ge::ModelBufferData &model, bool is_offline = true);
   Status BuildSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs,
-                       const string &model_file_name, OpEngineType engine_type,
-                       ModelBufferData &model_buff, bool is_offline = true);
+                       const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff,
+                       bool is_offline = true);
+  Status CheckForSingleOp(OpDescPtr &op_desc, const vector<GeTensor> &inputs, const vector<GeTensor> &outputs);
   class Impl;
@@ -528,7 +528,6 @@ uint32_t Fusion(ComputeGraphPtr model_graph, ComputeGraphPtr fusion_graph, kScop
   int stream_num = 1;
   int flag = 0;
-  // make_graph_nd(graph);
   NodePtr node_a = fusion_graph->AddNode(op_def_a);
   NodePtr node_b = fusion_graph->AddNode(op_def_b);
@@ -746,7 +746,6 @@ int TestBuildGraphTest(Func fun, Graph &graph, vector<ge::Tensor> &inputs, vecto
   shapeTensor.SetTensorDesc(shape_desc);
   vector<float> dataValuec;
   for (int i = 0; i < sizeshape; i++) {
-    // dataValuec.push_back((float)(i%255));
     dataValuec.push_back(1);
   }
@@ -764,7 +763,6 @@ int TestBuildGraphTest(Func fun, Graph &graph, vector<ge::Tensor> &inputs, vecto
   }
   shapeTensor1.SetData((uint8_t *)dataValuec1.data(), 4 * sizeshape1);
-  // inputs.push_back(shapeTensor1);
   return 0;
 }
@@ -69,12 +69,10 @@ TEST_F(UtestGeModelUnittest, save_model_to_file_success) {
   ge::Graph ge_graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
   string file_name = "model_data.pb";
   setenv("DUMP_MODEL", "1", true);
-  // EXPECT_EQ(ge_graph.SaveToFile(file_name), GRAPH_FAILED);
   setenv("DUMP_MODEL", "0", true);
 }
 TEST_F(UtestGeModelUnittest, load_model_from_file_success) {
   ge::Graph ge_graph;
   string file_name = "model_data.pb";
-  // EXPECT_EQ(ge_graph.LoadFromFile(file_name), GRAPH_SUCCESS);
 }
@@ -182,8 +182,6 @@ TEST_F(UtestModelManagerDavinciModel, contruct_modeldef_createfail) {
   ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_WINDOW, vector<int>({1, 1}));
   ge::AttrUtils::SetListInt(op_desc, ge::POOLING_ATTR_STRIDE, vector<int>({1, 1}));
-  // EXPECT_EQ(ge::SUCCESS, model.Init());
   model.GetEventList();
 }
@@ -200,7 +198,6 @@ TEST_F(UtestModelManagerDavinciModel, copy_input_data_to_model_fail) {
   input_data.blobs.push_back(data_buffer);
   model.op_list_.clear();
-  // EXPECT_EQ(ge::PARAM_INVALID, model.CopyInputDataToModel(input_data.blobs, 0));
   delete[](char *) data_buffer.data;
 }
@@ -210,7 +207,6 @@ TEST_F(UtestModelManagerDavinciModel, streamnum_success) {
   DavinciModel *model = new DavinciModel(0, g_label_call_back);
   OmeTestOpUtils::InitModel(*model);
-  // EXPECT_EQ(ge::SUCCESS, model->Init());
   EXPECT_EQ(0, model->StreamNum());
   EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -226,8 +222,6 @@ TEST_F(UtestModelManagerDavinciModel, eventnum_success) {
   OmeTestOpUtils::InitModel(*model);
-  // EXPECT_EQ(ge::SUCCESS, model->Init());
   EXPECT_EQ(0, model->EventNum());
   EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -241,8 +235,6 @@ TEST_F(UtestModelManagerDavinciModel, handlelist_success) {
   OmeTestOpUtils::InitModel(*model);
-  // EXPECT_EQ(ge::SUCCESS, model->Init());
   EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
   EXPECT_EQ(ge::SUCCESS, model->ModelRunStop());
@@ -256,8 +248,6 @@ TEST_F(UtestModelManagerDavinciModel, eventlist_success) {
   OmeTestOpUtils::InitModel(*model);
-  // EXPECT_EQ(ge::SUCCESS, model->Init());
   EXPECT_EQ(true, model->GetEventList().empty());
   EXPECT_EQ(ge::INTERNAL_ERROR, model->ModelRunStart());
@@ -282,7 +272,6 @@ TEST_F(UtestModelManagerDavinciModel, failed_reset_device) {
 TEST_F(UtestModelManagerDavinciModel, init_not_support_priority) {
   int32_t priority = 8;
   DavinciModel model(priority, g_label_call_back);
-  // EXPECT_EQ(ge::PARAM_INVALID, model.Init());
 }
 // test GetInputOutputDescInfo
@@ -346,7 +335,6 @@ TEST_F(UtestModelManagerDavinciModel, CopyTensorFromSrcVarNode_success) {
   NodePtr dst_node = graph->AddNode(op_desc_ptr);
   DavinciModel model(0, g_label_call_back);
   Status ret = model.CopyTensorFromSrcVarNode(src_node, dst_node);
-  // EXPECT_EQ(SUCCESS, ret);
 }
 TEST_F(UtestModelManagerDavinciModel, CopyVarData_graph_is_nullptr) {
@@ -370,7 +358,6 @@ TEST_F(UtestModelManagerDavinciModel, copy_var_data_success) {
   DavinciModel model(0, g_label_call_back);
   Status ret = model.CopyVarData(graph);
-  // EXPECT_EQ(SUCCESS, ret);
 }
 TEST_F(UtestModelManagerDavinciModel, get_input_output_desc_info_without_data_op_list) {
@@ -540,7 +527,6 @@ TEST_F(UtestModelManagerDavinciModel, get_flow_ctrl_op_list_success) {
   std::map<uint32_t, uint32_t> flowctrl_op_index_internal_map;
   flowctrl_op_index_internal_map.insert(pair<uint32_t, uint32_t>(1, 1));
   model.flowctrl_op_index_internal_map_ = flowctrl_op_index_internal_map;
-  // EXPECT_EQ(flowctrl_op_index_internal_map_, model.GetFlowctrlOpList());
 }
 // test SetFlowctrlOpList
@@ -1204,10 +1190,8 @@ TEST_F(UtestModelManagerDavinciModel, profiling_model_success) {
   input_data.index = 0;
   input_data.model_id = 1;
   input_data.blobs.push_back(data_buffer);
-  // model.SinkModelProfile(&model);
   rtFreeHost(data.model_data);
-  // delete stream;
   delete[](char *) data_buffer.data;
   delete model_def;
 }
@@ -153,20 +153,6 @@ TEST_F(UtestModelManagerModelManager, case_load_model_encypt_not_match) {
   delete[](uint8_t *) data.model_data;
 }
-#if 0
-TEST_F(UtestModelManagerModelManager, case_load_model_signature_failed)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenUnencryptModelData(data);
-  uint32_t model_id = 1;
-  MOCKER(&WBDecryptor::CheckSignature).stubs().will(returnValue(false));
-  EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-#endif
 TEST_F(UtestModelManagerModelManager, case_load_model_encypt_type_unsupported) {
   ModelManager mm;
   ge::ModelData data;
@@ -178,87 +164,6 @@ TEST_F(UtestModelManagerModelManager, case_load_model_encypt_type_unsupported) {
   delete[](uint8_t *) data.model_data;
 }
-#if 0
-TEST_F(UtestModelManagerModelManager, case_load_model_header_len_failed)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenEncryptModelData(data);
-  ModelFileHeader *header = (ModelFileHeader*)data.model_data;
-  data.model_len -= header->length;
-  header->length = 0;
-  uint32_t model_id = 1;
-  EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-#endif
-#if 0
-TEST_F(UtestModelManagerModelManager, case_load_success)
-{
-  const char* model_file = "bin/llt/framework/domi/ut/omg/data/leakyrelu.dav";
-  const char* json_file = "test.json";
-  const char* key = "bin/llt/framework/domi/ut/omg/data/leakyrelu.dav.PASSCODE";
-  ge::ModelData model;
-  Status ret = ModelParserBase::LoadFromFile(model_file, key, 0, &model);
-  EXPECT_EQ(ge::SUCCESS, ret);
-  ModelManager mm;
-  uint32_t model_id = 1;
-  ret = mm.LoadModelOffline(model_id, model, UTEST_CALL_BACK_FUN);
-  EXPECT_EQ(ge::SUCCESS, ret);
-  if (model.model_data)
-    delete[](uint8_t*)model.model_data;
-}
-#endif
-#if 0
-TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_signature_failed)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenEncryptModelData(data);
-  uint32_t model_id = 1;
-  data.key;
-  EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_invalid_key_len)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenEncryptModelData(data);
-  data.key = "0123456789abcdef0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0";
-  uint32_t model_id = 1;
-  EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_invalid_key_char)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenEncryptModelData(data);
-  data.key = "0123456789abcdef0123456789ABCDEF0123456789ABCDEF0123456789ABCDEG";
-  uint32_t model_id = 1;
-  EXPECT_EQ(ge::PARAM_INVALID, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-TEST_F(UtestModelManagerModelManager, case_load_encrypt_model_load_failed)
-{
-  ModelManager mm;
-  ge::ModelData data;
-  GenEncryptModelData(data);
-  uint32_t model_id = 1;
-  EXPECT_EQ(ge::INTERNAL_ERROR, mm.LoadModelOffline(model_id, data, UTEST_CALL_BACK_FUN));
-  delete[](uint8_t*)data.model_data;
-}
-#endif
 shared_ptr<ge::ModelListener> LabelCallBack(new DModelListener());
 // test HandleCommand
@@ -76,7 +76,6 @@ class OmeTestOpUtils {
       return nullptr;
     }
-    // return std::make_shared<ge::Node>(op_desc, nullptr);
     auto g = std::make_shared<ge::ComputeGraph>("g");
     return g->AddNode(std::move(op_desc));
   }
@@ -403,8 +402,6 @@ class OmeTestOpDescBuilder {
       if (SUCCESS != res) {
         GELOGE(ge::FAILED, "Finish: GraphUtils::AddEdge failed");
       }
-      // ge::NodePtr src_node = node->GetOwnerComputeGraph()->AddNodeFront(src_op_desc);
-      // node->AddLinkFrom(src_node);
     }
     {
@@ -434,8 +431,6 @@ class OmeTestOpDescBuilder {
   vector<ge::GeTensorPtr> weights_;
   int64_t eventId_ = -1;
   int64_t scopeid_ = -1;
-  // std::shared_ptr<ge::ComputeGraph> graph_;
 };
 #endif  // OME_REBUILD_OME_OP_TEST_UTILS_H
@@ -122,7 +122,6 @@ TEST_F(UtestGraphPassesDimensionAdjustPass, node_get_original_type_failed) {
   std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
   ge::Status ret = pass->Run(op_node);
-  // EXPECT_EQ(ge::SUCCESS, ret);
 }
 TEST_F(UtestGraphPassesDimensionAdjustPass, node_not_register_op) {
@@ -93,7 +93,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test2) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test3) {
@@ -123,7 +122,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test3) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test4) {
@@ -154,7 +152,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test4) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test5) {
@@ -186,7 +183,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test5) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test6) {
@@ -219,7 +215,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test6) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test7) {
@@ -253,7 +248,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test7) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test8) {
@@ -288,7 +282,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test8) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test9) {
@@ -322,7 +315,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test9) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test10) {
@@ -357,7 +349,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test10) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test11) {
@@ -392,7 +383,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test11) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test12) {
@@ -427,7 +417,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test12) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test13) {
@@ -462,7 +451,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test13) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test14) {
@@ -497,7 +485,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test14) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test15) {
@@ -532,7 +519,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test15) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test16) {
@@ -567,7 +553,6 @@ TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test16) {
   shared_ptr<Kernel> kernel = KernelFactory::Instance().Create(STRIDEDSLICE);
   ge::Status status = kernel->Compute(op_desc_ptr, input, outputs);
-  // EXPECT_EQ(PARAM_INVALID, status);
 }
 TEST_F(UtestGraphPassesFoldingKernelStridedSliceKernel, Test17) {
@@ -167,7 +167,6 @@ TEST_F(UtestGraphPassesGuaranteeConstPass, get_origenal_type_fail) {
   string type2 = "FrameworkOp";
   node->GetOpDesc()->SetType(type2);
   ge::Status ret = guarantee_const_op_remove_pass_->Run(node);
-  // EXPECT_EQ(ge::SUCCESS, ret);
 }
 TEST_F(UtestGraphPassesGuaranteeConstPass, int32_success_6) {
@@ -135,7 +135,6 @@ TEST_F(UtestIdentityPass, succ) {
   string type2 = "FrameworkOp";
   node->GetOpDesc()->SetType(type2);
   status = pass.Run(node);
-  // EXPECT_EQ(ge::SUCCESS, status);
   NodePtr node_err = AddNode(graph, "Identity", IDENTITY, 1, 2);
   status = pass.Run(node_err);
@@ -845,7 +845,6 @@ TEST_F(UtestGraphPassesNetOutputPass, out_node_remove_check_fail) {
   ge::NodePtr mul2 = compute_graph->FindNode("Mul2");
   std::vector<std::pair<ge::NodePtr, int32_t>> output_nodes = {{mul1, 0}, {mul2, 0}};
   compute_graph->SetGraphOutNodesInfo(output_nodes);
-  // compute_graph->RemoveNode(mul1);
   mul1->GetInDataAnchor(0)->UnlinkAll();
   mul1->GetInDataAnchor(1)->UnlinkAll();
   GraphUtils::RemoveNodeWithoutRelink(compute_graph, mul1);
@@ -75,5 +75,4 @@ TEST_F(UtestPlaceholderWithDefaultPass, succ) {
   string type2 = "FrameworkOp";
   node->GetOpDesc()->SetType(type2);
   pass.Run(node);
-  // EXPECT_EQ(ge::SUCCESS, status);
 }
@@ -75,5 +75,4 @@ TEST_F(UtestPreventGradientPass, succ) {
   string type2 = "FrameworkOp";
   node->GetOpDesc()->SetType(type2);
   status = pass.Run(node);
-  // EXPECT_EQ(ge::SUCCESS, status);
 }
@@ -178,6 +178,5 @@ TEST_F(UtestReshapeRemovePass, reshape_remove_without_const) {
   EXPECT_EQ(var1->GetOutDataNodes().at(0)->GetName(), "transdata1");
   EXPECT_NE(const1, nullptr);
   EXPECT_EQ(const1->GetOutNodes().size(), 1);
-  // EXPECT_EQ(const1->GetOutDataNodes().at(0)->GetName(), "transdata2");
 }
 }  // namespace ge
@@ -78,5 +78,4 @@ TEST_F(UtestSnapshotPass, succ) {
   string type2 = "FrameworkOp";
   snapshot->GetOpDesc()->SetType(type2);
   status = pass.Run(snapshot);
-  // EXPECT_EQ(ge::SUCCESS, status);
 }
@@ -176,7 +176,6 @@ TEST_F(UtestGraphPassesStopGradientPass, get_origenal_type_fail) {
   string type2 = "FrameworkOp";
   node->GetOpDesc()->SetType(type2);
   ge::Status ret = pass_->Run(node);
-  // EXPECT_EQ(ge::SUCCESS, ret);
 }
 TEST_F(UtestGraphPassesStopGradientPass, size_check_fail) {
   vector<int64_t> dims_vec_0 = {8, 2};
@@ -256,7 +256,6 @@ TEST_F(UtestGraphPassesSwitchPass, inactive_output_not_exists) {
   output_true_node_->GetOutDataAnchor(0)->UnlinkAll();
   GraphUtils::RemoveNodeWithoutRelink(graph_, output_true_node_);
   switch_node_->GetOutDataAnchor(1)->UnlinkAll();
-  // switch_node_->outDataAnchors_.pop_back();
   /// input
   ///   |
@@ -394,7 +393,6 @@ TEST_F(UtestGraphPassesSwitchPass, dead_output_connected_to_merge) {
   ///   Merge
   bool pred_value = true;
   BuildDefaultGraph(false, &pred_value);
-  // graph_->RemoveNode(output_false_node_);
   output_false_node_->GetOutDataAnchor(0)->UnlinkAll();
   GraphUtils::RemoveNodeWithoutRelink(graph_, output_false_node_);
   switch_node_->GetOutDataAnchor(0)->UnlinkAll();
@@ -106,7 +106,6 @@ TEST_F(UtestGraphPassesUnusedAndIsolatedOpRemovePass, transpose_and_conv) {
   Status status = PassManager::Run(graph, passes);
   EXPECT_EQ(SUCCESS, status);
   NodePtr found_node0 = graph->FindNode("transpose1");
-  // EXPECT_EQ(nullptr, found_node0);
   NodePtr found_node = graph->FindNode("conv1");
   EXPECT_EQ(conv_node, found_node);
 }
@@ -343,8 +343,6 @@ bool BuildComputeGraph0(ge::ComputeGraphPtr &graph) {
   if (ge::GraphUtils::AddEdge(node_apply_monetum->GetOutDataAnchor(0), node_5d_to_4d_1->GetInDataAnchor(0)) !=
       ge::SUCCESS) {
-    /// GELOGE(FAILED, "ge::GraphUtils::AddEdge(node_apply_monetum->GetOutDataAnchor(0),
-    /// node_5d_to_4d_1->GetInDataAnchor(0) ) Failed.");
   };
   ge::GraphUtils::AddEdge(node_5d_to_4d_1->GetOutDataAnchor(0), node_ref->GetInDataAnchor(0));
@@ -395,8 +393,6 @@ bool BuildComputeGraph1(ge::ComputeGraphPtr &graph) {
   if (ge::GraphUtils::AddEdge(node_apply_monetum->GetOutDataAnchor(0), node_5d_to_4d_1->GetInDataAnchor(0)) !=
       ge::SUCCESS) {
-    /// GELOGE(FAILED, "ge::GraphUtils::AddEdge(node_apply_monetum->GetOutDataAnchor(0),
-    /// node_5d_to_4d_1->GetInDataAnchor(0) ) Failed.");
   };
   ge::GraphUtils::AddEdge(node_5d_to_4d_1->GetOutDataAnchor(0), node_ref->GetInDataAnchor(0));