diff --git a/ge/client/ge_api.cc b/ge/client/ge_api.cc
index d76b9120..b3918545 100644
--- a/ge/client/ge_api.cc
+++ b/ge/client/ge_api.cc
@@ -681,8 +681,35 @@ Status Session::BuildGraph(uint32_t graph_id, const std::vector
   return SUCCESS;
 }
 
+// Build Graph
+Status Session::BuildGraph(uint32_t graph_id, const std::vector<ge::Tensor> &inputs) {
+  ErrorManager::GetInstance().SetStage(error_message::kModelCompile, error_message::kOther);
+  ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id);
+  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
+  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
+    GELOGE(GE_CLI_GE_NOT_INITIALIZED,
+           "[Build][Graph]Failed, the GELib instance is nullptr or not initialized, "
+           "session_id %lu, graph_id %u", sessionId_, graph_id);
+    REPORT_INNER_ERROR("E19999",
+                       "Build graph failed, the GELib instance is nullptr or not initialized, "
+                       "session_id %lu, graph_id %u", sessionId_, graph_id);
+    return FAILED;
+  }
+  GELOGT(TRACE_RUNNING, "Building Graph");
+  Status ret = instance_ptr->SessionManagerObj().BuildGraph(sessionId_, graph_id, inputs);
+  if (ret != SUCCESS) {
+    GELOGE(ret,
+           "[Build][Graph]Failed, error code:%u, session_id:%lu, graph_id:%u.",
+           ret, sessionId_, graph_id);
+    REPORT_CALL_ERROR("E19999", "Build graph failed, error code:%u, "
+                      "session_id:%lu, graph_id:%u", ret, sessionId_, graph_id);
+    return FAILED;
+  }
+  return SUCCESS;
+}
+
 // Run Graph Asynchronously
-Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<ge::InputTensorInfo> &inputs,
+Status Session::RunGraphAsync(uint32_t graph_id, const std::vector<ge::Tensor> &inputs,
                               RunAsyncCallback callback) {
   ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute);
   ErrorManager::GetInstance().GenWorkStreamIdBySessionGraph(sessionId_, graph_id);
diff --git a/ge/graph/execute/graph_execute.cc b/ge/graph/execute/graph_execute.cc
index d8d5cf1b..5649d483 100755
--- a/ge/graph/execute/graph_execute.cc
+++ b/ge/graph/execute/graph_execute.cc
@@ -382,7 +382,7 @@ Status GraphExecutor::ExecuteGraph(GraphId graph_id, const GeRootModelPtr &ge_ro
 }
 
 Status GraphExecutor::ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
-                                        const std::vector<InputTensorInfo> &input_tensor,
+                                        const std::vector<ge::Tensor> &input_tensor,
                                         const RunAsyncCallback& callback) {
   GELOGI("[GraphExecutor] Start to async execute graph, graph_id=%u", graph_id);
   if (graph_id != last_graph_id_) {
@@ -529,7 +529,7 @@ Status GraphExecutor::SetCallback(uint32_t model_id, const GeRootModelPtr &ge_ro
   return SUCCESS;
 }
 
-Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<InputTensorInfo> &inputs,
+Status GraphExecutor::AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector<ge::Tensor> &inputs,
                                         const RunAsyncCallback &callback) {
   uint32_t model_id = GetExecuteModelId(ge_root_model);
   if (model_id == kInvalidModelId) {
diff --git a/ge/graph/execute/graph_execute.h b/ge/graph/execute/graph_execute.h
index 54687930..aa791c9b 100755
--- a/ge/graph/execute/graph_execute.h
+++ b/ge/graph/execute/graph_execute.h
@@ -50,7 +50,7 @@ class GraphExecutor {
                          std::vector<GeTensor> &output_tensor);
 
   ge::Status ExecuteGraphAsync(GraphId graph_id, const GeRootModelPtr &ge_root_model,
-                               const std::vector<InputTensorInfo> &input_tensor, const RunAsyncCallback &callback);
+                               const std::vector<ge::Tensor> &input_tensor, const RunAsyncCallback &callback);
 
   Status ExecuteGraphWithStream(GraphId graph_id,
                                 rtStream_t stream,
@@ -137,7 +137,7 @@ class GraphExecutor {
   Status SyncExecuteModel(uint32_t model_id, const std::vector<GeTensor> &input_tensor,
std::vector &output_tensor); - Status AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector &input_tensor, + Status AsyncExecuteModel(const GeRootModelPtr &ge_root_model, const std::vector &input_tensor, const RunAsyncCallback &callback); void InitModelIdInfo(std::vector &out_model_id_info, std::vector &sub_graph_vec, diff --git a/ge/graph/load/model_manager/davinci_model.cc b/ge/graph/load/model_manager/davinci_model.cc index d0624dd8..993400db 100755 --- a/ge/graph/load/model_manager/davinci_model.cc +++ b/ge/graph/load/model_manager/davinci_model.cc @@ -122,6 +122,8 @@ const char* const kInferEndTime = "infer_end_time"; const char* const kOutputBeginTime = "output_start_time"; const char* const kOutputEndTime = "output_end_time"; const uint32_t kStringHeadElems = 2; +const uint32_t kPlacementHostData = 0; +const size_t kAlignment = 64; inline bool IsDataOp(const std::string &node_type) { return (node_type == DATA_TYPE) || (node_type == AIPP_DATA_TYPE) || (node_type == ANN_DATA_TYPE); @@ -2261,8 +2263,7 @@ Status DavinciModel::GetOutputDescInfo(vector &output_descs return SUCCESS; } -Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data) { - rtMemcpyKind_t kind = device_data ? RT_MEMCPY_DEVICE_TO_DEVICE : RT_MEMCPY_HOST_TO_DEVICE; +Status DavinciModel::CopyInputData(const InputData &input_data) { const std::vector &blobs = input_data.blobs; for (const auto &data : input_data_info_) { if (data.first >= blobs.size()) { @@ -2275,6 +2276,8 @@ Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data } const DataBuffer &data_buf = blobs[data.first]; + rtMemcpyKind_t kind = + data_buf.placement == kPlacementHostData ? RT_MEMCPY_HOST_TO_DEVICE : RT_MEMCPY_DEVICE_TO_DEVICE; if (data_buf.length == 0) { GELOGW("No data need to memcpy!"); return SUCCESS; @@ -2615,7 +2618,7 @@ Status DavinciModel::InitOutputTensorInfo(const OpDescPtr &op_desc) { return SUCCESS; } -Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector &outputs) { +Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector &outputs) { GE_CHECK_NOTNULL(output_data); if (!output_data->blobs.empty()) { GELOGI("No need to generate output tensor info, model id:%u", model_id_); @@ -2644,26 +2647,25 @@ Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector data_buf(new (std::nothrow) uint8_t[output_buffer_size[i]]); - if (data_buf == nullptr) { - REPORT_CALL_ERROR("E19999", "New buffer failed, size:%ld, model_id:%u", - output_buffer_size[i], model_id_); - GELOGE(GE_GRAPH_MALLOC_FAILED, "Malloc buffer failed."); - return GE_GRAPH_MALLOC_FAILED; - } - output_data->blobs.push_back({data_buf.get(), static_cast(output_buffer_size[i]), false}); - OutputTensorInfo output; - output.dims = output_shape_info[i]; - output.data = std::move(data_buf); - output.length = output_buffer_size[i]; - outputs.emplace_back(std::move(output)); + auto aligned_ptr = MakeShared(output_buffer_size[i], kAlignment); + GE_CHECK_NOTNULL(aligned_ptr); + GeShape ge_shape(output_shape_info[i]); + GeTensorDesc tensor_desc; + tensor_desc.SetShape(ge_shape); + GeTensor ge_tensor(tensor_desc); + ge_tensor.SetData(aligned_ptr, output_buffer_size[i]); + ge::Tensor output_tensor = TensorAdapter::AsTensor(ge_tensor); + + auto data_ptr = aligned_ptr->MutableGet(); + output_data->blobs.push_back( + {reinterpret_cast(data_ptr), static_cast(output_buffer_size[i]), false}); + outputs.emplace_back(std::move(output_tensor)); GELOGD("Output index:%zu, output dims is %s, 
data length:%lu.", i, - formats::JoinToString(output.dims).c_str(), output.length); + formats::JoinToString(output_shape_info[i]).c_str(), output_buffer_size[i]); } return SUCCESS; } - /// /// @ingroup ge /// @brief send Output Op result to upper layer @@ -2678,7 +2680,7 @@ Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector outputs; + std::vector outputs; // return result is not required if (!rslt_flg && !seq_end_flag) { @@ -2742,7 +2744,7 @@ Status DavinciModel::ReturnNoOutput(uint32_t data_id) { GELOGI("ReturnNoOutput model id:%u.", model_id_); GE_CHK_BOOL_EXEC(listener_ != nullptr, return PARAM_INVALID, "listener_ is null!"); - std::vector outputs; + std::vector outputs; GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, SUCCESS, outputs), "OnComputeDone failed."); return SUCCESS; } @@ -2798,7 +2800,7 @@ void *DavinciModel::Run(DavinciModel *model) { GELOGI("Copy input data, model id:%u", model_id); GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), model->SetProfileTime(MODEL_PRE_PROC_START)); - ret = model->CopyInputData(current_data, false); + ret = model->CopyInputData(current_data); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( ret != SUCCESS, (void)model->ReturnResult(current_data.index, false, false, data_wrapper->GetOutput()); continue, "Copy input data to model failed."); // [No need to check value] diff --git a/ge/graph/load/model_manager/davinci_model.h b/ge/graph/load/model_manager/davinci_model.h index 0b06736c..a4abcae6 100755 --- a/ge/graph/load/model_manager/davinci_model.h +++ b/ge/graph/load/model_manager/davinci_model.h @@ -639,7 +639,7 @@ class DavinciModel { Status UpdateIoTaskArgs(const map &data_info, bool is_input, const vector &blobs, bool is_dynamic, const string &batch_label); - Status CopyInputData(const InputData &input_data, bool device_data = false); + Status CopyInputData(const InputData &input_data); Status CopyOutputData(uint32_t data_id, OutputData &output_data, rtMemcpyKind_t kind); @@ -884,7 +884,7 @@ class DavinciModel { Status SinkTimeProfile(const InputData ¤t_data); Status InitOutputTensorInfo(const OpDescPtr &op_desc); - Status GenOutputTensorInfo(OutputData *output_data, vector &outputs); + Status GenOutputTensorInfo(OutputData *output_data, vector &outputs); Status InitInputDescInfo(const OpDescPtr &op_desc); Status InitOutputDescInfo(const OpDescPtr &op_desc, const vector &out_node_name); diff --git a/ge/graph/load/model_manager/model_manager.cc b/ge/graph/load/model_manager/model_manager.cc index 3428dad1..b731aefe 100755 --- a/ge/graph/load/model_manager/model_manager.cc +++ b/ge/graph/load/model_manager/model_manager.cc @@ -542,7 +542,7 @@ Status ModelManager::GetCurDynamicDims(const vector> &user_real_ /// @brief load Input and output TensorInfo for Model /// @return Status run result /// -Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector &inputs) { +Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector &inputs) { std::shared_ptr model = GetModel(model_id); auto hybrid_model = GetHybridModel(model_id); if (hybrid_model == nullptr) { @@ -556,9 +556,11 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector(const_cast(inputs[i].GetData())); + data.length = inputs[i].GetSize(); + data.placement = static_cast(tensor_desc.GetPlacement()); + input_data.shapes.emplace_back(tensor_desc.GetShape().GetDims()); input_data.blobs.push_back(data); } if (!GetLocalOmgContext().user_input_dims.empty() && GetLocalOmgContext().need_multi_batch) { @@ -608,7 
+610,6 @@ Status ModelManager::DataInputTensor(uint32_t model_id, const std::vector &inputs); + ge::Status DataInputTensor(uint32_t model_id, const std::vector &inputs); /// /// @ingroup domi_ome diff --git a/ge/graph/manager/graph_manager.cc b/ge/graph/manager/graph_manager.cc index 17779161..b202daf0 100755 --- a/ge/graph/manager/graph_manager.cc +++ b/ge/graph/manager/graph_manager.cc @@ -106,6 +106,7 @@ #include "graph/common/omg_util.h" #include "common/formats/utils/formats_trans_utils.h" #include "register/custom_pass_helper.h" +#include "external/graph/types.h" namespace { const char *const kSummary = "Summary"; @@ -126,6 +127,7 @@ const uint32_t kNotAdded = 0; const uint32_t kStartAdd = 1; const uint32_t kDoneAdded = 2; const uint32_t kNeverLoaded = 0; +const size_t kAlignment = 64; bool IsTailingOptimization() { string is_tailing_optimization_option; @@ -368,9 +370,9 @@ void GraphManager::RemoveAddGraphCondition(GraphId graph_id) { auto it = graph_id_to_add_graph_cond_.find(graph_id); if (it != graph_id_to_add_graph_cond_.end()) { graph_id_to_add_graph_cond_.erase(it); - GELOGD("Successfully removed add_graph_cond of graph [id:%u].", graph_id); + GELOGD("Successfully remove add_graph_cond of graph [id:%u].", graph_id); } else { - GELOGD("Graph [id:%u] has not been added. no need to remove.", graph_id); + GELOGD("Graph [id:%u] has not been added, no need to be removed.", graph_id); } } @@ -537,7 +539,7 @@ Status GraphManager::CheckGraphAdded(const GraphId &graph_id, const Graph &graph bool graph_has_been_added = false; if (AttrUtils::GetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, graph_has_been_added) && graph_has_been_added) { - REPORT_INNER_ERROR("E19999", "Get Attr:%s from graph:%u fail", + REPORT_INNER_ERROR("E19999", "Get Attr:%s from graph:%u fail.", ATTR_NAME_GRAPH_HAS_BEEN_ADDED.c_str(), graph_id); GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST, "[GraphManager] same graph object can not be added again, graph_id = %u.", graph_id); @@ -896,7 +898,7 @@ Status GraphManager::PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node, } Status GraphManager::SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id) { - GELOGD("set rt_context: session id: %lu, graph id: %u, mode %d, device id:%u.", + GELOGD("Set rt_context: session id: %lu, graph id: %u, mode %d, device id:%u.", session_id, graph_id, static_cast(mode), ge::GetContext().DeviceId()); rtError_t rt_ret = rtCtxCreate(&rt_context, mode, ge::GetContext().DeviceId()); @@ -942,7 +944,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vectorBuildJsonObject(session_id, compute_graph->GetGraphID()), "BuildJsonObject Failed") - GEEVENT("PreRun start: graph node size %zu, session id %lu, graph id %u, graph name %s", + GEEVENT("PreRun start: graph node size %zu, session id %lu, graph id %u, graph name %s.", compute_graph->GetDirectNodesSize(), session_id, compute_graph->GetGraphID(), compute_graph->GetName().c_str()); GE_DUMP(compute_graph, "PreRunBegin"); @@ -963,7 +965,7 @@ Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vectorGetName().c_str()); + GELOGE(ret, "Run PreRunOptimizeOriginalGraph failed for graph:%s.", compute_graph->GetName().c_str()); return ret; } } @@ -1058,7 +1060,7 @@ Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std: // release rts generate context RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId()); if (ret != SUCCESS) { - GELOGE(ret, "PreRun Failed. 
graph_id:%u.", graph_node->GetGraphId()); + GELOGE(ret, "PreRun Failed, graph_id:%u.", graph_node->GetGraphId()); return ret; } } @@ -2943,7 +2945,7 @@ Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager } // run graph async on session -Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector &inputs, +Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector &inputs, uint64_t session_id, RunAsyncCallback callback) { ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute); GELOGI("[GraphManager] Start to run graph async, graph_id=%u, inputsSize=%zu.", graph_id, inputs.size()); @@ -3015,14 +3017,6 @@ Status GraphManager::IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_m return FAILED; } -void GraphManager::ConstructGeInput(const vector &inputs, vector &ge_inputs) { - for (auto const &input : inputs) { - GeTensorDesc input_tensor_desc(GeShape(input.dims)); - input_tensor_desc.SetDataType(static_cast(input.data_type)); - ge_inputs.emplace_back(input_tensor_desc); - } -} - Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args, GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) { if (!graph_manager->IsGraphNeedBuild(graph_node)) { @@ -3041,7 +3035,9 @@ Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const GeModelPtr ge_model = nullptr; if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) { std::vector ge_inputs; - ConstructGeInput(args.input_tensor, ge_inputs); + for (const auto &item: args.input_tensor) { + ge_inputs.emplace_back(TensorAdapter::AsGeTensor(item)); + } Status ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id); // release rts generate context RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId()); @@ -3153,20 +3149,19 @@ void GraphManager::PreRunThread(GraphManager *graph_manager) { } } -void GraphManager::ParseInputsDimsForData(const std::vector &input_tensor) { +void GraphManager::ParseInputsDimsForData(const std::vector &input_tensor) { GELOGD("Start parse input dims from data."); for (size_t i = 0; i < input_tensor.size(); ++i) { - std::vector dynamic_dim; - for (size_t j = 0; j < input_tensor[i].dims.size(); ++j) { - dynamic_dim.emplace_back(input_tensor[i].dims[j]); - } - GELOGD("Input tensor dims is %s.", formats::JoinToString(dynamic_dim).c_str()); - GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor[i].dims); + const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc(); + const Shape &shape = tensor_desc.GetShape(); + const auto &shape_dims = shape.GetDims(); + GELOGD("Input tensor dims is %s.", formats::JoinToString(shape_dims).c_str()); + GetLocalOmgContext().user_real_input_dims.emplace_back(shape_dims); } } Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector &dynamic_nodes, - const std::vector &input_tensor) { + const std::vector &input_tensor) { GELOGD("Start parse inputs dims when coexist data and getnext sink."); for (size_t i = 0; i < dynamic_nodes.size(); ++i) { auto op_desc = dynamic_nodes.at(i)->GetOpDesc(); @@ -3189,13 +3184,16 @@ Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector return PARAM_INVALID; } - GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor.at(index).dims); - GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(input_tensor.at(index).dims).c_str()); + const TensorDesc 
&tensor_desc = input_tensor[i].GetTensorDesc(); + const Shape &shape = tensor_desc.GetShape(); + const auto &shape_dims = shape.GetDims(); + GELOGI("Shape dims of %zu data is %s.", index, formats::JoinToString(shape_dims).c_str()); + GetLocalOmgContext().user_real_input_dims.emplace_back(std::move(shape_dims)); } return SUCCESS; } -Status GraphManager::ParseInputsDims(const std::vector &input_tensor) { +Status GraphManager::ParseInputsDims(const std::vector &input_tensor) { GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size()); GetLocalOmgContext().user_real_input_dims.clear(); if (!GetLocalOmgContext().dynamic_node_type.empty()) { @@ -3326,13 +3324,13 @@ void GraphManager::ReturnError(GraphManager *graph_manager, RunAsyncCallback cal } StopQueue(graph_manager); GELOGE(ret, "%s.", log.c_str()); - std::vector outputs; + std::vector outputs; callback(ret, outputs); } -void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, - RunAsyncCallback callback, Status ret, const string &log) { - std::vector outputs; +void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback, + Status ret, const string &log) { + std::vector outputs; auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph()); if (graph_manager == nullptr || compute_graph == nullptr) { REPORT_INNER_ERROR("E19999", "Param graph_manager or compute_graph in graph_node is nullptr, " @@ -3348,9 +3346,10 @@ void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_ } for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); i++) { auto input_desc = node->GetOpDesc()->MutableInputDesc(i); - ge::OutputTensorInfo tensor; - tensor.dims = input_desc->GetShape().GetDims(); - tensor.data_type = static_cast(input_desc->GetDataType()); + GeShape ge_shape(input_desc->GetShape().GetDims()); + GeTensorDesc ge_tensor_desc; + ge_tensor_desc.SetShape(ge_shape); + GeTensor ge_tensor(ge_tensor_desc); int64_t len = 1; if (input_desc->GetShape().GetDims() != std::vector({})) { len = input_desc->GetShape().GetShapeSize(); @@ -3366,30 +3365,19 @@ void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_ GELOGI("getted shape size is 0.Do process as empty tensor!"); len = 1; } - auto size = GetSizeByDataType(input_desc->GetDataType()); - if (size <= 0) { - REPORT_INNER_ERROR("E19999", "data_type:%s of op:%s(%s) is not support, input_index:%zu check invalid", - ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str(), - node->GetName().c_str(), node->GetType().c_str(), i); - GELOGE(PARAM_INVALID, "Failed to get cube size, the data type %s is invalid", - ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str()); - callback(GRAPH_FAILED, outputs); + auto length = GetSizeInBytes(len, input_desc->GetDataType()); + auto aligned_ptr = MakeShared(length, kAlignment); + if (aligned_ptr == nullptr) { + REPORT_INNER_ERROR("E19999", "Aligned_ptr is nullptr"); + GELOGE(GRAPH_FAILED, "[Analyze Mode] Aligned_ptr is nullptr"); return; } - if (CheckInt64MulOverflow(len, static_cast(size)) != true) { - REPORT_INNER_ERROR("E19999", "shape_size:%ld of op:%s(%s) will overflow after multiply by " - "size:%u of data_type:%s, input_index:%zu, check invalid", len, - node->GetName().c_str(), node->GetType().c_str(), size, - ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str(), i); - GELOGE(MEMALLOC_FAILED, "int64 multiply happens overflow! 
a:%ld b:%d", len, size); - callback(GRAPH_FAILED, outputs); - return; - } - tensor.length = len * size; - tensor.data.reset(new(std::nothrow) uint8_t[tensor.length]); + ge_tensor.SetData(aligned_ptr, length); + ge::Tensor tensor = TensorAdapter::AsTensor(ge_tensor); // To avoid global step too small and can not stop, totally set a bigger value - for (int64_t i = 0; i < tensor.length; i++) { - tensor.data[i] = 0x7F; // here stands for a positive max value + auto ptr = aligned_ptr->MutableGet(); + for (int64_t i = 0; i < length; i++) { + ptr[i] = 0x7F; // here stands for a positive max value } outputs.emplace_back(std::move(tensor)); } @@ -3737,7 +3725,7 @@ void GraphManager::UpdateLocalOmgContext(GraphId graph_id) { if (iter != omg_contexts_.end()) { SetLocalOmgContext(iter->second); } else { - GELOGW("OmgContext of graph %u not found.", graph_id); + GELOGW("OmgContext of graph %u is not found.", graph_id); } } @@ -3767,9 +3755,9 @@ void GraphManager::RemoveGraphCount(GraphId graph_id) { std::lock_guard lock(graph_count_mutex_); auto it = graph_count_.find(graph_id); if (it == graph_count_.end()) { - GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id); + GELOGW("Graph of id: %u has not been added, count cannot be decreased", graph_id); } else { - GELOGD("RemoveGraphCount success, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]); + GELOGD("RemoveGraphCount success, graph count of id[%u] is %u", graph_id, graph_count_[graph_id]); graph_count_.erase(it); } } diff --git a/ge/graph/manager/graph_manager.h b/ge/graph/manager/graph_manager.h index c76eabbb..4a1f7a7f 100644 --- a/ge/graph/manager/graph_manager.h +++ b/ge/graph/manager/graph_manager.h @@ -162,9 +162,8 @@ class GraphManager { /// @param [out] callback: callback while run graph async finish /// @return Status result of function /// - Status RunGraphAsync(const GraphId &graph_id, const std::vector &inputs, + Status RunGraphAsync(const GraphId &graph_id, const std::vector &inputs, uint64_t session_id, RunAsyncCallback callback); - /// /// @ingroup ge_graph /// @brief me register the callback function to get the result of summary or checkpoin @@ -221,7 +220,7 @@ class GraphManager { struct PreRunArgs { GraphId graph_id; - std::vector input_tensor; + std::vector input_tensor; uint64_t session_id; struct error_message::Context error_context; GEThreadLocalContext context; @@ -233,7 +232,7 @@ class GraphManager { GraphId graph_id; uint64_t session_id; struct error_message::Context error_context; - std::vector input_tensor; + std::vector input_tensor; GeRootModelPtr ge_root_model; GEThreadLocalContext context; RunAsyncCallback callback; @@ -252,10 +251,10 @@ class GraphManager { uint64_t session_id, const struct error_message::Context &error_context, const GEThreadLocalContext &ge_context); - Status ParseInputsDims(const std::vector &input_tensor); - void ParseInputsDimsForData(const std::vector &input_tensor); + Status ParseInputsDims(const std::vector &input_tensor); + void ParseInputsDimsForData(const std::vector &input_tensor); Status ParseInputsDimsForGetNexNosinkAndData(const vector &dynamic_nodes, - const std::vector &input_tensor); + const std::vector &input_tensor); Status RunCustomPass(const GraphNodePtr &graph_node); Status PreRun(const GraphNodePtr &graph_node, const std::vector &inputs, GeRootModelPtr &ge_root_model, uint64_t session_id = INVALID_SESSION_ID); @@ -369,7 +368,6 @@ class GraphManager { void RemoveModelCacheHelper(const GraphId &graph_id); ModelCacheHelperPtr 
FindModelCacheHelper(GraphId graph_id); - static void ConstructGeInput(const std::vector &inputs, std::vector &ge_inputs); static void PreRunThread(GraphManager *graph_manager); static void RunThread(GraphManager *graph_manager); static void StopQueue(GraphManager *graph_manager); diff --git a/ge/graph/manager/graph_manager_utils.cc b/ge/graph/manager/graph_manager_utils.cc index d24b7821..0f93654c 100644 --- a/ge/graph/manager/graph_manager_utils.cc +++ b/ge/graph/manager/graph_manager_utils.cc @@ -114,7 +114,7 @@ GraphModelListener::GraphModelListener(std::mutex &mutex, std::condition_variabl : result_code_(0), is_finished_(false), mutex_(mutex), condition_(cond) {} Status GraphModelListener::OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result, - std::vector &outputs) { + std::vector &outputs) { GELOGI( "[GraphManager] graph compute call back, model_id:%u, task_id:%u, " "resultCode:%u.", @@ -151,7 +151,7 @@ void RunAsyncListener::SetCallback(const RunAsyncCallback &callback) { } Status RunAsyncListener::OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result, - std::vector &outputs) { + std::vector &outputs) { GELOGI("[GraphManager] run graph async call back, modelId:%u, taskId:%u, resultCode:%u.", model_id, task_id, result); GE_CHECK_NOTNULL(callback_); diff --git a/ge/graph/manager/graph_manager_utils.h b/ge/graph/manager/graph_manager_utils.h index 4ff3db94..d38b4321 100644 --- a/ge/graph/manager/graph_manager_utils.h +++ b/ge/graph/manager/graph_manager_utils.h @@ -130,7 +130,7 @@ class RunAsyncListener : public ge::ModelListener { // callback Status OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result, - std::vector &outputs) override; + std::vector &outputs) override; private: RunAsyncCallback callback_; @@ -224,7 +224,7 @@ class GraphModelListener : public ge::ModelListener { // callback Status OnComputeDone(uint32_t model_id, uint32_t task_id, uint32_t result, - std::vector &outputs) override; + std::vector &outputs) override; Status ResetResult(); diff --git a/ge/hybrid/executor/hybrid_model_async_executor.cc b/ge/hybrid/executor/hybrid_model_async_executor.cc index dc8b496c..af06e27b 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.cc +++ b/ge/hybrid/executor/hybrid_model_async_executor.cc @@ -26,6 +26,7 @@ namespace { const int kDataOutputIndex = 0; const size_t kMinimumPiplineStages = 2; const int kDefaultLoopCount = 10; +const size_t kAlignment = 64; } HybridModelAsyncExecutor::HybridModelAsyncExecutor(HybridModel *model) : model_(model), run_flag_(false), data_dumper_(nullptr) { @@ -70,6 +71,8 @@ Status HybridModelAsyncExecutor::Start(const std::shared_ptr &lis GetThreadLocalContext() = *executor_->GetContext()->ge_context; GetContext().SetSessionId(executor_->GetContext()->session_id); GetContext().SetContextId(executor_->GetContext()->context_id); + GE_CHECK_NOTNULL(executor_->GetContext()->ge_context); + GetThreadLocalContext() = *executor_->GetContext()->ge_context; return RunInternal(); }); @@ -197,7 +200,7 @@ Status HybridModelAsyncExecutor::HandleResult(Status exec_ret, HybridModelExecutor::ExecuteArgs &args, OutputData *output_data) { GELOGD("Start to handle result. 
model id = %u, data index = %u, execution ret = %u", model_id_, data_id, exec_ret); - std::vector output_tensor_info_list; + std::vector output_tensor_info_list; if (args.is_eos) { GELOGI("End of sequence, model id = %u", model_id_); GE_CHK_STATUS_RET_NOLOG(OnComputeDone(data_id, END_OF_SEQUENCE, output_tensor_info_list)); @@ -368,7 +371,7 @@ Status HybridModelAsyncExecutor::InitInputDesc() { } Status HybridModelAsyncExecutor::OnComputeDone(uint32_t data_index, uint32_t result_code, - std::vector &outputs) { + std::vector &outputs) { GELOGD("OnComputeDone. model id = %u, data index = %u, execution ret = %u", model_id_, data_index, result_code); if (listener_ != nullptr) { GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_index, result_code, outputs), @@ -378,9 +381,8 @@ Status HybridModelAsyncExecutor::OnComputeDone(uint32_t data_index, uint32_t res return result_code; } -Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &args, - OutputData *output_data, - std::vector &outputs) { +Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &args, OutputData *output_data, + std::vector &outputs) { // copy output data from op to designated position std::vector &output_tensor_desc_list = args.output_desc; std::vector &output_tensors = args.outputs; @@ -395,6 +397,12 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a } GELOGD("Number of outputs = %zu", output_tensor_desc_list.size()); + string execute_mode; + auto result = ge::GetContext().GetOption(OPTION_EXEC_DYNAMIC_EXECUTE_MODE, execute_mode); + if (result != SUCCESS) { + GELOGW("Can not get dynamic execute mode attr"); + } + GELOGD("The dynamic execute is %s", execute_mode.c_str()); for (size_t i = 0; i < output_tensors.size(); ++i) { GELOGD("Start to process output[%zu]", i); auto &output_tensor = output_tensors[i]; @@ -429,32 +437,28 @@ Status HybridModelAsyncExecutor::CopyOutputs(HybridModelExecutor::ExecuteArgs &a return INTERNAL_ERROR; } - ge::OutputTensorInfo output; - output.data_type = static_cast(tensor_desc->GetDataType()); - output.dims = tensor_desc->GetShape().GetDims(); - output.length = output_size; + GeShape ge_shape(tensor_desc->GetShape().GetDims()); + GeTensorDesc ge_tensor_desc; + ge_tensor_desc.SetShape(ge_shape); + GeTensor ge_tensor(ge_tensor_desc); if (output_size > 0) { - std::unique_ptr data_buf(new(std::nothrow) uint8_t[output_size]); + auto aligned_ptr = MakeShared(output_size, kAlignment); + GE_CHECK_NOTNULL(aligned_ptr); + auto data_buf = aligned_ptr->MutableGet(); GE_CHECK_NOTNULL(data_buf); - GE_CHK_RT_RET(rtMemcpy(data_buf.get(), - output_size, - output_tensor.GetData(), - output_size, - RT_MEMCPY_DEVICE_TO_HOST)); - output.data = std::move(data_buf); - output_data->blobs.emplace_back(data_buf.get(), static_cast(output_size), false); + GE_CHK_RT_RET(rtMemcpy(data_buf, output_size, output_tensor.GetData(), output_size, RT_MEMCPY_DEVICE_TO_HOST)); + ge_tensor.SetData(aligned_ptr, output_size); + output_data->blobs.emplace_back(data_buf, static_cast(output_size), false); } else { GELOGW("Output[%zu] is empty. 
shape = [%s]", i, tensor_desc->GetShape().ToString().c_str()); - output.data = nullptr; + ge_tensor.SetData(nullptr, 0U); output_data->blobs.emplace_back(nullptr, 0U, false); } - - outputs.emplace_back(std::move(output)); - GELOGD("Output[%zu] added, type = %s, shape = [%s], size = %ld", - i, + auto tensor = TensorAdapter::AsTensor(ge_tensor); + outputs.emplace_back(std::move(tensor)); + GELOGD("Output[%zu] added, type = %s, shape = [%s], size = %ld", i, TypeUtils::DataTypeToSerialString(tensor_desc->GetDataType()).c_str(), - tensor_desc->GetShape().ToString().c_str(), - output_size); + tensor_desc->GetShape().ToString().c_str(), output_size); } return SUCCESS; @@ -507,7 +511,7 @@ Status HybridModelAsyncExecutor::Execute(const vector &inputs, vector< GELOGD("Done copying input data successfully."); GE_CHK_STATUS_RET(executor_->Execute(args), "[Invoke][Execute] Failed, model_id = %u.", model_id_); - std::vector output_tensor_info_list; + std::vector output_tensor_info_list; OutputData output_data; GE_CHK_STATUS_RET(CopyOutputs(args, &output_data, output_tensor_info_list), "[Invoke][CopyOutputs]Failed to copy outputs, model_id = %u.", model_id_); @@ -517,15 +521,15 @@ Status HybridModelAsyncExecutor::Execute(const vector &inputs, vector< outputs.resize(output_tensor_info_list.size()); for (auto &out_tensor_info : output_tensor_info_list) { auto &ge_tensor = outputs[out_index]; - if (out_tensor_info.length > 0) { - GE_CHK_GRAPH_STATUS_RET(ge_tensor.SetData(out_tensor_info.data.get(), out_tensor_info.length), + if (out_tensor_info.GetSize() > 0) { + GE_CHK_GRAPH_STATUS_RET(ge_tensor.SetData(out_tensor_info.GetData(), out_tensor_info.GetSize()), "Failed to set output[%d].", out_index); } ge_tensor.MutableTensorDesc() = *args.output_desc[out_index]; GELOGD("Set output[%d], tensor size = %ld, shape = [%s]", out_index, - out_tensor_info.length, + out_tensor_info.GetSize(), ge_tensor.MutableTensorDesc().MutableShape().ToString().c_str()); ++out_index; } diff --git a/ge/hybrid/executor/hybrid_model_async_executor.h b/ge/hybrid/executor/hybrid_model_async_executor.h index f3cc8d50..c6d99c7c 100644 --- a/ge/hybrid/executor/hybrid_model_async_executor.h +++ b/ge/hybrid/executor/hybrid_model_async_executor.h @@ -77,9 +77,9 @@ class HybridModelAsyncExecutor { Status CopyOutputs(HybridModelExecutor::ExecuteArgs &args, OutputData *output_data, - std::vector &outputs); + std::vector &outputs); - Status OnComputeDone(uint32_t data_index, uint32_t result_code, std::vector &outputs); + Status OnComputeDone(uint32_t data_index, uint32_t result_code, std::vector &outputs); Status PreRun(InputData ¤t_data, HybridModelExecutor::ExecuteArgs &args); diff --git a/ge/session/inner_session.cc b/ge/session/inner_session.cc index fb038fdd..9308e267 100755 --- a/ge/session/inner_session.cc +++ b/ge/session/inner_session.cc @@ -408,7 +408,26 @@ Status InnerSession::BuildGraph(uint32_t graph_id, const std::vector &inputs, +Status InnerSession::BuildGraph(uint32_t graph_id, const std::vector &inputs) { + UpdateThreadContext(graph_id); + GELOGI("[InnerSession:%lu] build graph on session, graph_id=%u.", session_id_, graph_id); + std::vector ge_inputs; + for (const auto &input : inputs) { + ge_inputs.emplace_back(TensorAdapter::AsGeTensor(input)); + } + GeRootModelPtr ge_root_model = nullptr; + Status ret = graph_manager_.BuildGraph(graph_id, ge_inputs, ge_root_model, session_id_, true); + if (ret != SUCCESS) { + GELOGE(ret, "[Build][Graph] failed, InnerSession:%lu graph_id=%u.", session_id_, graph_id); + 
REPORT_CALL_ERROR("E19999", + "GraphManager BuildGraph failed, InnerSession:%lu graph_id=%u.", session_id_, graph_id); + return ret; + } + GELOGI("[InnerSession:%lu] build graph success, graph_id=%u.", session_id_, graph_id); + return ret; +} + +Status InnerSession::RunGraphAsync(uint32_t graph_id, const std::vector &inputs, RunAsyncCallback callback) { UpdateThreadContext(graph_id); GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id); @@ -422,7 +441,6 @@ Status InnerSession::RunGraphAsync(uint32_t graph_id, const std::vector &options) { diff --git a/ge/session/inner_session.h b/ge/session/inner_session.h index ce7402bb..a2ec35df 100644 --- a/ge/session/inner_session.h +++ b/ge/session/inner_session.h @@ -48,7 +48,9 @@ class InnerSession { Status BuildGraph(uint32_t graph_id, const std::vector &inputs); - Status RunGraphAsync(uint32_t graph_id, const std::vector &inputs, RunAsyncCallback callback); + Status BuildGraph(uint32_t graph_id, const std::vector &inputs); + + Status RunGraphAsync(uint32_t graph_id, const std::vector &inputs, RunAsyncCallback callback); Status Finalize(); diff --git a/ge/session/session_manager.cc b/ge/session/session_manager.cc index 51a4d2e8..fdf37d06 100755 --- a/ge/session/session_manager.cc +++ b/ge/session/session_manager.cc @@ -384,8 +384,29 @@ Status SessionManager::BuildGraph(SessionId session_id, uint32_t graph_id, const return innerSession->BuildGraph(graph_id, inputs); } +Status SessionManager::BuildGraph(SessionId session_id, uint32_t graph_id, const std::vector &inputs) { + if (!init_flag_) { + GELOGE(GE_SESSION_MANAGER_NOT_INIT, "[Build][Graph]fail for Session manager is not initialized," + "session_id:%lu, graph_id:%u.", session_id, graph_id); + REPORT_INNER_ERROR("E19999", "BuildGraph fail for Session manager is not initialized," + "session_id:%lu, graph_id:%u.", session_id, graph_id); + return GE_SESSION_MANAGER_NOT_INIT; + } + SessionPtr innerSession = nullptr; + { + std::lock_guard lock(mutex_); + std::map::iterator it = session_manager_map_.find(session_id); + if (it == session_manager_map_.end()) { + return GE_SESSION_NOT_EXIST; + } else { + innerSession = it->second; + } + } + return innerSession->BuildGraph(graph_id, inputs); +} + Status SessionManager::RunGraphAsync(SessionId session_id, uint32_t graph_id, - const std::vector &inputs, RunAsyncCallback callback) { + const std::vector &inputs, RunAsyncCallback callback) { if (!init_flag_) { GELOGE(GE_SESSION_MANAGER_NOT_INIT, "[AsyncRun][Graph]fail for Session manager is not initialized, session_id:%lu, graph_id:%u.", diff --git a/ge/session/session_manager.h b/ge/session/session_manager.h index f06f8719..17152b0a 100644 --- a/ge/session/session_manager.h +++ b/ge/session/session_manager.h @@ -139,6 +139,8 @@ class SessionManager { /// Status BuildGraph(SessionId session_id, uint32_t graph_id, const std::vector &inputs); + Status BuildGraph(SessionId session_id, uint32_t graph_id, const std::vector &inputs); + /// /// @ingroup ge_session /// @brief run a graph of the session with specific session id for train asynchronously @@ -147,7 +149,7 @@ class SessionManager { /// @param [in] inputs input data /// @return Status result of function /// - Status RunGraphAsync(SessionId session_id, uint32_t graph_id, const std::vector &inputs, + Status RunGraphAsync(SessionId session_id, uint32_t graph_id, const std::vector &inputs, RunAsyncCallback callback); /// diff --git a/inc/external/ge/ge_api.h b/inc/external/ge/ge_api.h index d3b6e1cb..b111563e 100644 --- 
a/inc/external/ge/ge_api.h +++ b/inc/external/ge/ge_api.h @@ -142,6 +142,8 @@ class GE_FUNC_VISIBILITY Session { /// Status BuildGraph(uint32_t graphId, const std::vector &inputs); + Status BuildGraph(uint32_t graphId, const std::vector &inputs); + /// /// @ingroup ge_graph /// @brief run graph in the session with specific session id asynchronously @@ -152,7 +154,7 @@ class GE_FUNC_VISIBILITY Session { /// Please ensure that the implementation of the function is trusted. /// @return Status result of function /// - Status RunGraphAsync(uint32_t graphId, const std::vector &inputs, RunAsyncCallback callback); + Status RunGraphAsync(uint32_t graphId, const std::vector &inputs, RunAsyncCallback callback); /// /// @ingroup ge_graph diff --git a/inc/external/ge/ge_api_types.h b/inc/external/ge/ge_api_types.h index cef3fc42..388f0fe0 100644 --- a/inc/external/ge/ge_api_types.h +++ b/inc/external/ge/ge_api_types.h @@ -23,6 +23,7 @@ #include #include #include +#include "graph/tensor.h" namespace ge { // Option key: graph run mode @@ -356,7 +357,8 @@ struct OutputTensorInfo { }; using Status = uint32_t; -using RunAsyncCallback = std::function &)>; +using RunAsyncCallback = std::function &)>; + // for ir build namespace ir_option { static const char *const INPUT_FORMAT = "input_format"; diff --git a/inc/framework/common/ge_types.h b/inc/framework/common/ge_types.h index 1fc25b1d..01fc7468 100644 --- a/inc/framework/common/ge_types.h +++ b/inc/framework/common/ge_types.h @@ -226,7 +226,7 @@ class GE_FUNC_VISIBILITY ModelListener { /// @param [in] resultCode Execution results /// virtual Status OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t result_code, - std::vector &outputs) = 0; + std::vector &outputs) = 0; }; // OMM configuration item diff --git a/tests/ut/ge/CMakeLists.txt b/tests/ut/ge/CMakeLists.txt index c3337487..cce72399 100755 --- a/tests/ut/ge/CMakeLists.txt +++ b/tests/ut/ge/CMakeLists.txt @@ -97,6 +97,7 @@ set(GRAPH_SRC_FILES "${GE_CODE_DIR}/metadef/graph/ge_tensor.cc" "${GE_CODE_DIR}/metadef/graph/ref_relation.cc" "${GE_CODE_DIR}/metadef/graph/tensor.cc" + "${GE_CODE_DIR}/metadef/graph/types.cc" "${GE_CODE_DIR}/metadef/graph/detail/attributes_holder.cc" "${GE_CODE_DIR}/metadef/graph/utils/anchor_utils.cc" "${GE_CODE_DIR}/metadef/graph/utils/graph_utils.cc" @@ -793,6 +794,8 @@ set(MULTI_PARTS_TEST_FILES "graph/manager/graph_manager_unittest.cc" "session/omg_omg_unittest.cc" "session/ge_api_unittest.cc" + "session/inner_session_unittest.cc" + "session/session_manager_unittest.cc" ) set(GENERATOR_TEST_FILES diff --git a/tests/ut/ge/graph/execute/graph_execute_unittest.cc b/tests/ut/ge/graph/execute/graph_execute_unittest.cc index e340df2f..6d982454 100644 --- a/tests/ut/ge/graph/execute/graph_execute_unittest.cc +++ b/tests/ut/ge/graph/execute/graph_execute_unittest.cc @@ -115,7 +115,7 @@ TEST_F(UtestGraphExecuteTest, test_set_callback) { ComputeGraphPtr graph = MakeShared("test"); // is_unknown_shape_graph_ = false GeRootModelPtr ge_root_model = MakeShared(graph); - RunAsyncCallback callback = [](Status, std::vector &) {}; + RunAsyncCallback callback = [](Status, std::vector &) {}; auto model_manager = ModelManager::GetInstance(); auto listener = MakeShared(); diff --git a/tests/ut/ge/graph/ge_executor_unittest.cc b/tests/ut/ge/graph/ge_executor_unittest.cc index 3c6a9903..3969ad9c 100644 --- a/tests/ut/ge/graph/ge_executor_unittest.cc +++ b/tests/ut/ge/graph/ge_executor_unittest.cc @@ -75,7 +75,7 @@ class DModelListener : public ge::ModelListener { DModelListener() { }; 
Status OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t resultCode, - std::vector &outputs) { + std::vector &outputs) { GELOGI("In Call back. OnComputeDone"); return SUCCESS; } @@ -276,7 +276,7 @@ TEST_F(UtestGeExecutor, execute_graph_with_stream) { EXPECT_EQ(model.task_list_.size(), 2); OutputData output_data; - vector outputs; + vector outputs; EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS); diff --git a/tests/ut/ge/graph/load/davinci_model_unittest.cc b/tests/ut/ge/graph/load/davinci_model_unittest.cc index 120a194e..4771ca8d 100644 --- a/tests/ut/ge/graph/load/davinci_model_unittest.cc +++ b/tests/ut/ge/graph/load/davinci_model_unittest.cc @@ -32,7 +32,7 @@ extern OpDescPtr CreateOpDesc(string name, string type); class DModelListener : public ModelListener { public: DModelListener(){}; - uint32_t OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t result, vector &outputs) { + uint32_t OnComputeDone(uint32_t model_id, uint32_t data_index, uint32_t result, vector &outputs) { return 0; } }; @@ -138,7 +138,7 @@ TEST_F(UtestDavinciModel, init_success) { EXPECT_EQ(model.task_list_.size(), 2); OutputData output_data; - vector outputs; + vector outputs; EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS); EXPECT_EQ(output_data.blobs.size(), 1); EXPECT_EQ(outputs.size(), 1); @@ -1024,7 +1024,7 @@ TEST_F(UtestDavinciModel, NnExecute) { rtStream_t stream = nullptr; InputData input_data; OutputData output_data; - vector outputs; + vector outputs; EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS); EXPECT_EQ(output_data.blobs.size(), 1); EXPECT_EQ(outputs.size(), 1); diff --git a/tests/ut/ge/graph/load/model_manager_unittest.cc b/tests/ut/ge/graph/load/model_manager_unittest.cc index 83d694d4..0cbe61b5 100644 --- a/tests/ut/ge/graph/load/model_manager_unittest.cc +++ b/tests/ut/ge/graph/load/model_manager_unittest.cc @@ -414,8 +414,8 @@ TEST_F(UtestModelManagerModelManager, test_data_input_tensor) { mm.model_map_[1] = model; mm.hybrid_model_map_[1] = std::make_shared(); - auto input_tensor = InputTensorInfo(); - vector inputs; + ge::Tensor input_tensor; + vector inputs; inputs.emplace_back(input_tensor); auto ret = mm.DataInputTensor(model_id,inputs); EXPECT_EQ(PARAM_INVALID, ret); // HybridDavinciModel::impl_ is null. 
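The unit test above now feeds ModelManager::DataInputTensor with ge::Tensor objects, matching the model_manager.cc hunk where each DataBuffer's data pointer, length, and placement are read from the tensor and its descriptor. The sketch below is illustrative only and not part of the patch: MakeInputBuffer is a hypothetical helper and the include paths are assumptions; it simply restates that pattern in one place.

#include <cstdint>
#include "graph/tensor.h"               // ge::Tensor (path assumed, as used elsewhere in this patch)
#include "framework/common/ge_types.h"  // ge::DataBuffer / InputData (path assumed)

namespace {
constexpr uint32_t kPlacementHostData = 0U;  // mirrors the constant added in davinci_model.cc

// Hypothetical helper: wrap one ge::Tensor input into a DataBuffer blob.
ge::DataBuffer MakeInputBuffer(const ge::Tensor &tensor) {
  ge::DataBuffer data;
  // The blob aliases the tensor's buffer, so the tensor must outlive the InputData it feeds.
  data.data = reinterpret_cast<void *>(const_cast<uint8_t *>(tensor.GetData()));
  data.length = tensor.GetSize();
  // Placement now travels with the tensor descriptor; CopyInputData uses it to pick
  // RT_MEMCPY_HOST_TO_DEVICE (placement == kPlacementHostData) or RT_MEMCPY_DEVICE_TO_DEVICE.
  data.placement = static_cast<uint32_t>(tensor.GetTensorDesc().GetPlacement());
  return data;
}
}  // namespace
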
diff --git a/tests/ut/ge/graph/manager/graph_manager_unittest.cc b/tests/ut/ge/graph/manager/graph_manager_unittest.cc index b61cb524..fafd7168 100644 --- a/tests/ut/ge/graph/manager/graph_manager_unittest.cc +++ b/tests/ut/ge/graph/manager/graph_manager_unittest.cc @@ -280,7 +280,7 @@ TEST_F(UtestGraphManagerTest, test_pre_run_thread) { graph_manager.thread_run_flag_ = true; GraphId graph_id = 1; - std::vector input_tensor; + std::vector input_tensor; uint64_t session_id = 0; error_message::Context error_context; GEThreadLocalContext context; @@ -306,7 +306,7 @@ TEST_F(UtestGraphManagerTest, test_pre_run_thread_2) { graph_manager.IncreaseGraphCount(graph_id); graph_manager.IncreaseGraphCount(graph_id); graph_node_1->SetBuildFlag(true); - std::vector input_tensor; + std::vector input_tensor; uint64_t session_id = 0; error_message::Context error_context; GEThreadLocalContext context; @@ -381,7 +381,7 @@ TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_2) { ComputeGraphPtr compute_graph = MakeShared("test_graph"); GeRootModelPtr ge_root_model = MakeShared(compute_graph); GraphManager::PreRunArgs arg; - arg.callback = [](Status, std::vector &) {}; + arg.callback = [](Status, std::vector &) {}; GraphNodePtr graph_node = MakeShared(graph_id); graph_node->SetBuildFlag(true); graph_node->Lock(); @@ -397,7 +397,7 @@ TEST_F(UtestGraphManagerTest, test_check_incre_build_and_pre_run_3) { ComputeGraphPtr compute_graph = MakeShared("test_graph"); GeRootModelPtr ge_root_model = MakeShared(compute_graph); GraphManager::PreRunArgs arg; - arg.callback = [](Status, std::vector &) {}; + arg.callback = [](Status, std::vector &) {}; GraphNodePtr graph_node = MakeShared(graph_id); graph_node->SetBuildFlag(false); graph_node->Lock(); @@ -434,3 +434,34 @@ TEST_F(UtestGraphManagerTest, test_add_graph_with_copy_fail) { status = graph_manager.AddGraphWithCopy(graph_id, graph, options, context); EXPECT_NE(status, ge::SUCCESS); } + +TEST_F(UtestGraphManagerTest, ParseInputsDimsForData_success) { + GraphManager graph_manager; + std::vector input_tensors; + ge::Tensor tensor; + input_tensors.emplace_back(tensor); + graph_manager.ParseInputsDimsForData(input_tensors); +} + +// TEST_F(UtestGraphManagerTest, ParseInputsDimsForGetNexNosinkAndData_success) { +// GraphManager graph_manager; + +// ge::ComputeGraphPtr graph = std::make_shared("default"); + +// // save1 +// ge::OpDescPtr save_op = std::make_shared(); +// save_op->SetType("Save"); +// save_op->SetName("Save1"); +// save_op->AddInputDesc(ge::GeTensorDesc()); +// save_op->AddOutputDesc(ge::GeTensorDesc()); +// AttrUtils::SetInt(save_op, ATTR_NAME_INDEX, 1); +// ge::NodePtr save_node = graph->AddNode(save_op); + +// std::vector nodes; +// nodes.emplace_back(save_node); +// ge::Tensor tensor; +// std::vector input_tensors; +// input_tensors.emplace_back(tensor); +// auto ret = graph_manager.ParseInputsDimsForGetNexNosinkAndData(nodes, input_tensors); +// EXPECT_EQ(ret, ge::SUCCESS); +// } diff --git a/tests/ut/ge/session/ge_api_unittest.cc b/tests/ut/ge/session/ge_api_unittest.cc index 00c904bb..371efdfa 100644 --- a/tests/ut/ge/session/ge_api_unittest.cc +++ b/tests/ut/ge/session/ge_api_unittest.cc @@ -55,4 +55,12 @@ TEST_F(UtestGeApi, run_graph_with_stream) { ret = inner_session.RunGraphWithStreamAsync(10, nullptr, inputs, outputs); ASSERT_NE(ret, SUCCESS); } + +TEST_F(UtestGeApi, build_graph_success) { + vector inputs; + std::map options; + Session session(options); + auto ret = session.BuildGraph(1, inputs); + ASSERT_NE(ret, SUCCESS); +} } // 
namespace ge
diff --git a/tests/ut/ge/session/inner_session_unittest.cc b/tests/ut/ge/session/inner_session_unittest.cc
new file mode 100644
index 00000000..19f75d9f
--- /dev/null
+++ b/tests/ut/ge/session/inner_session_unittest.cc
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define private public
+#define protected public
+#include "session/inner_session.h"
+#undef private
+#undef protected
+
+
+using namespace std;
+
+namespace ge {
+class Utest_Inner_session : public testing::Test {
+ protected:
+  void SetUp() override {}
+
+  void TearDown() override {}
+};
+
+TEST_F(Utest_Inner_session, build_graph_success) {
+  std::map<string, string> options;
+  uint64_t session_id = 1;
+  InnerSession inner_session(session_id, options);
+  std::vector<ge::Tensor> inputs;
+  ge::Tensor tensor;
+  inputs.emplace_back(tensor);
+  Status ret = inner_session.BuildGraph(1, inputs);
+  EXPECT_NE(ret, ge::SUCCESS);
+}
+
+} // namespace ge
diff --git a/tests/ut/ge/session/session_manager_unittest.cc b/tests/ut/ge/session/session_manager_unittest.cc
new file mode 100644
index 00000000..3ba5def1
--- /dev/null
+++ b/tests/ut/ge/session/session_manager_unittest.cc
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#define private public
+#define protected public
+#include "session/session_manager.h"
+#undef private
+#undef protected
+
+
+using namespace std;
+
+namespace ge {
+class Utest_SessionManager : public testing::Test {
+ protected:
+  void SetUp() override {}
+
+  void TearDown() override {}
+};
+
+TEST_F(Utest_SessionManager, build_graph_failed) {
+  map<string, string> session_manager_option;
+  map<string, string> session_option;
+  SessionManager *session_manager = new SessionManager();
+  uint64_t session_id = 0;
+  uint32_t graph_id = 0;
+  std::vector<ge::Tensor> inputs;
+
+  Status ret = session_manager->BuildGraph(session_id, graph_id, inputs);
+  EXPECT_EQ(ret, ge::GE_SESSION_MANAGER_NOT_INIT);
+
+  session_manager->Initialize(session_manager_option);
+  ret = session_manager->BuildGraph(session_id, graph_id, inputs);
+  EXPECT_NE(ret, ge::SUCCESS);
+  delete session_manager;
+}
+
+TEST_F(Utest_SessionManager, RungraphAsync_before_init) {
+  SessionManager *session_manager = new SessionManager();
+  SessionId session_id = 0;
+  uint32_t graph_id = 0;
+  std::vector<ge::Tensor> inputs;
+  RunAsyncCallback callback;
+  Status ret = session_manager->RunGraphAsync(session_id, graph_id, inputs, callback);
+  EXPECT_EQ(ret, ge::GE_SESSION_MANAGER_NOT_INIT);
+  delete session_manager;
+}
+
+TEST_F(Utest_SessionManager, RungraphAsync_failed) {
+  map<string, string> session_manager_option;
+  SessionManager *session_manager = new SessionManager();
+  session_manager->Initialize(session_manager_option);
+
+  SessionId session_id = 0;
+  uint32_t graph_id = 0;
+  std::vector<ge::Tensor> inputs;
+  RunAsyncCallback callback;
+  Status ret = session_manager->RunGraphAsync(session_id, graph_id, inputs, callback);
+  EXPECT_EQ(ret, ge::GE_SESSION_NOT_EXIST);
+  delete session_manager;
+}
+
+} // namespace ge
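For reviewers, a minimal caller-side sketch of the reworked asynchronous interface; it is illustrative only and not part of the patch. It assumes a Session that has already been created and a graph already added via AddGraph; MakeHostTensor and RunAsyncExample are hypothetical names, and the include paths follow the headers touched above.

#include <cstdint>
#include <vector>
#include "ge/ge_api.h"      // ge::Session with the new BuildGraph / RunGraphAsync overloads
#include "graph/tensor.h"   // ge::Tensor, ge::TensorDesc, ge::Shape

// Hypothetical helper: wrap host data in a ge::Tensor, as the new overloads expect.
static ge::Tensor MakeHostTensor(const std::vector<float> &values) {
  ge::TensorDesc desc(ge::Shape({static_cast<int64_t>(values.size())}), ge::FORMAT_ND, ge::DT_FLOAT);
  ge::Tensor tensor(desc);
  (void)tensor.SetData(reinterpret_cast<const uint8_t *>(values.data()), values.size() * sizeof(float));
  return tensor;
}

static ge::Status RunAsyncExample(ge::Session &session, uint32_t graph_id) {
  std::vector<ge::Tensor> inputs{MakeHostTensor({1.0F, 2.0F, 3.0F})};

  // New overload added by this patch: compile the graph from ge::Tensor inputs.
  ge::Status ret = session.BuildGraph(graph_id, inputs);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // RunAsyncCallback now receives std::vector<ge::Tensor> instead of OutputTensorInfo.
  ge::RunAsyncCallback callback = [](ge::Status status, std::vector<ge::Tensor> &outputs) {
    if (status == ge::SUCCESS) {
      for (const auto &output : outputs) {
        // Output buffers are owned by the tensors (AlignedPtr-backed on the GE side).
        (void)output.GetSize();
      }
    }
  };
  return session.RunGraphAsync(graph_id, inputs, callback);
}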