
!1384 Performance optimization and fix memory leak.

From: @tangqunzhang
Reviewed-by: @ji_chen, @xchu42
Signed-off-by: @ji_chen
tags/v1.3.0
mindspore-ci-bot committed 3 years ago · commit 0714c8173e
8 changed files with 138 additions and 33 deletions
  1. ge/generator/ge_generator.cc (+5, -4)
  2. ge/graph/build/memory/block_mem_assigner.cc (+2, -2)
  3. ge/graph/build/memory/var_mem_assign_util.cc (+30, -7)
  4. ge/graph/build/memory/var_mem_assign_util.h (+3, -1)
  5. ge/graph/load/model_manager/davinci_model.cc (+13, -17)
  6. tests/ut/ge/generator/ge_generator_unittest.cc (+13, -0)
  7. tests/ut/ge/graph/build/mem_assigner_unittest.cc (+38, -2)
  8. tests/ut/ge/graph/load/davinci_model_unittest.cc (+34, -0)

ge/generator/ge_generator.cc (+5, -4)

@@ -36,6 +36,7 @@
#include "graph/utils/type_utils.h"
#include "init/gelib.h"
#include "model/ge_model.h"
#include "analyzer/analyzer.h"

using std::map;
using std::string;
@@ -1007,13 +1008,13 @@ Status GeGenerator::Impl::BuildModel(const Graph &graph, const vector<GeTensor>
ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
if (ret != SUCCESS) {
GELOGE(GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED, "GraphManager build graph fail, graph id: %u", graph_id);
VarManagerPool::Instance().RemoveVarManager(session_id);
return GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
ret = GE_GENERATOR_GRAPH_MANAGER_BUILD_GRAPH_FAILED;
}

RtContextUtil::GetInstance().DestroyRtContexts(session_id);
Analyzer::GetInstance()->DestroySessionJsonObject(session_id);
VarManagerPool::Instance().RemoveVarManager(session_id);

return SUCCESS;
return ret;
}

Status GeGenerator::Impl::GenerateInfershapeGraph(const Graph &graph) {
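A note on the leak being fixed here: in the old flow the failure branch removed the session's VarManager and returned immediately, so RtContextUtil::DestroyRtContexts never ran on errors, while the success path destroyed the rt contexts but never removed the VarManager (and the Analyzer session object was not released on either path). The patch records the error in `ret`, lets every path fall through to the same three cleanup calls, and returns `ret` at the end. Below is a minimal sketch of that cleanup-on-all-paths shape, with hypothetical stand-ins (`BuildGraphStub`, `ReleaseSessionResources`), not real GraphEngine APIs:

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the session-scoped pieces involved in BuildModel
// (VarManagerPool, RtContextUtil, Analyzer in GraphEngine) -- illustration only.
enum Status { SUCCESS = 0, BUILD_FAILED = 1 };

Status BuildGraphStub(bool ok) { return ok ? SUCCESS : BUILD_FAILED; }

void ReleaseSessionResources(uint64_t session_id) {
  // Stands in for DestroyRtContexts / DestroySessionJsonObject / RemoveVarManager;
  // the point is that this now runs unconditionally.
  std::cout << "released session " << session_id << std::endl;
}

Status BuildModelSketch(uint64_t session_id, bool ok) {
  Status ret = BuildGraphStub(ok);
  if (ret != SUCCESS) {
    ret = BUILD_FAILED;  // remember the failure instead of returning early
  }
  ReleaseSessionResources(session_id);  // cleanup covers success and failure alike
  return ret;                           // propagate the original result
}

int main() {
  return BuildModelSketch(1, /*ok=*/false) == BUILD_FAILED ? 0 : 1;
}
```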


ge/graph/build/memory/block_mem_assigner.cc (+2, -2)

@@ -1735,7 +1735,7 @@ Status BlockMemAssigner::AssignOutputMemoryWithReuse(const NodePtr &node, vector
///
void BlockMemAssigner::AssignMemoryWithReuse(vector<int64_t> &ranges) {
(void)ge::GetContext().GetOption(OPTION_EXEC_DISABLE_REUSED_MEMORY, ge_disable_reuse_mem_env_);
GELOGD("Reuse memory %s", ge_disable_reuse_mem_env_ == "1" ? "close" : "open");
GEEVENT("Reuse memory %s", ge_disable_reuse_mem_env_ == "1" ? "close" : "open");
string op_no_reuse_mem_str;
const char *op_no_reuse_mem = std::getenv(OP_NO_REUSE_MEM);
GE_IF_BOOL_EXEC(op_no_reuse_mem != nullptr, op_no_reuse_mem_str = string(op_no_reuse_mem);
@@ -2125,7 +2125,7 @@ void SetBlockOpMemOffset(MemoryBlock *block, int32_t child_block_level) {

child_block_level++;
for (MemoryBlock *child_block : block->ChildBlockList()) {
SetBlockOpMemOffset(child_block, child_block_level);
SetBlockOpMemOffset(child_block, child_block_level);
}
}



ge/graph/build/memory/var_mem_assign_util.cc (+30, -7)

@@ -311,6 +311,7 @@ Status VarMemAssignUtil::SetOutTransNodeToAssign(const ge::NodePtr &node, const
}

Status VarMemAssignUtil::AssignMemory2HasRefAttrNode(ge::ComputeGraphPtr &compute_graph) {
GraphToNodeMap graph_to_node;
for (const ge::NodePtr &n : compute_graph->GetAllNodes()) {
string ref_var_src_var_name;
auto op_desc = n->GetOpDesc();
@@ -318,7 +319,8 @@ Status VarMemAssignUtil::AssignMemory2HasRefAttrNode(ge::ComputeGraphPtr &comput
for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx += 1) {
const auto out_desc = op_desc->MutableOutputDesc(idx);
if (ge::AttrUtils::GetStr(out_desc, REF_VAR_SRC_VAR_NAME, ref_var_src_var_name)) {
GE_CHK_STATUS_RET(AssignData2VarRef(n, ref_var_src_var_name, compute_graph->GetSessionID(), idx));
GE_CHK_STATUS_RET(
AssignData2VarRef(n, ref_var_src_var_name, compute_graph->GetSessionID(), idx, graph_to_node));
}
}
}
@@ -326,16 +328,37 @@ Status VarMemAssignUtil::AssignMemory2HasRefAttrNode(ge::ComputeGraphPtr &comput
}

Status VarMemAssignUtil::AssignData2VarRef(const ge::NodePtr &has_ref_attr_node, const string &src_var_name,
uint64_t session_id, uint32_t out_index) {
uint64_t session_id, uint32_t out_index,
GraphToNodeMap &graph_to_node) {
// Get ref_var_src_var address
auto root_graph = GraphUtils::FindRootGraph(has_ref_attr_node->GetOwnerComputeGraph());
GE_CHECK_NOTNULL(root_graph);
ge::NodePtr var_ref_src_var = root_graph->FindNode(src_var_name);
if (var_ref_src_var == nullptr) {
// Cache mapping (name to nodeptr) improves query performance
auto &name_to_node = graph_to_node[root_graph];
if (name_to_node.empty()) {
for (const ge::NodePtr &n : root_graph->GetDirectNode()) {
name_to_node.emplace(n->GetName(), n);
}
for (auto sub_graph : root_graph->GetAllSubgraphs()) {
auto &name_to_node_sub = graph_to_node[sub_graph];
if (name_to_node_sub.empty()) {
for (const ge::NodePtr &n : sub_graph->GetDirectNode()) {
name_to_node_sub.emplace(n->GetName(), n);
}
}
}
}

ge::NodePtr var_ref_src_var = nullptr;
auto it = name_to_node.find(src_var_name);
if ((it != name_to_node.end()) && (it->second != nullptr)) {
var_ref_src_var = it->second;
} else {
for (auto sub_graph : root_graph->GetAllSubgraphs()) {
auto node_ptr = sub_graph->FindNode(src_var_name);
if (node_ptr != nullptr) {
var_ref_src_var = node_ptr;
auto &name_to_node_sub = graph_to_node[sub_graph];
it = name_to_node_sub.find(src_var_name);
if ((it != name_to_node_sub.end()) && (it->second != nullptr)) {
var_ref_src_var = it->second;
break;
}
}
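The performance gain in this file comes from replacing per-query linear searches (`FindNode` over the root graph and then each subgraph, once for every output carrying REF_VAR_SRC_VAR_NAME) with a name-to-node map that is built lazily per graph and reused through the new `GraphToNodeMap` argument. The sketch below shows the same lazy-cache idea in isolation; `Graph`, `Node`, and `FindNodeCached` are simplified stand-ins, not the real ComputeGraph/NodePtr types:

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Simplified stand-ins for ge::ComputeGraph / ge::NodePtr, just enough to show
// the lazily built name->node cache the patch threads through as GraphToNodeMap.
struct Node { std::string name; };
using NodePtr = std::shared_ptr<Node>;

struct Graph {
  std::vector<NodePtr> nodes;
  // Linear scan, analogous to ComputeGraph::FindNode -- O(N) per lookup.
  NodePtr FindNode(const std::string &name) const {
    for (const auto &n : nodes) {
      if (n->name == name) return n;
    }
    return nullptr;
  }
};

using GraphToNodeMap = std::map<const Graph *, std::map<std::string, NodePtr>>;

// Build the per-graph index on first use, then answer later queries from the map
// instead of rescanning the node list for every ref-attr output.
NodePtr FindNodeCached(const Graph &graph, const std::string &name, GraphToNodeMap &cache) {
  auto &name_to_node = cache[&graph];
  if (name_to_node.empty()) {
    for (const auto &n : graph.nodes) {
      name_to_node.emplace(n->name, n);
    }
  }
  auto it = name_to_node.find(name);
  return (it == name_to_node.end()) ? nullptr : it->second;
}

int main() {
  Graph g;
  g.nodes = {std::make_shared<Node>(Node{"A"}), std::make_shared<Node>(Node{"B"})};
  GraphToNodeMap cache;
  std::cout << (FindNodeCached(g, "B", cache) != nullptr) << "\n";  // 1: found via cache
  std::cout << (FindNodeCached(g, "M", cache) == nullptr) << "\n";  // 1: missing name
  return 0;
}
```

The patch uses `std::map` for the cache (see the `GraphToNodeMap` alias in the header below), so each lookup is O(log N) instead of a full node-list scan; an unordered map would be a further, optional refinement.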


ge/graph/build/memory/var_mem_assign_util.h (+3, -1)

@@ -22,6 +22,8 @@
#include "graph/utils/node_utils.h"

namespace ge {
using GraphToNodeMap = std::map<ge::ComputeGraphPtr, std::map<std::string, ge::NodePtr>>;

class VarMemAssignUtil {
public:
static Status AssignVarMemory(ge::ComputeGraphPtr &compute_graph);
@@ -47,7 +49,7 @@ class VarMemAssignUtil {
static Status DealTransNode(const ge::NodePtr &final_trans_node);
static Status DealExportTransNode(const ge::NodePtr &node, const ge::NodePtr &final_trans_node);
static Status AssignData2VarRef(const ge::NodePtr &variable_ref, const std::string &src_var_name, uint64_t session_id,
uint32_t out_index);
uint32_t out_index, GraphToNodeMap &graph_to_node);

static Status SetOutTransNodeToAssign(const ge::NodePtr &node, const ge::NodePtr &final_trans_node, size_t index);
};


ge/graph/load/model_manager/davinci_model.cc (+13, -17)

@@ -2137,7 +2137,6 @@ Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data

Status DavinciModel::SyncVarData() {
GELOGI("Sync var data, model id:%u", model_id_);
Status ret = SUCCESS;

if (global_step_addr_ != nullptr && global_step_size_ != 0) {
const vector<uint64_t> v_step = { iterator_count_ };
@@ -2145,7 +2144,7 @@ Status DavinciModel::SyncVarData() {
RT_MEMCPY_HOST_TO_DEVICE));
}

return ret;
return SUCCESS;
}

Status DavinciModel::InitModelProfile() {
@@ -3262,11 +3261,9 @@ Status DavinciModel::CopyModelData(const InputData &input_data, OutputData &outp
///
Status DavinciModel::UpdateIoTaskArgs(const std::map<uint32_t, ZeroCopyOffset> &data_info, bool is_input,
const vector<DataBuffer> &blobs, bool is_dynamic, const string &batch_label) {
string input_or_output;
is_input ? input_or_output = "input" : input_or_output = "output";
if (blobs.size() != data_info.size()) {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "Verify %s data num failed: model requires %zu, but user actually feeds %zu",
input_or_output.c_str(), data_info.size(), blobs.size());
is_input ? "input" : "output", data_info.size(), blobs.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

@@ -3274,7 +3271,7 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map<uint32_t, ZeroCopyOffset> &
if (data.first >= blobs.size()) { // check data index.
GELOGE(ACL_ERROR_GE_PARAM_INVALID,
"Verify %s data num failed: can not find No.%u data, because user only feeds %zu",
input_or_output.c_str(), data.first, blobs.size());
is_input ? "input" : "output", data.first, blobs.size());
return ACL_ERROR_GE_PARAM_INVALID;
}

@@ -3306,21 +3303,20 @@ Status DavinciModel::UpdateIoTaskArgs(const std::map<uint32_t, ZeroCopyOffset> &
}

for (size_t count = 0; count < data.second.GetDataCount(); ++count) {
int64_t size = data.second.GetDataInfo().at(count).first;
void *addr = data.second.GetDataInfo().at(count).second;
void *buffer_addr = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer.data) +
data.second.GetRelativeOffset().at(count));
GELOGI("[ZCPY] Copy %s blobs_index %u, virtual_addr: %p, size: %ld, user_data_addr: %p, batch_label: %s",
input_or_output.c_str(), data.first, addr, size, buffer_addr, batch_label.c_str());
is_input ? "input" : "output", data.first, addr, data.second.GetDataInfo().at(count).first,
buffer_addr, batch_label.c_str());
// For input data, just copy for rts task.
for (ZeroCopyTask &task : zero_copy_tasks_) {
if (task.GetBatchLabel() != kDefaultBatchLable && task.GetBatchLabel() != batch_label) {
for (auto &task : zero_copy_tasks_) {
bool not_same_batch = (task.GetBatchLabel() != kDefaultBatchLable && task.GetBatchLabel() != batch_label);
if (not_same_batch) {
continue;
}
uintptr_t addr_val = reinterpret_cast<uintptr_t>(addr);
if (task.UpdateTaskParam(addr_val, buffer_addr) != SUCCESS) {
return ACL_ERROR_GE_PARAM_INVALID;
}
(void)task.UpdateTaskParam(addr_val, buffer_addr);
}
}
}
@@ -3980,7 +3976,7 @@ Status DavinciModel::InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc)
Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) const {
const auto it = orig_input_info_.find(index);
if (it == orig_input_info_.end()) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "there is not AIPP related with index %u.", index);
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "There is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

@@ -4014,7 +4010,7 @@ void DavinciModel::ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_

Status DavinciModel::InitAippInputOutputDims(uint32_t index, const OpDescPtr &op_desc) {
if (!op_desc->HasAttr(ATTR_NAME_AIPP_INPUTS) || !op_desc->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGI("there is not AIPP related with index %u.", index);
GELOGI("There is not AIPP related with index %u.", index);
return SUCCESS;
}

@@ -4031,7 +4027,7 @@ Status DavinciModel::InitAippInputOutputDims(uint32_t index, const OpDescPtr &op
ConstGeTensorDescPtr data_input_desc = op_desc->GetInputDescPtr(kDataIndex);
int64_t data_input_size;
(void)TensorUtils::GetSize(*(op_desc->GetInputDescPtr(kDataIndex)), data_input_size);
GELOGD("related Data[%d]: tensor_name: %s, dim_num: %zu, tensor_size: %zu, format: %s, data_type: %s, shape: %s.",
GELOGD("Related Data[%d]: tensor_name: %s, dim_num: %zu, tensor_size: %zu, format: %s, data_type: %s, shape: %s.",
index, op_desc->GetName().c_str(), data_input_desc->GetShape().GetDimNum(), data_input_size,
TypeUtils::FormatToSerialString(data_input_desc->GetFormat()).c_str(),
TypeUtils::DataTypeToSerialString(data_input_desc->GetDataType()).c_str(),
@@ -4058,7 +4054,7 @@ Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, vector<InputOutpu
vector<InputOutputDims> &output_dims) const {
const auto it = aipp_dims_info_.find(index);
if (it == aipp_dims_info_.end()) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "there is not AIPP related with index %u.", index);
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "There is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}
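Most edits in this file are cosmetic (log-message capitalization, the unused `ret` local dropped from `SyncVarData`), but `UpdateIoTaskArgs` also stops constructing a `std::string input_or_output` on every call just to pick a log label, using the `is_input ? "input" : "output"` ternary inline instead. A small before/after sketch of that micro-optimization, using a hypothetical `LogCounts` helper and `printf` in place of the GELOG macros:

```cpp
#include <cstddef>
#include <cstdio>
#include <string>

// Old shape: a std::string is constructed on every call just to pick a label.
void LogCountsOld(bool is_input, std::size_t expected, std::size_t actual) {
  std::string input_or_output;
  is_input ? input_or_output = "input" : input_or_output = "output";
  std::printf("Verify %s data num: model requires %zu, user feeds %zu\n",
              input_or_output.c_str(), expected, actual);
}

// New shape: the ternary selects a string literal, so nothing is allocated.
void LogCountsNew(bool is_input, std::size_t expected, std::size_t actual) {
  std::printf("Verify %s data num: model requires %zu, user feeds %zu\n",
              is_input ? "input" : "output", expected, actual);
}

int main() {
  LogCountsOld(true, 2, 1);
  LogCountsNew(false, 1, 1);
  return 0;
}
```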



tests/ut/ge/generator/ge_generator_unittest.cc (+13, -0)

@@ -155,4 +155,17 @@ TEST_F(UtestGeGenerator, test_remove_const) {
vector<GeTensor> outputs;
generator.RemoveConst(inputs, outputs);
}

TEST_F(UtestGeGenerator, test_generate_online_model) {
GeTensorDesc tensor_desc;
GeTensor tensor(tensor_desc);
const vector<GeTensor> inputs = { tensor, tensor };
auto compute_graph = MakeGraph();
compute_graph->TopologicalSorting();
Graph graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
GeGenerator generator;
generator.Initialize({});
std::string name;
EXPECT_NE(generator.GenerateOfflineModel(graph, name, inputs), SUCCESS);
}
} // namespace ge

tests/ut/ge/graph/build/mem_assigner_unittest.cc (+38, -2)

@@ -33,6 +33,7 @@
#include "graph/build/memory/graph_mem_assigner.h"
#include "graph/build/memory/hybrid_mem_assigner.h"
#include "graph/build/memory/max_block_mem_assigner.h"
#include "graph/manager/graph_var_manager.h"
#undef protected
#undef private

@@ -77,8 +78,8 @@ class UtestMemoryAssignerTest : public testing::Test {
op_def->SetWorkspaceBytes(workspace_bytes);
return op_def;
}
void MakeGraph(ge::ComputeGraphPtr &graph) {
ge::OpDescPtr op_def_a = CreateOpWithWsSize("A", 6000);
void MakeGraph(ge::ComputeGraphPtr &graph, const string &type = "some") {
ge::OpDescPtr op_def_a = CreateOpWithWsSize("A", 6000, type);
op_def_a->SetStreamId(0);
ge::OpDescPtr op_def_b = CreateOpWithWsSize("B", 120000);
op_def_b->SetStreamId(0);
@@ -263,3 +264,38 @@ TEST_F(UtestMemoryAssignerTest, graph_memory_set_last_used_attr) {
(void) ge::AttrUtils::GetInt(node_f->GetOpDesc()->GetInputDesc(0), ATTR_NAME_IS_END_OF_INPUTMEM_LIFECYCLE, flag);
EXPECT_EQ(flag, 1);
}

TEST_F(UtestMemoryAssignerTest, graph_memory_assign_ref_var) {
ge::ComputeGraphPtr graph = make_shared<ge::ComputeGraph>("");
MakeGraph(graph, VARIABLE);
auto node_a = graph->FindNode("A");
auto node_b = graph->FindNode("B");
std::string value = "A";
(void) ge::AttrUtils::SetStr(node_b->GetOpDesc()->MutableOutputDesc(0), REF_VAR_SRC_VAR_NAME, value);
MemoryAssigner memory_assigner(graph);
map<int64_t, size_t> mem_offset;
size_t zero_memory_size = 0;
VarManager::Instance(0)->Init(0, 0, 0, 0);
EXPECT_EQ(memory_assigner.AssignMemory(false, mem_offset, zero_memory_size), GRAPH_SUCCESS);

EXPECT_EQ(node_b->GetOpDesc()->GetOutputOffset()[0], node_a->GetOpDesc()->GetOutputOffset()[0]);
}

TEST_F(UtestMemoryAssignerTest, graph_memory_assign_ref_var_not_found) {
ge::ComputeGraphPtr graph = make_shared<ge::ComputeGraph>("");
MakeGraph(graph, VARIABLE);

ge::ComputeGraphPtr sub_graph = make_shared<ge::ComputeGraph>("");
MakeReuseGraph(sub_graph);
graph->AddSubGraph(sub_graph);

auto node_a = graph->FindNode("A");
auto node_b = graph->FindNode("B");
std::string value = "M";
(void) ge::AttrUtils::SetStr(node_b->GetOpDesc()->MutableOutputDesc(0), REF_VAR_SRC_VAR_NAME, value);
MemoryAssigner memory_assigner(graph);
map<int64_t, size_t> mem_offset;
size_t zero_memory_size = 0;
VarManager::Instance(0)->Init(0, 0, 0, 0);
EXPECT_NE(memory_assigner.AssignMemory(false, mem_offset, zero_memory_size), GRAPH_SUCCESS);
}

tests/ut/ge/graph/load/davinci_model_unittest.cc (+34, -0)

@@ -22,6 +22,7 @@
#include "graph/utils/graph_utils.h"
#include "common/profiling/profiling_manager.h"
#include "graph/load/model_manager/davinci_model.h"
#include "graph/manager/graph_var_manager.h"

using namespace std;

@@ -51,6 +52,10 @@ int32_t MsprofReport(uint32_t moduleId, uint32_t type, void *data, uint32_t len)

TEST_F(UtestDavinciModel, init_success) {
DavinciModel model(0, nullptr);
VarManager::Instance(0)->Init(0, 0, 0, 0);
map<string, string> options;
options[GRAPH_MEMORY_MAX_SIZE] = "1048576";
VarManager::Instance(0)->SetMemoryMallocSize(options);
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");
ProfilingManager::Instance().is_load_profiling_ = true;

@@ -777,6 +782,10 @@ TEST_F(UtestDavinciModel, init_data_aipp_input_dims_normal) {

// test label_set_task Init
TEST_F(UtestDavinciModel, label_task_success) {
VarManager::Instance(0)->Init(0, 0, 0, 0);
map<string, string> options;
options[GRAPH_MEMORY_MAX_SIZE] = "1048576";
VarManager::Instance(0)->SetMemoryMallocSize(options);
DavinciModel model(0, nullptr);
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

@@ -944,6 +953,11 @@ TEST_F(UtestDavinciModel, simple_test_gmock) {
}

TEST_F(UtestDavinciModel, NnExecute) {
VarManager::Instance(0)->Init(0, 0, 0, 0);
map<string, string> options;
options[GRAPH_MEMORY_MAX_SIZE] = "1048576";
VarManager::Instance(0)->SetMemoryMallocSize(options);

DavinciModel model(0, nullptr);
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");
ProfilingManager::Instance().is_load_profiling_ = true;
@@ -967,6 +981,26 @@ TEST_F(UtestDavinciModel, NnExecute) {
NodePtr node = graph->AddNode(op_desc); // op_index = 0
}

{
OpDescPtr op_desc = CreateOpDesc("memcpy", MEMCPYASYNC);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({5120});
NodePtr node = graph->AddNode(op_desc);

domi::TaskDef *task_def = model_task_def->add_task();
task_def->set_stream_id(0);
task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
domi::MemcpyAsyncDef *memcpy_async = task_def->mutable_memcpy_async();
memcpy_async->set_src(1024);
memcpy_async->set_dst(5120);
memcpy_async->set_dst_max(512);
memcpy_async->set_count(1);
memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
memcpy_async->set_op_index(op_desc->GetId());
}

{
OpDescPtr op_desc = CreateOpDesc("output", NETOUTPUT);
op_desc->AddInputDesc(tensor);

