
graph_builder.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/build/graph_builder.h"
#include "common/ge/ge_util.h"
#include "common/helper/model_helper.h"
#include "common/opskernel/ops_kernel_info_types.h"
#include "graph/build/run_context.h"
#include "graph/build/stream_graph_optimizer.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/type_utils.h"
#include "init/gelib.h"
#include "model/ge_model.h"

using domi::BuildMode;

namespace {
const int32_t kInvalidPerfLevel = -1;
}  // namespace

namespace ge {
GraphBuilder::GraphBuilder() : build_mode_(BuildMode::GEN_TASK_WITH_FUSION), hcom_parallel_(false) {}
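
// Apply graph-manager options: the stream parallelism limits, the HCOM parallel flag, and the build
// mode (a valid perf_level overrides the default GEN_TASK_WITH_FUSION).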
void GraphBuilder::SetOptions(const ge::GraphManagerOptions &options) {
  stream_max_parallel_num_ = options.stream_max_parallel_num;
  hcom_parallel_ = options.hcom_parallel;

  if (options.perf_level == kInvalidPerfLevel) {
    build_mode_ = static_cast<int>(BuildMode::GEN_TASK_WITH_FUSION);
  } else {
    build_mode_ = options.perf_level;
  }
}
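
// For every node in the graph, resolve the ops kernel library that will execute it (re-querying the
// DNN engine manager when the name is empty), set the node's input sizes, and let the kernel info
// store calculate the op's running parameters.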
Status GraphBuilder::CalcOpParam(const ge::ComputeGraphPtr &graph) {
  GELOGI("Begin to calculate op running param.");
  GE_CHECK_NOTNULL(graph);
  auto instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GraphBuilder: GE is not initialized");
    return GE_CLI_GE_NOT_INITIALIZED;
  }

  for (const auto &node_ptr : graph->GetAllNodes()) {
    GE_CHECK_NOTNULL(node_ptr->GetOpDesc());
    std::string kernel_lib_name = node_ptr->GetOpDesc()->GetOpKernelLibName();
    if (kernel_lib_name.empty()) {
      // reset op kernel lib
      (void)instance_ptr->DNNEngineManagerObj().GetDNNEngineName(node_ptr->GetOpDesc());
      kernel_lib_name = node_ptr->GetOpDesc()->GetOpKernelLibName();
      if (kernel_lib_name.empty()) {
        GELOGE(INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node_ptr->GetName().c_str(),
               node_ptr->GetType().c_str());
        return INTERNAL_ERROR;
      }
    }

    OpsKernelInfoStorePtr kernel_info = instance_ptr->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
    if (kernel_info != nullptr) {
      auto ret = SetInputSize(node_ptr);
      if (ret != SUCCESS) {
        GELOGE(ret, "Set node inputDesc size failed, node name is %s", node_ptr->GetName().c_str());
        return ret;
      }
      ret = kernel_info->CalcOpRunningParam(*node_ptr);
      if (ret != SUCCESS) {
        GELOGE(ret, "Calculate op running param failed, node name is %s", node_ptr->GetName().c_str());
        return ret;
      }
    } else {
      GELOGE(GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node_ptr->GetName().c_str());
      return INTERNAL_ERROR;
    }
  }

  GELOGI("Success to calculate op running param.");
  return SUCCESS;
}
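
// End-to-end build: second-stage partition, model pre-build, op running-param calculation, model
// build, task generation, and finally serialization of the model data into ge_model_ptr.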
Status GraphBuilder::Build(ComputeGraphPtr &comp_graph, std::vector<SubGraphInfoPtr> &subgraph_ptr_list,
                           GeModelPtr &ge_model_ptr, uint64_t session_id) {
  GELOGI("Start to build model.");
  if (comp_graph == nullptr) {
    GELOGE(GE_GRAPH_PARAM_NULLPTR, "Graph build comp_graph is null.");
    return GE_GRAPH_PARAM_NULLPTR;
  }

  Status ret = SecondPartition(comp_graph, subgraph_ptr_list);
  GE_CHK_STATUS_RET(ret, "Graph second partition Failed.");
  auto subgraph_map = graph_partitioner_.GetSubGraphMap();

  GE_TIMESTAMP_START(BuildSubgraph);
  ge::ModelBuilder builder(comp_graph, subgraph_map, stream_max_parallel_num_, hcom_parallel_, build_mode_);
  GELOGI("[Build] invoke the other opskernel to generate task.");

  GraphUtils::DumpGEGraph(comp_graph, "BeforePreBuildModel");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "BeforePreBuildModel");

  GE_TIMESTAMP_START(PreBuildModel);
  GE_CHK_STATUS_RET(builder.PreBuildModel(), "Builder PreBuildModel() return fail.");
  GE_TIMESTAMP_END(PreBuildModel, "GraphBuilder::PreBuildModel");

  GraphUtils::DumpGEGraph(comp_graph, "AfterPreBuildModel");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterPreBuildModel");

  GE_TIMESTAMP_START(CalcOpParam);
  GE_CHK_STATUS_RET(CalcOpParam(comp_graph), "Builder CalcOpParam() return fail.");
  GE_TIMESTAMP_END(CalcOpParam, "GraphBuilder::CalcOpParam");

  GraphUtils::DumpGEGraph(comp_graph, "AfterCalcOpParam");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterCalcOpParam");

  ModelPtr model_ptr = MakeShared<ge::Model>();
  if (model_ptr == nullptr) {
    return MEMALLOC_FAILED;
  }

  GE_TIMESTAMP_START(BuildModelForGetTask);
  GE_CHK_STATUS_RET(builder.BuildModelForGetTask(*model_ptr), "Builder BuildModelForGetTask() return fail.");
  GE_TIMESTAMP_END(BuildModelForGetTask, "GraphBuilder::BuildModelForGetTask");

  GraphUtils::DumpGEGraph(comp_graph, "AfterBuildModel");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterBuildModel");
  for (auto graph : comp_graph->GetAllSubgraphs()) {
    GraphUtils::DumpGEGraphToOnnx(*graph, "SubgraphGetTask");
  }

  GE_TIMESTAMP_START(GetTaskInfo);
  ret = GetTaskInfo(builder, model_ptr, comp_graph, subgraph_map, session_id);
  GE_TIMESTAMP_END(GetTaskInfo, "GraphBuilder::GetTaskInfo");

  GraphUtils::DumpGEGraph(comp_graph, "AfterGetTask");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterGetTask");
  if (ret != SUCCESS) {
    GELOGE(ret, "Builder GetTaskInfo() return fail.");
    return ret;
  }

  ge_model_ptr = MakeShared<ge::GeModel>();
  if (ge_model_ptr == nullptr) {
    return MEMALLOC_FAILED;
  }
  GE_CHK_STATUS_RET(builder.SaveDataToModel(*model_ptr, *ge_model_ptr), "model builder SaveDataToModel() return fail.");
  GELOGI("Success to build model.");
  GE_TIMESTAMP_END(BuildSubgraph, "GraphBuilder::Build");
  return SUCCESS;
}
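
// Prepare the feature-map and weight memory bases, initialize a run context, optimize the streamed
// subgraphs, and then generate the model's task info through TaskGenerator.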
Status GraphBuilder::GetTaskInfo(const ge::ModelBuilder &builder, const ModelPtr &model_ptr,
                                 ComputeGraphPtr &comp_graph, Graph2SubGraphInfoList &subgraph_map,
                                 uint64_t session_id) {
  GE_CHECK_NOTNULL(model_ptr);
  GE_CHECK_NOTNULL(comp_graph);

  int64_t memory_size = 0;
  if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_MEMORY_SIZE, memory_size)) {
    GELOGE(INTERNAL_ERROR, "Get memory size fail.");
    return INTERNAL_ERROR;
  }
  int64_t weight_size = 0;
  if (!AttrUtils::GetInt(model_ptr, ATTR_MODEL_WEIGHT_SIZE, weight_size)) {
    GELOGE(INTERNAL_ERROR, "Get weight memory size fail.");
    return INTERNAL_ERROR;
  }

  auto *get_mem_base = reinterpret_cast<uint8_t *>(ge::VarManager::Instance(0)->GetVarMemMaxSize());
  uint8_t *get_weight_mem_base = get_mem_base;
  if (weight_size > 0) {
    get_weight_mem_base = get_mem_base + memory_size;
  }

  RunContextUtil run_context;
  Status ret = run_context.InitMemInfo(get_mem_base, memory_size, get_weight_mem_base, weight_size);
  if (ret != SUCCESS) {
    GELOGE(ret, "task_generator init mem info fail.");
    return ret;
  }
  auto weight_buffer = builder.GetWeightBuffer();
  ret = run_context.CreateRunContext(*model_ptr, comp_graph, weight_buffer, session_id);
  if (ret != SUCCESS) {
    GELOGE(ret, "runContext create run context fail.");
    return ret;
  }

  StreamGraphOptimizer stream_optimizer;
  ret = stream_optimizer.OptimizeStreamedSubGraph(comp_graph, subgraph_map, run_context.GetRunContext());
  if (ret != SUCCESS) {
    GELOGE(ret, "Optimize streamed subGraph fail.");
    return ret;
  }
  GraphUtils::DumpGEGraph(comp_graph, "AfterOptimizeStreamedSubGraph");
  GraphUtils::DumpGEGraphToOnnx(*comp_graph, "AfterOptimizeStreamedSubGraph");

  auto *get_var_mem_base = reinterpret_cast<uint8_t *>(ge::VarManager::Instance(0)->GetVarMemLogicBase());
  uint64_t var_size = (ge::VarManager::Instance(session_id)->GetVarMemSize(RT_MEMORY_HBM) > 0)
                        ? ge::VarManager::Instance(0)->GetVarMemMaxSize()
                        : 0;
  TaskGenerator task_generator(get_var_mem_base, var_size);
  ret = task_generator.GetTaskInfo(*model_ptr, comp_graph, session_id, run_context.GetRunContext());
  return ret;
}
Status GraphBuilder::SetInputSize(const ge::NodePtr &node_ptr) {
  // set input_desc.size = src_node.output_desc.size
  if (node_ptr->GetType() == DATA) {
    if (UpdateDataInputSize(node_ptr) != SUCCESS) {
      GELOGE(FAILED, "Update data input size failed.");
      return FAILED;
    }
  }

  for (const auto &in_data_anchor : node_ptr->GetAllInDataAnchors()) {
    const auto &peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
    GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
    const auto &src_node = peer_out_anchor->GetOwnerNode();
    const auto &src_op = src_node->GetOpDesc();
    GE_IF_BOOL_EXEC(src_op == nullptr, continue);
    auto node_op_desc = node_ptr->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);

    // set dst_node.input_desc = src_node.output_desc
    ge::GeTensorDesc desc_temp(src_op->GetOutputDesc(peer_out_anchor->GetIdx()));
    int64_t size = 0;
    GE_IF_BOOL_EXEC(ge::TensorUtils::GetSize(desc_temp, size) != SUCCESS, GELOGI("Get size failed!"));
    GELOGD("src node %s output desc, dim_size: %zu, mem_size: %ld, format: %s, type: %s.", src_node->GetName().c_str(),
           desc_temp.GetShape().GetDimNum(), size, TypeUtils::FormatToSerialString(desc_temp.GetFormat()).c_str(),
           TypeUtils::DataTypeToSerialString(desc_temp.GetDataType()).c_str());
    for (size_t i = 0; i < desc_temp.GetShape().GetDimNum(); ++i) {
      GELOGD("dims[%zu]: %ld", i, desc_temp.GetShape().GetDim(i));
    }

    auto input_desc = node_op_desc->GetInputDescPtr(in_data_anchor->GetIdx());
    GE_CHECK_NOTNULL(input_desc);
    ge::TensorUtils::SetSize(const_cast<GeTensorDesc &>(*input_desc), size);
    GE_CHK_STATUS_RET(node_op_desc->UpdateInputDesc(in_data_anchor->GetIdx(), *input_desc));
    GELOGD("%s input desc, dim_size: %zu, mem_size: %ld, format: %s, type: %s.", node_ptr->GetName().c_str(),
           input_desc->GetShape().GetDimNum(), size, TypeUtils::FormatToSerialString(input_desc->GetFormat()).c_str(),
           TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str());
  }
  return SUCCESS;
}
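
// For a Data node, whose single input feeds straight through to its single output, make sure the
// input tensor's size attribute is set, deriving it from the output tensor when it is missing.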
Status GraphBuilder::UpdateDataInputSize(const ge::NodePtr &node_ptr) {
  const auto &op_desc = node_ptr->GetOpDesc();
  if (op_desc == nullptr) {
    GELOGE(FAILED, "Op desc is nullptr.");
    return FAILED;
  }

  // data op only has one output anchor
  ge::GeTensorDesc output_desc = op_desc->GetOutputDesc(0);
  int64_t output_size = 0;
  if (ge::TensorUtils::GetSize(output_desc, output_size) != SUCCESS) {
    GELOGW("Get size failed!");
  }

  if (output_size > 0) {
    GELOGI("No need to update data input size.");
    return SUCCESS;
  } else {
    int64_t real_dim_size = 0;
    ge::graphStatus graph_status = TensorUtils::GetTensorSizeInBytes(output_desc, real_dim_size);
    if (graph_status != GRAPH_SUCCESS) {
      GELOGE(FAILED, "Get tensor size in bytes failed.");
      return FAILED;
    }
    // data op only has one input anchor
    ge::GeTensorDesc input_desc = op_desc->GetInputDesc(0);
    ge::TensorUtils::SetSize(input_desc, real_dim_size);
    if (op_desc->UpdateInputDesc(0, input_desc) != GRAPH_SUCCESS) {
      GELOGE(FAILED, "Update input desc size failed.");
      return FAILED;
    }
  }
  return SUCCESS;
}
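
// Run the second-stage graph partition and return the subgraph list that belongs to comp_graph.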
Status GraphBuilder::SecondPartition(ge::ComputeGraphPtr &comp_graph, vector<ge::SubGraphInfoPtr> &subgraph_ptr_list) {
  GELOGI("[SecondPartition] second partition.");
  GE_TIMESTAMP_START(GraphPartition2);
  auto ret = graph_partitioner_.Partition(comp_graph, GraphPartitioner::kSecondPartitioning);
  if (ret != SUCCESS) {
    GELOGE(ret, "Graph partition Failed");
    return ret;
  }

  auto graph_2_subgraphlist = graph_partitioner_.GetSubGraphMap();
  if (graph_2_subgraphlist.find(comp_graph) != graph_2_subgraphlist.end()) {
    subgraph_ptr_list = graph_2_subgraphlist[comp_graph];
  } else {
    GELOGE(FAILED, "Find subgraph failed.");
    return FAILED;
  }
  GE_TIMESTAMP_END(GraphPartition2, "GraphPartitioner::Partition2");
  return ret;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them: GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE is optimized specifically for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user.
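
For orientation, here is a minimal, hypothetical sketch of how a caller might drive the GraphBuilder defined above, based only on the signatures in this file; the option values, the function name BuildGraphExample, and the session_id are illustrative, not taken from GE's real call sites.

#include <vector>
#include "graph/build/graph_builder.h"
#include "model/ge_model.h"

// Hypothetical driver: partition a compute graph and build it into a GeModel.
ge::Status BuildGraphExample(ge::ComputeGraphPtr &comp_graph) {
  ge::GraphBuilder builder;

  ge::GraphManagerOptions options;  // illustrative: default-constructed options
  builder.SetOptions(options);      // assuming perf_level defaults to kInvalidPerfLevel (-1),
                                    // this keeps the GEN_TASK_WITH_FUSION build mode

  std::vector<ge::SubGraphInfoPtr> subgraphs;  // filled by the second partition
  ge::GeModelPtr ge_model = nullptr;           // allocated inside Build on success
  const uint64_t session_id = 0;               // illustrative session id

  return builder.Build(comp_graph, subgraphs, ge_model, session_id);
}

On success, ge_model holds the model data saved by SaveDataToModel, and subgraphs contains the second-partition result for comp_graph.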