
hybrid_model_builder_unittest.cc

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <vector>

#define private public
#define protected public
#include "hybrid/model/hybrid_model_builder.h"
#include "hybrid/node_executor/node_executor.h"
#include "graph/manager/host_mem_manager.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_local_context.h"
#include "graph/common/omg_util.h"

using namespace std;
using namespace testing;

namespace ge {
using namespace hybrid;

class UtestHybridModelBuilder : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};
// Helper: add a node of the given type with in_num/out_num anchors to the graph,
// using fixed NCHW/FLOAT tensor descriptors and the RTS kernel store by default.
static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) {
  OpDescPtr op_desc = std::make_shared<OpDesc>(name, type);
  op_desc->SetStreamId(0);
  static int32_t index = 0;
  op_desc->SetId(index++);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);
  vector<int64_t> input_offset;
  for (int i = 0; i < in_num; i++) {
    op_desc->AddInputDesc(tensor);
    input_offset.emplace_back(1024);
  }
  op_desc->SetInputOffset(input_offset);

  vector<int64_t> output_offset;
  for (int i = 0; i < out_num; i++) {
    op_desc->AddOutputDesc(tensor);
    output_offset.emplace_back(1024);
  }
  op_desc->SetOutputOffset(output_offset);

  op_desc->SetWorkspace({});
  op_desc->SetWorkspaceBytes({});
  op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE");
  return graph.AddNode(op_desc);
}

// Helper: add a Constant node whose weight tensor is `size` bytes.
static NodePtr CreateConstantNode(const ComputeGraphPtr &graph, const string &name, size_t size) {
  OpDescPtr op_desc = std::make_shared<OpDesc>(name, CONSTANTOP);
  op_desc->AddOutputDesc(GeTensorDesc());

  GeTensorPtr value = std::make_shared<GeTensor>(GeTensorDesc(), size);
  (void)AttrUtils::SetTensor(op_desc, ATTR_NAME_WEIGHTS, value);
  return graph->AddNode(op_desc);
}
// Builds a while-loop style control-flow graph and checks that control-flow
// frame/group indexes are propagated to the expected nodes after Build().
TEST_F(UtestHybridModelBuilder, normal_hybrid_model_build) {
/*******************************************************************************
 *                 Exit         Identity
 *                   \          /       \.
 *                    \        /         \.
 *                     Switch            Add
 *                    /   |               |
 *            Active /    |               |
 *                  /     |               |
 *           LoopCond     |               |
 *               \        |               |
 *                \       |               |
 *                 \      |               |
 *                 Less   |               |
 *                   \    |         NextIteration
 *                    \   |               |
 *                     \  |               | Active
 *                      Merge <-----------|
 *                        |
 *                        | Active
 *                        |
 *                      Enter
 ******************************************************************************/
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  ge_root_model->SetModelName("test_name");
  GeModelPtr ge_sub_model = make_shared<GeModel>();
  ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model);

  auto data1 = CreateNode(*graph, "data", DATA, 1, 1);
  auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1);
  auto merge1 = CreateNode(*graph, "merge", STREAMMERGE, 2, 2);
  auto less1 = CreateNode(*graph, "less", LESS, 2, 1);
  less1->GetOpDesc()->SetOpKernelLibName("AIcoreEngine");
  auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1);
  auto switch_t = CreateNode(*graph, "switch_t", STREAMSWITCH, 2, 0);
  auto switch_f = CreateNode(*graph, "switch_f", STREAMSWITCH, 2, 0);
  auto ident1 = CreateNode(*graph, "identity", IDENTITY, 2, 1);
  auto add1 = CreateNode(*graph, "add", ADD, 2, 1);
  add1->GetOpDesc()->SetOpKernelLibName("AIcoreEngine");
  auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1);
  auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1);
  auto value0 = CreateNode(*graph, "const1", CONSTANT, 0, 1);
  auto value1 = CreateNode(*graph, "const2", CONSTANT, 0, 1);
  auto active1 = CreateNode(*graph, "active1", STREAMACTIVE, 0, 0);
  auto active2 = CreateNode(*graph, "active2", STREAMACTIVE, 0, 0);
  auto active3 = CreateNode(*graph, "active3", STREAMACTIVE, 0, 0);
  auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1);

  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0));
  GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0));

  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(1));
  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch_f->GetInDataAnchor(0));
  GraphUtils::AddEdge(value0->GetOutDataAnchor(0), switch_f->GetInDataAnchor(1));

  GraphUtils::AddEdge(switch_f->GetOutControlAnchor(), exit1->GetInControlAnchor());
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0));

  GraphUtils::AddEdge(switch_t->GetOutControlAnchor(), ident1->GetInControlAnchor());
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), ident1->GetInDataAnchor(0));
  GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1));
  GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0));

  GraphUtils::AddEdge(enter1->GetOutControlAnchor(), active1->GetInControlAnchor());
  GraphUtils::AddEdge(active1->GetOutControlAnchor(), merge1->GetInControlAnchor());
  GraphUtils::AddEdge(next1->GetOutControlAnchor(), active3->GetInControlAnchor());
  SetNextIteration(merge1, next1);  // for relink NextIteration --> StreamMerge
  GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_t->GetInControlAnchor());  // Test for not merge.

  GraphUtils::AddEdge(loop1->GetOutControlAnchor(), active2->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_f->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_t->GetInControlAnchor());

  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

  AttrUtils::SetBool(enter1->GetOpDesc(), ATTR_NAME_INSERT_FP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(output1->GetOpDesc(), ATTR_NAME_INSERT_BP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(add1->GetOpDesc(), ATTR_NAME_INSERT_FP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(add1->GetOpDesc(), ATTR_NAME_INSERT_BP_PROFILILNG_TASK, true);

  SetControlFlowGroup(enter1, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(active1, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(merge1, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(loop1, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(active2, switch_t->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_t, switch_t->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_f, switch_t->GetOpDesc()->GetId());
  SetControlFlowGroup(next1, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(active3, loop1->GetOpDesc()->GetId());
  SetControlFlowGroup(exit1, loop1->GetOpDesc()->GetId());

  // Build -> IndexSpecialNodes --> stream_merge_op_nodes_
  // Build -> LoadGraph -> RelinkNextIteration
  // Build -> LoadGraph -> LoadDynamicSubgraph --> BuildNodeItem --> NodeItem::SetDataSend
  // Build -> LoadGraph -> LoadDynamicSubgraph --> BuildControlFlowGroup --> NodeItem::SetCtrlSend
  auto &engine_mapping = NodeExecutorManager::GetInstance().engine_mapping_;
  engine_mapping.emplace("AIcoreEngine", NodeExecutorManager::ExecutorType::AICORE);
  engine_mapping.emplace("DNN_VM_GE_LOCAL_OP_STORE", NodeExecutorManager::ExecutorType::GE_LOCAL);
  engine_mapping.emplace("aicpu_tf_kernel", NodeExecutorManager::ExecutorType::AICPU_TF);
  engine_mapping.emplace("aicpu_ascend_kernel", NodeExecutorManager::ExecutorType::AICPU_TF);
  engine_mapping.emplace("ops_kernel_info_hccl", NodeExecutorManager::ExecutorType::HCCL);
  engine_mapping.emplace("DNN_VM_RTS_OP_STORE", NodeExecutorManager::ExecutorType::RTS);
  engine_mapping.emplace("DNN_VM_HOST_CPU_OP_STORE", NodeExecutorManager::ExecutorType::HOST_CPU);

  auto &task_executor = NodeExecutorManager::GetInstance().executors_;
  task_executor.emplace(NodeExecutorManager::ExecutorType::AICORE, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::GE_LOCAL, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::AICPU_TF, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::HCCL, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::RTS, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::HOST_CPU, std::unique_ptr<NodeExecutor>(new NodeExecutor()));

  const auto control_group_index = loop1->GetOpDesc()->GetId();
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  ASSERT_EQ(hybrid_model_builder.Build(), SUCCESS);

  const auto TestFrameGroup = [&hybrid_model](const NodePtr &n, int64_t index) {
    const auto it = hybrid_model.node_items_.find(n);
    ASSERT_NE(hybrid_model.node_items_.end(), it);
    ASSERT_EQ(it->second->frame_index_, index);
    ASSERT_EQ(it->second->parent_frame_, -1);
  };

  auto root_graph = hybrid_model.root_graph_;
  auto enter1_node = root_graph->FindNode("enter");
  auto active1_node = root_graph->FindNode("active1");
  auto active2_node = root_graph->FindNode("active2");
  auto active3_node = root_graph->FindNode("active3");
  auto output1_node = root_graph->FindNode("net_output");
  TestFrameGroup(enter1_node, control_group_index);
  TestFrameGroup(active1_node, control_group_index);
  TestFrameGroup(active2_node, control_group_index);
  TestFrameGroup(active3_node, control_group_index);
  TestFrameGroup(output1_node, -1);

  engine_mapping.clear();
  task_executor.clear();
}
// The Create*Group builders reject a node whose type does not match (INTERNAL_ERROR);
// label-op groups report UNSUPPORTED even when the node type matches.
TEST_F(UtestHybridModelBuilder, create_called_invalid) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  auto node = CreateNode(*graph, "node", PARTITIONEDCALL, 1, 1);
  NodeItem node_item(node);
  ASSERT_EQ(hybrid_model_builder.CreateStreamActiveGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateNextIterationGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateSwitchGroup(node, &node_item), INTERNAL_ERROR);

  ASSERT_EQ(hybrid_model_builder.CreateLabelSetGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELSET;
  ASSERT_EQ(hybrid_model_builder.CreateLabelSetGroup(node, &node_item), UNSUPPORTED);

  ASSERT_EQ(hybrid_model_builder.CreateLabelGotoGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELGOTO;
  ASSERT_EQ(hybrid_model_builder.CreateLabelGotoGroup(node, &node_item), UNSUPPORTED);

  ASSERT_EQ(hybrid_model_builder.CreateLabelSwitchGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELSWITCH;
  ASSERT_EQ(hybrid_model_builder.CreateLabelSwitchGroup(node, &node_item), UNSUPPORTED);
}
TEST_F(UtestHybridModelBuilder, stream_switch_n_group) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  auto switch_n = CreateNode(*graph, "switch_n", STREAMSWITCHN, 1, 0);
  NodeItem node_item(switch_n);

  // no batch_num
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), INTERNAL_ERROR);

  uint32_t batch_num = 0;
  AttrUtils::SetInt(switch_n->GetOpDesc(), ATTR_NAME_BATCH_NUM, batch_num);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), SUCCESS);

  batch_num = 3;
  AttrUtils::SetInt(switch_n->GetOpDesc(), ATTR_NAME_BATCH_NUM, batch_num);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), SUCCESS);
}
TEST_F(UtestHybridModelBuilder, init_constant_op_host_) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  auto const_1 = CreateConstantNode(graph, "const_1", 0);
  hybrid_model_builder.constant_op_nodes_.emplace(const_1->GetName(), const_1);
  auto const_2 = CreateConstantNode(graph, "const_2", 10);
  hybrid_model_builder.constant_op_nodes_.emplace(const_2->GetName(), const_2);

  std::map<std::string, string> options;
  options["ge.exec.placement"] = "HOST";
  GetThreadLocalContext().SetGraphOption(options);
  EXPECT_EQ(hybrid_model_builder.InitConstantOps(), SUCCESS);
  EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 2);
}
TEST_F(UtestHybridModelBuilder, init_host_var_with_host_mem) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  OpDescPtr op_desc = std::make_shared<OpDesc>("host_params", VARIABLE);
  GeTensorDesc tensor_desc(GeShape(), FORMAT_NHWC, DT_FLOAT);
  TensorUtils::SetSize(tensor_desc, 512);
  op_desc->AddOutputDesc(tensor_desc);
  auto host_var = graph->AddNode(op_desc);
  hybrid_model.host_variable_nodes_.emplace("host_params", host_var);

  std::map<std::string, string> options;
  options["ge.exec.placement"] = "HOST";
  GetThreadLocalContext().SetGraphOption(options);
  EXPECT_EQ(hybrid_model_builder.InitVariableTensors(), SUCCESS);
  EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 1);
}
TEST_F(UtestHybridModelBuilder, init_host_var_with_host_shared_mem) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  OpDescPtr op_desc = std::make_shared<OpDesc>("host_params", VARIABLE);
  GeTensorDesc tensor_desc(GeShape(), FORMAT_NHWC, DT_FLOAT);
  TensorUtils::SetSize(tensor_desc, 512);
  op_desc->AddOutputDesc(tensor_desc);
  auto host_var = graph->AddNode(op_desc);
  hybrid_model.host_variable_nodes_.emplace("host_params", host_var);

  std::map<std::string, string> options;
  options["ge.exec.placement"] = "HOST";
  GetThreadLocalContext().SetGraphOption(options);

  SharedMemInfo info;
  uint8_t tmp(0);
  info.device_address = &tmp;
  std::shared_ptr<AlignedPtr> aligned_ptr = std::make_shared<AlignedPtr>(512, 16);
  info.host_aligned_ptr = aligned_ptr;
  info.fd = 0;
  info.mem_size = 100;
  info.op_name = "host_params";
  HostMemManager::Instance().var_memory_base_map_["host_params"] = info;
  EXPECT_EQ(hybrid_model_builder.InitVariableTensors(), SUCCESS);
  EXPECT_EQ(hybrid_model_builder.hybrid_model_.variable_tensors_.size(), 1);
  HostMemManager::Instance().var_memory_base_map_.clear();
}
TEST_F(UtestHybridModelBuilder, TestInitHcclExecutorOnDemand) {
  NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL);
  // build aicore task
  domi::ModelTaskDef model_task_def;
  std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetModelTaskDef(model_task_def_ptr);

  // No hccl task
  domi::TaskDef *task_def = model_task_def_ptr->add_task();
  task_def->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
  ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS);

  // get executor failed due to no builder
  task_def = model_task_def_ptr->add_task();
  task_def->set_type(RT_MODEL_TASK_HCCL);
  ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), INTERNAL_ERROR);

  // get executor success
  REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::HCCL, NodeExecutor);
  ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS);

  // repeat get, do not access builder
  NodeExecutorManager::GetInstance().builders_.erase(NodeExecutorManager::ExecutorType::HCCL);
  ASSERT_EQ(HybridModelBuilder::InitHcclExecutorOnDemand(ge_model), SUCCESS);
}
TEST_F(UtestHybridModelBuilder, copy_graph_success) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  Status st = hybrid_model_builder.CopyGraph();
  EXPECT_EQ(st, SUCCESS);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and bridges the two. GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
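For orientation, the sketch below shows how a client program typically drives GE through the GE API layer: initialize the engine, open a Session, hand a graph to GE Core with AddGraph, and execute it with RunGraph. This is a minimal illustration assuming the public header ge/ge_api.h is available; the option maps are left empty and the graph construction (normally produced by the front end, ME) is omitted, so it only demonstrates the call sequence rather than a complete application.

#include <map>
#include <string>
#include <vector>

#include "ge/ge_api.h"  // public GE API: GEInitialize/GEFinalize, ge::Session

int main() {
  // Engine-wide initialization; real applications pass deployment-specific
  // options here (keys omitted in this sketch).
  std::map<std::string, std::string> options;
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  {
    // A Session owns graphs handed to GE Core: AddGraph registers a graph
    // for optimization and compilation, RunGraph executes it.
    ge::Session session(options);
    ge::Graph graph("demo_graph");  // placeholder; normally built by the front end (ME)
    const uint32_t graph_id = 1U;

    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }  // the Session is destroyed before GEFinalize

  (void)ge::GEFinalize();
  return 0;
}

The HybridModelBuilder exercised by the unit tests above belongs to GE Core's hybrid execution path; the GE API layer shown here is how that machinery is reached from user code.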