
hybrid_model_builder_unittest.cc

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <vector>

#define private public
#define protected public
#include "hybrid/model/hybrid_model_builder.h"
#include "hybrid/node_executor/node_executor.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/debug/ge_attr_define.h"

using namespace std;
using namespace testing;

namespace ge {
using namespace hybrid;

class UtestHybridModelBuilder : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};

// Helper: create a node with the given type and number of input/output anchors,
// using a fixed NCHW float tensor descriptor and the RTS kernel lib by default.
static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) {
  OpDescPtr op_desc = std::make_shared<OpDesc>(name, type);
  op_desc->SetStreamId(0);
  static int32_t index = 0;
  op_desc->SetId(index++);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);
  vector<int64_t> input_offset;
  for (int i = 0; i < in_num; i++) {
    op_desc->AddInputDesc(tensor);
    input_offset.emplace_back(1024);
  }
  op_desc->SetInputOffset(input_offset);

  vector<int64_t> output_offset;
  for (int i = 0; i < out_num; i++) {
    op_desc->AddOutputDesc(tensor);
    output_offset.emplace_back(1024);
  }
  op_desc->SetOutputOffset(output_offset);

  op_desc->SetWorkspace({});
  op_desc->SetWorkspaceBytes({});
  op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE");
  return graph.AddNode(op_desc);
}

TEST_F(UtestHybridModelBuilder, normal_hybrid_model_build) {
/*******************************************************************************
 *                 Exit         Identity
 *                  \          /       \.
 *                   \        /         \.
 *                    Switch            Add
 *                   /    |              |
 *                  /     |              |
 *                 /      |              |
 *         LoopCond       |              |
 *              \         |              |
 *               \        |              |
 *                \       |              |
 *                Less    |              |
 *                   \    |        NextIteration
 *                    \   |              |
 *                     \  |              |
 *                      Merge <----------|
 *                        |
 *                        |
 *                      Enter
 ******************************************************************************/
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  ge_root_model->SetModelName("test_name");
  GeModelPtr ge_sub_model = make_shared<GeModel>();
  ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model);

  // Nodes of the control-flow loop sketched above.
  auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1);
  auto merge1 = CreateNode(*graph, "merge", STREAMMERGE, 2, 2);
  auto less1 = CreateNode(*graph, "less", LESS, 2, 1);
  less1->GetOpDesc()->SetOpKernelLibName("AIcoreEngine");
  auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1);
  auto switch_t = CreateNode(*graph, "switch_t", STREAMSWITCH, 2, 0);
  auto switch_f = CreateNode(*graph, "switch_f", STREAMSWITCH, 2, 0);
  auto ident1 = CreateNode(*graph, "identity", IDENTITY, 2, 1);
  auto add1 = CreateNode(*graph, "add", ADD, 2, 1);
  add1->GetOpDesc()->SetOpKernelLibName("AIcoreEngine");
  auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1);
  auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1);
  auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto active1 = CreateNode(*graph, "active1", STREAMACTIVE, 0, 0);
  auto active2 = CreateNode(*graph, "active2", STREAMACTIVE, 0, 0);
  auto active3 = CreateNode(*graph, "active3", STREAMACTIVE, 0, 0);
  auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1);

  // Data and control edges of the loop body.
  GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1));

  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0));

  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(1));
  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch_f->GetInDataAnchor(0));
  GraphUtils::AddEdge(value0->GetOutDataAnchor(0), switch_f->GetInDataAnchor(1));

  GraphUtils::AddEdge(switch_f->GetOutControlAnchor(), exit1->GetInControlAnchor());
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0));

  GraphUtils::AddEdge(switch_t->GetOutControlAnchor(), ident1->GetInControlAnchor());
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), ident1->GetInDataAnchor(0));

  GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1));
  GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0));

  GraphUtils::AddEdge(enter1->GetOutControlAnchor(), active1->GetInControlAnchor());
  GraphUtils::AddEdge(active1->GetOutControlAnchor(), merge1->GetInControlAnchor());

  GraphUtils::AddEdge(loop1->GetOutControlAnchor(), active2->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_f->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_t->GetInControlAnchor());

  GraphUtils::AddEdge(next1->GetOutControlAnchor(), active3->GetInControlAnchor());
  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

  AttrUtils::SetStr(merge1->GetOpDesc(), ATTR_NAME_NEXT_ITERATION, next1->GetName());
  AttrUtils::SetBool(enter1->GetOpDesc(), ATTR_NAME_INSERT_FP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(output1->GetOpDesc(), ATTR_NAME_INSERT_BP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(add1->GetOpDesc(), ATTR_NAME_INSERT_FP_PROFILILNG_TASK, true);
  AttrUtils::SetBool(add1->GetOpDesc(), ATTR_NAME_INSERT_BP_PROFILILNG_TASK, true);

  // Build -> IndexSpecialNodes --> stream_merge_op_nodes_
  // Build -> LoadGraph -> RelinkNextIteration
  // Build -> LoadGraph -> LoadDynamicSubgraph --> BuildNodeItem --> NodeItem::SetDataSend
  // Build -> LoadGraph -> LoadDynamicSubgraph --> BuildControlFlowGroup --> NodeItem::SetCtrlSend

  // Register engine-to-executor mappings and placeholder executors so that
  // Build() can resolve an executor for every node in the graph.
  auto &engine_mapping = NodeExecutorManager::GetInstance().engine_mapping_;
  engine_mapping.emplace("AIcoreEngine", NodeExecutorManager::ExecutorType::AICORE);
  engine_mapping.emplace("DNN_VM_GE_LOCAL_OP_STORE", NodeExecutorManager::ExecutorType::GE_LOCAL);
  engine_mapping.emplace("aicpu_tf_kernel", NodeExecutorManager::ExecutorType::AICPU_TF);
  engine_mapping.emplace("aicpu_ascend_kernel", NodeExecutorManager::ExecutorType::AICPU_TF);
  engine_mapping.emplace("ops_kernel_info_hccl", NodeExecutorManager::ExecutorType::HCCL);
  engine_mapping.emplace("DNN_VM_RTS_OP_STORE", NodeExecutorManager::ExecutorType::RTS);
  engine_mapping.emplace("DNN_VM_HOST_CPU_OP_STORE", NodeExecutorManager::ExecutorType::HOST_CPU);

  auto &task_executor = NodeExecutorManager::GetInstance().executors_;
  task_executor.emplace(NodeExecutorManager::ExecutorType::AICORE, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::GE_LOCAL, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::AICPU_TF, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::HCCL, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::RTS, std::unique_ptr<NodeExecutor>(new NodeExecutor()));
  task_executor.emplace(NodeExecutorManager::ExecutorType::HOST_CPU, std::unique_ptr<NodeExecutor>(new NodeExecutor()));

  // Building the hybrid model should succeed for this well-formed control-flow graph.
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  ASSERT_EQ(hybrid_model_builder.Build(), SUCCESS);

  engine_mapping.clear();
  task_executor.clear();
}

TEST_F(UtestHybridModelBuilder, create_called_invalid) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  // A PARTITIONEDCALL node is not a valid member of any control-flow group.
  auto node = CreateNode(*graph, "node", PARTITIONEDCALL, 1, 1);
  NodeItem node_item(node);
  ASSERT_EQ(hybrid_model_builder.CreateStreamActiveGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateNextIterationGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(node, &node_item), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.CreateSwitchGroup(node, &node_item), INTERNAL_ERROR);

  ASSERT_EQ(hybrid_model_builder.CreateLabelSetGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELSET;
  ASSERT_EQ(hybrid_model_builder.CreateLabelSetGroup(node, &node_item), UNSUPPORTED);

  ASSERT_EQ(hybrid_model_builder.CreateLabelGotoGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELGOTO;
  ASSERT_EQ(hybrid_model_builder.CreateLabelGotoGroup(node, &node_item), UNSUPPORTED);

  ASSERT_EQ(hybrid_model_builder.CreateLabelSwitchGroup(node, &node_item), INTERNAL_ERROR);
  node_item.node_type = LABELSWITCH;
  ASSERT_EQ(hybrid_model_builder.CreateLabelSwitchGroup(node, &node_item), UNSUPPORTED);
}

TEST_F(UtestHybridModelBuilder, stream_switch_n_group) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);

  auto switch_n = CreateNode(*graph, "switch_n", STREAMSWITCHN, 1, 0);
  NodeItem node_item(switch_n);

  // Without ATTR_NAME_BATCH_NUM the group cannot be created.
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), INTERNAL_ERROR);

  // Once the attribute is present, group creation succeeds regardless of the batch count.
  uint32_t batch_num = 0;
  AttrUtils::SetInt(switch_n->GetOpDesc(), ATTR_NAME_BATCH_NUM, batch_num);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), SUCCESS);

  batch_num = 3;
  AttrUtils::SetInt(switch_n->GetOpDesc(), ATTR_NAME_BATCH_NUM, batch_num);
  ASSERT_EQ(hybrid_model_builder.CreateStreamSwitchNGroup(switch_n, &node_item), SUCCESS);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them: it takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the target hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processors so as to make full use of their compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts: GE API and GE Core.
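For orientation, the fragment below is a minimal sketch, distilled from the normal_hybrid_model_build test above, of the flow GE Core's hybrid execution path uses to turn a graph into an executable model: a ComputeGraph is wrapped in a GeRootModel, attached to a HybridModel, and handed to HybridModelBuilder::Build(). It assumes the same headers as the test file and a NodeExecutorManager that already has engines and executors registered; it is illustrative, not a complete GE API usage example.

  // Minimal sketch: build a hybrid model from a graph, assuming executors
  // have been registered as in normal_hybrid_model_build above.
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("demo_graph");
  GeRootModelPtr root_model = std::make_shared<GeRootModel>(graph);
  root_model->SetModelName("demo_model");

  HybridModel hybrid_model(root_model);
  HybridModelBuilder builder(hybrid_model);
  Status ret = builder.Build();  // SUCCESS when the graph and executor setup are valid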