You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ge_hybrid_unittest.cc 29 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659
  1. /**
  2. * Copyright 2019-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <gtest/gtest.h>
  17. #include <gmock/gmock.h>
  18. #include <vector>
  19. #include "runtime/rt.h"
  20. #include "graph/utils/node_utils.h"
  21. #define protected public
  22. #define private public
  23. #include "hybrid/model/hybrid_model_builder.h"
  24. #include "hybrid/model/hybrid_model.h"
  25. #include "hybrid/node_executor/node_executor.h"
  26. #include "model/ge_model.h"
  27. #include "model/ge_root_model.h"
  28. #include "hybrid/node_executor/aicore/aicore_op_task.h"
  29. #include "framework/common/taskdown_common.h"
  30. #include "framework/common/debug/log.h"
  31. #include "graph/ge_context.h"
  32. #include "hybrid/executor/hybrid_execution_context.h"
  33. #include "hybrid/executor/hybrid_model_executor.h"
  34. #include "hybrid/node_executor/aicore/aicore_task_builder.h"
  35. #include "graph/load/model_manager/tbe_handle_store.h"
  36. #include "graph/manager/graph_mem_allocator.h"
  37. #include "hybrid/common/npu_memory_allocator.h"
  38. #include "graph/types.h"
  39. #include "graph/utils/tensor_utils.h"
  40. #include "graph/testcase/ge_graph/graph_builder_utils.h"
  41. #undef private
  42. #undef protected
  43. using namespace std;
  44. using namespace testing;
  45. using namespace ge;
  46. using namespace hybrid;
  47. class UtestGeHybrid : public testing::Test {
  48. protected:
  49. void SetUp() {}
  50. void TearDown() {
  51. NpuMemoryAllocator::allocators_.clear();
  52. }
  53. };
  54. static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") {
  55. auto op_desc = std::make_shared<ge::OpDesc>(name, type);
  56. op_desc->SetStreamId(0);
  57. op_desc->SetId(0);
  58. op_desc->SetWorkspace({});
  59. ;
  60. op_desc->SetWorkspaceBytes({});
  61. op_desc->SetInputOffset({});
  62. op_desc->SetOutputOffset({});
  63. ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF_AIVEC");
  64. bool support_dynamic = true;
  65. ge::AttrUtils::GetBool(op_desc, "support_dynamicshape", support_dynamic);
  66. return op_desc;
  67. }
  68. TEST_F(UtestGeHybrid, aicore_op_task_init_success) {
  69. // build aicore task
  70. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  71. domi::TaskDef task_def;
  72. task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
  73. domi::KernelDefWithHandle *kernel_with_handle = task_def.mutable_kernel_with_handle();
  74. kernel_with_handle->set_original_kernel_key("");
  75. kernel_with_handle->set_node_info("");
  76. kernel_with_handle->set_block_dim(32);
  77. kernel_with_handle->set_args_size(64);
  78. string args(64, '1');
  79. kernel_with_handle->set_args(args.data(), 64);
  80. domi::KernelContext *context = kernel_with_handle->mutable_context();
  81. context->set_op_index(1);
  82. context->set_kernel_type(2); // ccKernelType::TE
  83. uint16_t args_offset[9] = {0};
  84. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  85. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  86. std::vector<char> kernelBin;
  87. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  88. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  89. std::string kernel_name("kernel/Add");
  90. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  91. ASSERT_EQ(aicore_task->InitWithTaskDef(*op_desc.get(), task_def), SUCCESS);
  92. rtStream_t stream = nullptr;
  93. rtStreamCreate(&stream, 0);
  94. ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  95. char *handle = "";
  96. aicore_task->handle_ = handle;
  97. aicore_task->tiling_key_ = 1;
  98. ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  99. }
  100. TEST_F(UtestGeHybrid, task_update_tiling_info) {
  101. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  102. aicore_task->is_single_op_ = true;
  103. auto graph = make_shared<ComputeGraph>("graph");
  104. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  105. ge::AttrUtils::SetStr(op_desc, "compile_info_key", "key");
  106. ge::AttrUtils::SetStr(op_desc, "compile_info_json", "json");
  107. auto node = graph->AddNode(op_desc);
  108. optiling::OpRunInfo tiling_info;
  109. ASSERT_EQ(aicore_task->CalcTilingInfo(node, tiling_info), SUCCESS);
  110. }
  111. TEST_F(UtestGeHybrid, index_taskdefs_failed) {
  112. // build aicore task
  113. domi::ModelTaskDef model_task_def;
  114. std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  115. domi::TaskDef *task_def = model_task_def_ptr->add_task();
  116. GeModelPtr ge_model = make_shared<GeModel>();
  117. ge_model->SetModelTaskDef(model_task_def_ptr);
  118. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  119. task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  120. domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  121. kernel_with_handle->set_original_kernel_key("");
  122. kernel_with_handle->set_node_info("");
  123. kernel_with_handle->set_block_dim(32);
  124. kernel_with_handle->set_args_size(64);
  125. string args(64, '1');
  126. kernel_with_handle->set_args(args.data(), 64);
  127. domi::KernelContext *context = kernel_with_handle->mutable_context();
  128. context->set_op_index(1);
  129. context->set_kernel_type(2); // ccKernelType::TE
  130. uint16_t args_offset[9] = {0};
  131. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  132. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  133. std::vector<char> kernelBin;
  134. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  135. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  136. std::string kernel_name("kernel/Add");
  137. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  138. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  139. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  140. HybridModel hybrid_model(ge_root_model);
  141. HybridModelBuilder hybrid_model_builder(hybrid_model);
  142. ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), INTERNAL_ERROR);
  143. }
  144. TEST_F(UtestGeHybrid, parse_force_infershape_nodes) {
  145. const char *const kForceInfershape = "_force_infershape_when_running";
  146. auto graph = make_shared<ComputeGraph>("graph");
  147. OpDescPtr op_desc = CreateOpDesc("Conv2D", "Conv2D");
  148. ge::AttrUtils::SetBool(op_desc, kForceInfershape, true);
  149. auto node = graph->AddNode(op_desc);
  150. std::unique_ptr<NodeItem> new_node;
  151. NodeItem::Create(node, new_node);
  152. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  153. HybridModel hybrid_model(ge_root_model);
  154. HybridModelBuilder hybrid_model_builder(hybrid_model);
  155. ASSERT_EQ(hybrid_model_builder.ParseForceInfershapeNodes(node, *new_node), SUCCESS);
  156. }
  157. static ComputeGraphPtr BuildDataDirectConnectGraph() {
  158. const char *kRefIndex = "_parent_node_index";
  159. ge::ut::GraphBuilder builder("subgraph");
  160. auto data = builder.AddNode("Data", "Data", 1, 1);
  161. auto netoutput = builder.AddNode("NetOutput", "NetOutput", 1, 1);
  162. (void)AttrUtils::SetInt(netoutput->GetOpDesc()->MutableInputDesc(0), kRefIndex, 0);
  163. builder.AddDataEdge(data, 0, netoutput, 0);
  164. return builder.GetGraph();
  165. }
  166. TEST_F(UtestGeHybrid, data_direct_connect) {
  167. std::unique_ptr<NodeItem> node_item;
  168. auto root_graph = make_shared<ComputeGraph>("root_graph");
  169. OpDescPtr op_desc = CreateOpDesc("PartitionedCall", "PartitionedCall");
  170. auto node = root_graph->AddNode(op_desc);
  171. node->SetOwnerComputeGraph(root_graph);
  172. auto sub_graph = BuildDataDirectConnectGraph();
  173. sub_graph->SetParentGraph(root_graph);
  174. sub_graph->SetParentNode(node);
  175. node->GetOpDesc()->AddSubgraphName("subgraph");
  176. node->GetOpDesc()->SetSubgraphInstanceName(0, "subgraph");
  177. root_graph->AddSubgraph("subgraph", sub_graph);
  178. std::unique_ptr<NodeItem> new_node;
  179. NodeItem::Create(node, new_node);
  180. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(root_graph);
  181. HybridModel hybrid_model(ge_root_model);
  182. HybridModelBuilder hybrid_model_builder(hybrid_model);
  183. auto ret = hybrid_model_builder.IdentifyVariableOutputs(*new_node.get());
  184. ASSERT_EQ(ret, SUCCESS);
  185. }
  186. TEST_F(UtestGeHybrid, index_taskdefs_success) {
  187. // build aicore task
  188. domi::ModelTaskDef model_task_def;
  189. std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  190. domi::TaskDef *task_def = model_task_def_ptr->add_task();
  191. GeModelPtr ge_model = make_shared<GeModel>();
  192. ge_model->SetModelTaskDef(model_task_def_ptr);
  193. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  194. task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  195. domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  196. kernel_with_handle->set_original_kernel_key("");
  197. kernel_with_handle->set_node_info("");
  198. kernel_with_handle->set_block_dim(32);
  199. kernel_with_handle->set_args_size(64);
  200. string args(64, '1');
  201. kernel_with_handle->set_args(args.data(), 64);
  202. domi::KernelContext *context = kernel_with_handle->mutable_context();
  203. context->set_op_index(0);
  204. context->set_kernel_type(2); // ccKernelType::TE
  205. uint16_t args_offset[9] = {0};
  206. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  207. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  208. std::vector<char> kernelBin;
  209. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  210. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  211. std::string kernel_name("kernel/Add");
  212. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  213. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  214. NodePtr node = graph->AddNode(op_desc);
  215. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  216. HybridModel hybrid_model(ge_root_model);
  217. HybridModelBuilder hybrid_model_builder(hybrid_model);
  218. hybrid_model_builder.Build();
  219. hybrid_model_builder.BuildForSingleOp();
  220. ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), SUCCESS);
  221. }
  222. TEST_F(UtestGeHybrid, init_weight_success) {
  223. NpuMemoryAllocator::allocators_.emplace(make_pair(0, nullptr));
  224. // make graph with sub_graph
  225. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("root_graph");
  226. OpDescPtr op_desc = CreateOpDesc("if", IF);
  227. NodePtr node = graph->AddNode(op_desc);
  228. // make sub graph
  229. ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("if_sub_graph");
  230. OpDescPtr const_op_desc = CreateOpDesc("const", CONSTANT);
  231. vector<int64_t> dims_vec_0 = {2, 1, 4, 1, 2};
  232. vector<int32_t> data_vec_0 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  233. GeTensorDesc tensor_desc_0(GeShape(dims_vec_0), FORMAT_NCHW, DT_INT32);
  234. (void)TensorUtils::SetRealDimCnt(tensor_desc_0, dims_vec_0.size());
  235. ConstGeTensorPtr constTensor_0 =
  236. std::make_shared<GeTensor>(tensor_desc_0, (uint8_t *)&data_vec_0[0], data_vec_0.size() * sizeof(int32_t));
  237. AttrUtils::SetTensor(const_op_desc, ge::ATTR_NAME_WEIGHTS, constTensor_0);
  238. const_op_desc->AddOutputDesc(tensor_desc_0);
  239. NodePtr const_node = sub_graph->AddNode(const_op_desc);
  240. graph->AddSubgraph("sub", sub_graph);
  241. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  242. GeModelPtr ge_sub_model = make_shared<GeModel>();
  243. //Buffer weight_buffer = Buffer(128,0);
  244. //ge_sub_model->SetWeight(weight_buffer);
  245. ge_root_model->SetSubgraphInstanceNameToModel("sub",ge_sub_model);
  246. HybridModel hybrid_model(ge_root_model);
  247. HybridModelBuilder hybrid_model_builder(hybrid_model);
  248. auto ret = hybrid_model_builder.InitWeights();
  249. ASSERT_EQ(ret,SUCCESS);
  250. Buffer weight_buffer = Buffer(128,0);
  251. ge_sub_model->SetWeight(weight_buffer);
  252. ret = hybrid_model_builder.InitWeights();
  253. ASSERT_EQ(ret,PARAM_INVALID);
  254. }
  255. TEST_F(UtestGeHybrid, hybrid_model_executor) {
  256. ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("abc");
  257. GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  258. HybridModel model(root_model);
  259. HybridModel *model_ptr = &model;
  260. uint32_t device_id = 0;
  261. rtStream_t stream;
  262. HybridModelExecutor executor(model_ptr, device_id, stream);
  263. executor.Init();
  264. }
  265. TEST_F(UtestGeHybrid, test_parse_parallel_group) {
  266. NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl",
  267. NodeExecutorManager::ExecutorType::HCCL);
  268. ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  269. OpDescPtr op_desc = CreateOpDesc("AllReduce", "AllReduce");
  270. op_desc->SetId(0);
  271. ge::AttrUtils::SetStr(op_desc, ATTR_NAME_PARALLEL_GROUP, "group_1");
  272. auto node = compute_graph->AddNode(op_desc);
  273. std::unique_ptr<NodeItem> node_item;
  274. NodeItem::Create(node, node_item);
  275. node_item->node_id = 0;
  276. op_desc->SetOpKernelLibName("ops_kernel_info_hccl");
  277. GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  278. HybridModel model(root_model);
  279. model.root_graph_ = compute_graph;
  280. HybridModelBuilder builder(model);
  281. ASSERT_EQ(builder.CollectParallelGroups(node_item.get()), SUCCESS);
  282. ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
  283. ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);
  284. OpDescPtr op_desc_1 = CreateOpDesc("subgraph", "PartitionedCall");
  285. op_desc_1->AddSubgraphName("subgraph");
  286. auto node_1 = compute_graph->AddNode(op_desc_1);
  287. ComputeGraphPtr subgraph = MakeShared<ComputeGraph>("subgraph");
  288. ASSERT_EQ(NodeUtils::SetSubgraph(*node_1, 0, subgraph), GRAPH_SUCCESS);
  289. std::unique_ptr<NodeItem> node_item_1;
  290. NodeItem::Create(node_1, node_item_1);
  291. node_item_1->node_id = 1;
  292. ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
  293. ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
  294. ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);
  295. OpDescPtr op_desc_2 = CreateOpDesc("sub_node_1", "AllReduce");
  296. ge::AttrUtils::SetStr(op_desc_2, ATTR_NAME_PARALLEL_GROUP, "group_1");
  297. auto node_2 = subgraph->AddNode(op_desc_2);
  298. ASSERT_TRUE(node_2 != nullptr);
  299. OpDescPtr op_desc_3 = CreateOpDesc("sub_node_2", "AllReduce2");
  300. ge::AttrUtils::SetStr(op_desc_3, ATTR_NAME_PARALLEL_GROUP, "group_2");
  301. auto node_3 = subgraph->AddNode(op_desc_3);
  302. ASSERT_TRUE(node_3 != nullptr);
  303. ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
  304. ASSERT_EQ(builder.node_to_parallel_groups_.size(), 2);
  305. ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 2);
  306. ASSERT_EQ(builder.parallel_group_to_nodes_["group_1"].size(), 2);
  307. ASSERT_EQ(builder.parallel_group_to_nodes_["group_2"].size(), 1);
  308. builder.parallel_group_to_nodes_.clear();
  309. builder.node_ref_inputs_.clear();
  310. model.node_items_[node] = std::move(node_item);
  311. model.node_items_[node_1] = std::move(node_item_1);
  312. ASSERT_FALSE(model.node_items_[node]->has_observer);
  313. ASSERT_TRUE(model.node_items_[node_1]->dependents_for_execution.empty());
  314. ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
  315. ASSERT_TRUE(model.node_items_[node]->has_observer);
  316. ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution.size(), 1);
  317. ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution[0], node);
  318. // repeat parse
  319. ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
  320. ASSERT_TRUE(model.node_items_[node]->has_observer);
  321. ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution.size(), 1);
  322. ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution[0], node);
  323. }
  324. TEST_F(UtestGeHybrid, unfold_subgraphs_success) {
  325. ComputeGraphPtr merged_graph = nullptr;
  326. ComputeGraphPtr sub_sub_graph1 = std::make_shared<ComputeGraph>("while_cond");
  327. OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA);
  328. NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc);
  329. ComputeGraphPtr sub_sub_graph2 = std::make_shared<ComputeGraph>("while_body");
  330. /*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT);
  331. NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/
  332. OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA);
  333. NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc);
  334. sub_sub_graph2->SetGraphUnknownFlag(true);
  335. /*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD);
  336. NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_node);
  337. sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node);
  338. sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/
  339. ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("sub_graph");
  340. OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE);
  341. NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc);
  342. sub_graph->SetGraphUnknownFlag(true);
  343. sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond");
  344. sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body");
  345. sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond");
  346. sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body");
  347. ComputeGraphPtr root_graph = std::make_shared<ComputeGraph>("root_graph");
  348. auto partitioned_call_op_desc = MakeShared<OpDesc>("partitioned_call", PARTITIONEDCALL);
  349. auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc);
  350. partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph");
  351. partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph");
  352. root_graph->AddSubGraph(sub_sub_graph1);
  353. root_graph->AddSubGraph(sub_sub_graph2);
  354. sub_sub_graph1->SetParentGraph(root_graph);
  355. sub_sub_graph2->SetParentGraph(root_graph);
  356. sub_sub_graph1->SetParentNode(sub_graph_while_node);
  357. sub_sub_graph2->SetParentNode(sub_graph_while_node);
  358. root_graph->AddSubGraph(sub_graph);
  359. sub_graph->SetParentNode(partitioned_call_node);
  360. sub_graph->SetParentGraph(root_graph);
  361. GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(root_graph);
  362. HybridModel hybrid_model(root_model);
  363. HybridModelBuilder hybrid_model_builder(hybrid_model);
  364. EXPECT_EQ(hybrid_model_builder.UnfoldSubgraphs(root_graph, merged_graph), SUCCESS);
  365. }
  366. TEST_F(UtestGeHybrid, TestTaskContext) {
  367. auto graph = make_shared<ComputeGraph>("graph");
  368. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  369. GeShape shape({2, 16});
  370. GeTensorDesc tensor_desc(shape);
  371. op_desc->AddInputDesc(tensor_desc);
  372. op_desc->AddInputDesc(tensor_desc);
  373. op_desc->AddOutputDesc(tensor_desc);
  374. auto node = graph->AddNode(op_desc);
  375. std::unique_ptr<NodeItem> node_item;
  376. NodeItem::Create(node, node_item);
  377. node_item->input_start = 0;
  378. node_item->output_start = 0;
  379. GraphExecutionContext execution_context;
  380. SubgraphContext subgraph_context(nullptr, &execution_context);
  381. subgraph_context.all_inputs_.resize(2);
  382. subgraph_context.all_outputs_.resize(1);
  383. NodeState node_state(*node_item, &subgraph_context);
  384. auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
  385. ASSERT_TRUE(task_context != nullptr);
  386. auto desc = task_context->MutableInputDesc(2);
  387. ASSERT_TRUE(desc == nullptr);
  388. desc = task_context->MutableOutputDesc(0);
  389. ASSERT_TRUE(desc != nullptr);
  390. ASSERT_EQ(desc->GetShape().GetDims(), shape.GetDims());
  391. GeTensorDesc output_desc;
  392. ASSERT_EQ(task_context->GetOutputDesc(0, output_desc), SUCCESS);
  393. ASSERT_EQ(output_desc.GetShape().GetDims(), shape.GetDims());
  394. desc = task_context->MutableInputDesc(0);
  395. ASSERT_TRUE(desc != nullptr);
  396. ASSERT_EQ(desc->GetShape().GetDims(), shape.GetDims());
  397. GeShape new_shape({8, 2});
  398. tensor_desc.SetShape(new_shape);
  399. task_context->UpdateInputDesc(1, tensor_desc);
  400. GeTensorDesc new_desc;
  401. ASSERT_EQ(task_context->GetInputDesc(1, new_desc), SUCCESS);
  402. ASSERT_EQ(new_desc.GetShape().GetDims(), new_shape.GetDims());
  403. }
  404. TEST_F(UtestGeHybrid, hybrid_model_executor_update_args) {
  405. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  406. auto graph = make_shared<ComputeGraph>("graph");
  407. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  408. GeShape shape({2, 16});
  409. GeTensorDesc tensor_desc(shape);
  410. op_desc->AddInputDesc(tensor_desc);
  411. op_desc->AddInputDesc(tensor_desc);
  412. op_desc->AddOutputDesc(tensor_desc);
  413. auto node = graph->AddNode(op_desc);
  414. std::unique_ptr<NodeItem> node_item;
  415. NodeItem::Create(node, node_item);
  416. node_item->input_start = 0;
  417. node_item->output_start = 0;
  418. GraphExecutionContext execution_context;
  419. SubgraphContext subgraph_context(nullptr, &execution_context);
  420. subgraph_context.all_inputs_.resize(2);
  421. subgraph_context.all_outputs_.resize(1);
  422. NodeState node_state(*node_item, &subgraph_context);
  423. auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
  424. int32_t buffer[1];
  425. aicore_task->tiling_buffer_ = TensorBuffer::Create(buffer, sizeof(buffer));
  426. EXPECT_NE(aicore_task->tiling_buffer_, nullptr);
  427. aicore_task->max_arg_count_ = 0;
  428. EXPECT_EQ(aicore_task->UpdateArgs(*task_context), ACL_ERROR_GE_MEMORY_OPERATE_FAILED);
  429. aicore_task->args_ = std::unique_ptr<uint8_t[]>(new uint8_t[sizeof(uintptr_t) * 2]);
  430. EXPECT_EQ(aicore_task->UpdateArgs(*task_context), SUCCESS);
  431. }
  432. TEST_F(UtestGeHybrid, hybrid_model_executor_check_shape) {
  433. HybridModelExecutor::ExecuteArgs args;
  434. GeTensorDescPtr ge_tensor = make_shared<GeTensorDesc>(GeTensorDesc());
  435. vector<int64_t> dim = {2 , 3};
  436. ge_tensor->SetShape(GeShape(dim));
  437. args.input_desc.push_back(ge_tensor);
  438. // create node
  439. ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("God");
  440. OpDescPtr op_desc = std::make_shared<OpDesc>("data", DATA);
  441. GeTensorDesc tensor_desc(GeShape({2, 3}));
  442. std::vector<std::pair<int64_t, int64_t>> shape_range({std::pair<int64_t, int64_t>(1, 3),
  443. std::pair<int64_t, int64_t>(2, 4)});
  444. tensor_desc.SetShapeRange(shape_range);
  445. op_desc->AddInputDesc(tensor_desc);
  446. op_desc->AddOutputDesc(tensor_desc);
  447. NodePtr node = graph->AddNode(op_desc);
  448. std::unique_ptr<NodeItem> new_node;
  449. NodeItem::Create(node, new_node);
  450. new_node->is_dynamic = true;
  451. GraphItem graph_item;
  452. graph_item.input_nodes_.emplace_back(new_node.get());
  453. Status ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args);
  454. ASSERT_EQ(ret, ge::SUCCESS);
  455. HybridModelExecutor::ExecuteArgs args1;
  456. ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args1);
  457. ASSERT_EQ(ret, ge::INTERNAL_ERROR);
  458. HybridModelExecutor::ExecuteArgs args2;
  459. GeTensorDescPtr ge_tensor2 = make_shared<GeTensorDesc>(GeTensorDesc());
  460. vector<int64_t> dim2 = {-1 , 3};
  461. ge_tensor2->SetShape(GeShape(dim2));
  462. args2.input_desc.push_back(ge_tensor2);
  463. ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args1);
  464. ASSERT_EQ(ret, ge::INTERNAL_ERROR);
  465. HybridModelExecutor::ExecuteArgs args3;
  466. ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args3);
  467. ASSERT_EQ(ret, ge::INTERNAL_ERROR);
  468. }
  469. TEST_F(UtestGeHybrid, TestOptimizeDependenciesForConstInputs) {
  470. ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  471. GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  472. HybridModel model(root_model);
  473. model.root_graph_ = compute_graph;
  474. HybridModelBuilder builder(model);
  475. GeShape shape({2, 16});
  476. GeTensorDesc tensor_desc(shape);
  477. std::unique_ptr<NodeItem> const_node_item;
  478. {
  479. OpDescPtr const_op_desc = CreateOpDesc("Constant", "Const");
  480. const_op_desc->AddOutputDesc(tensor_desc);
  481. auto const_node = compute_graph->AddNode(const_op_desc);
  482. NodeItem::Create(const_node, const_node_item);
  483. }
  484. std::unique_ptr<NodeItem> non_const_node_item;
  485. {
  486. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  487. op_desc->AddOutputDesc(tensor_desc);
  488. auto const_node = compute_graph->AddNode(op_desc);
  489. NodeItem::Create(const_node, non_const_node_item);
  490. }
  491. std::unique_ptr<NodeItem> known_node_item;
  492. {
  493. OpDescPtr known_op_desc = CreateOpDesc("known", "PartitionedCall");
  494. known_op_desc->AddOutputDesc(tensor_desc);
  495. known_op_desc->AddOutputDesc(tensor_desc);
  496. auto known_node = compute_graph->AddNode(known_op_desc);
  497. NodeItem::Create(known_node, known_node_item);
  498. }
  499. std::unique_ptr<NodeItem> dst_node_item;
  500. {
  501. OpDescPtr known_op_desc = CreateOpDesc("SomeOp", "SomeOpType ");
  502. known_op_desc->AddOutputDesc(tensor_desc);
  503. known_op_desc->AddOutputDesc(tensor_desc);
  504. auto known_node = compute_graph->AddNode(known_op_desc);
  505. NodeItem::Create(known_node, dst_node_item);
  506. }
  507. float buffer[2 * 16];
  508. unique_ptr<TensorValue> tensor_value(new TensorValue(buffer, sizeof(buffer)));
  509. model.constant_tensors_[const_node_item->node] = std::move(tensor_value);
  510. // Case 1. connect to Const
  511. auto output_id = 1;
  512. builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(output_id, const_node_item.get());
  513. builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(0, non_const_node_item.get());
  514. dst_node_item->dependents_for_shape_inference.emplace_back(const_node_item->node);
  515. dst_node_item->dependents_for_shape_inference.emplace_back(non_const_node_item->node);
  516. ASSERT_EQ(builder.OptimizeDependenciesForConstantInputs(), SUCCESS);
  517. ASSERT_EQ(dst_node_item->dependents_for_shape_inference.size(), 1);
  518. ASSERT_EQ(dst_node_item->dependents_for_shape_inference[0], non_const_node_item->node);
  519. // Case 2. connect to known-subgraph, netoutput connect to Const
  520. builder.host_input_value_dependencies_.clear();
  521. dst_node_item->dependents_for_shape_inference.clear();
  522. builder.known_subgraph_constant_output_refs_[known_node_item.get()].emplace(output_id, const_node_item->node);
  523. builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(output_id, known_node_item.get());
  524. builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(0, non_const_node_item.get());
  525. dst_node_item->dependents_for_shape_inference.emplace_back(known_node_item->node);
  526. dst_node_item->dependents_for_shape_inference.emplace_back(non_const_node_item->node);
  527. ASSERT_EQ(builder.OptimizeDependenciesForConstantInputs(), SUCCESS);
  528. ASSERT_EQ(dst_node_item->dependents_for_shape_inference.size(), 1);
  529. ASSERT_EQ(dst_node_item->dependents_for_shape_inference[0], non_const_node_item->node);
  530. }
  531. TEST_F(UtestGeHybrid, test_key_for_kernel_bin) {
  532. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  533. OpDesc op_desc("Sum", "Sum");
  534. EXPECT_EQ(aicore_task->GetKeyForTbeKernel(), OP_EXTATTR_NAME_TBE_KERNEL);
  535. EXPECT_EQ(aicore_task->GetKeyForTvmMagic(), TVM_ATTR_NAME_MAGIC);
  536. EXPECT_EQ(aicore_task->GetKeyForTvmMetaData(), TVM_ATTR_NAME_METADATA);
  537. EXPECT_EQ(aicore_task->GetKeyForKernelName(op_desc), "Sum_kernelname");
  538. auto atomic_task = std::unique_ptr<hybrid::AtomicAddrCleanOpTask>(new(std::nothrow)hybrid::AtomicAddrCleanOpTask());
  539. EXPECT_EQ(atomic_task->GetKeyForTbeKernel(), EXT_ATTR_ATOMIC_TBE_KERNEL);
  540. EXPECT_EQ(atomic_task->GetKeyForTvmMagic(), ATOMIC_ATTR_TVM_MAGIC);
  541. EXPECT_EQ(atomic_task->GetKeyForTvmMetaData(), ATOMIC_ATTR_TVM_METADATA);
  542. EXPECT_EQ(atomic_task->GetKeyForKernelName(op_desc), "Sum_atomic_kernelname");
  543. }
  544. TEST_F(UtestGeHybrid, TestParseDependentInputNodesForHccl) {
  545. NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl",
  546. NodeExecutorManager::ExecutorType::HCCL);
  547. ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  548. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  549. auto node = compute_graph->AddNode(op_desc);
  550. std::unique_ptr<NodeItem> node_item;
  551. NodeItem::Create(node, node_item);
  552. node_item->node_id = 0;
  553. OpDescPtr op_desc_1 = CreateOpDesc("AllReduce", "AllReduce");
  554. op_desc_1->SetOpKernelLibName("ops_kernel_info_hccl");
  555. auto node_1 = compute_graph->AddNode(op_desc_1);
  556. std::unique_ptr<NodeItem> node_item_1;
  557. NodeItem::Create(node_1, node_item_1);
  558. node_item_1->node_id = 1;
  559. node->GetOutControlAnchor()->LinkTo(node_1->GetInControlAnchor());
  560. GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  561. HybridModel model(root_model);
  562. model.root_graph_ = compute_graph;
  563. model.node_items_.emplace(node, std::move(node_item));
  564. HybridModelBuilder builder(model);
  565. std::vector<std::string> deps;
  566. ASSERT_EQ(builder.ParseDependentInputNodes(*node_item_1, deps), SUCCESS);
  567. ASSERT_TRUE(model.GetNodeItem(node)->has_observer);
  568. ASSERT_EQ(node_item_1->dependents_for_execution.size(), 1);
  569. }

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示