
ge_hybrid_unittest.cc 34 kB

/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include <vector>

#include "runtime/rt.h"

#define protected public
#define private public
#include "graph/utils/node_utils.h"
#include "hybrid/model/hybrid_model_builder.h"
#include "hybrid/model/hybrid_model.h"
#include "hybrid/node_executor/node_executor.h"
#include "model/ge_model.h"
#include "model/ge_root_model.h"
#include "hybrid/node_executor/aicore/aicore_op_task.h"
#include "framework/common/taskdown_common.h"
#include "framework/common/debug/log.h"
#include "graph/ge_context.h"
#include "hybrid/executor/hybrid_execution_context.h"
#include "hybrid/executor/hybrid_model_executor.h"
#include "hybrid/node_executor/aicore/aicore_task_builder.h"
#include "hybrid/node_executor/aicore/aicore_node_executor.h"
#include "graph/load/model_manager/tbe_handle_store.h"
#include "graph/manager/graph_mem_allocator.h"
#include "hybrid/common/npu_memory_allocator.h"
#include "graph/types.h"
#include "graph/utils/tensor_utils.h"
#include "graph/testcase/ge_graph/graph_builder_utils.h"
#include "single_op/task/build_task_utils.h"
#include "graph/op_desc_impl.h"
#undef private
#undef protected

using namespace std;
using namespace testing;
using namespace ge;
using namespace hybrid;

class UtestGeHybrid : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {
    NpuMemoryAllocator::allocators_.clear();
  }
};

static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") {
  auto op_desc = std::make_shared<ge::OpDesc>(name, type);
  op_desc->SetStreamId(0);
  op_desc->SetId(0);
  op_desc->SetWorkspace({});
  op_desc->SetWorkspaceBytes({});
  op_desc->SetInputOffset({});
  op_desc->SetOutputOffset({});
  ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF_AIVEC");
  bool support_dynamic = true;
  ge::AttrUtils::GetBool(op_desc, "support_dynamicshape", support_dynamic);
  return op_desc;
}

TEST_F(UtestGeHybrid, aicore_op_task_init_success) {
  // build aicore task
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  domi::TaskDef task_def;
  task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
  domi::KernelDefWithHandle *kernel_with_handle = task_def.mutable_kernel_with_handle();
  kernel_with_handle->set_original_kernel_key("");
  kernel_with_handle->set_node_info("");
  kernel_with_handle->set_block_dim(32);
  kernel_with_handle->set_args_size(64);
  string args(64, '1');
  kernel_with_handle->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_with_handle->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  std::vector<char> kernelBin;
  TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  std::string kernel_name("kernel/Add");
  AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  ASSERT_EQ(aicore_task->InitWithTaskDef(*op_desc.get(), task_def), SUCCESS);
  rtStream_t stream = nullptr;
  rtStreamCreate(&stream, 0);
  ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  char handle[] = "";
  aicore_task->handle_ = handle;
  aicore_task->tiling_key_ = 1;
  ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
}

TEST_F(UtestGeHybrid, aicore_op_task_init_success2) {
  // build aicore task
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  aicore_task->is_single_op_ = true;
  domi::TaskDef task_def;
  task_def.set_type(RT_MODEL_TASK_KERNEL);
  domi::KernelDef *kernel = task_def.mutable_kernel();
  kernel->set_block_dim(32);
  kernel->set_args_size(64);
  string args(64, '1');
  kernel->set_args(args.data(), 64);
  domi::KernelContext *context = kernel->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  std::vector<char> kernelBin;
  TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  std::string kernel_name("kernel/Add");
  AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  ASSERT_EQ(aicore_task->InitWithTaskDef(*op_desc.get(), task_def), SUCCESS);
  rtStream_t stream = nullptr;
  rtStreamCreate(&stream, 0);
  ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  char handle[] = "";
  aicore_task->handle_ = handle;
  aicore_task->tiling_key_ = 1;
  ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
}

TEST_F(UtestGeHybrid, task_update_tiling_info) {
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  auto graph = make_shared<ComputeGraph>("graph");
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  ge::AttrUtils::SetStr(op_desc, "compile_info_key", "key");
  ge::AttrUtils::SetStr(op_desc, "compile_info_json", "json");
  ge::AttrUtils::SetBool(op_desc, "support_dynamicshape", true);
  ge::AttrUtils::SetInt(op_desc, "op_para_size", 1);
  auto node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->input_start = 0;
  node_item->output_start = 0;
  GraphExecutionContext execution_context;
  SubgraphContext subgraph_context(nullptr, &execution_context);
  auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
  ASSERT_EQ(aicore_task->InitTilingInfo(*op_desc), SUCCESS);
  ASSERT_EQ(aicore_task->UpdateTilingInfo(*node_state->GetTaskContext()), SUCCESS);
}

TEST_F(UtestGeHybrid, index_taskdefs_failed) {
  // build aicore task
  domi::ModelTaskDef model_task_def;
  std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  domi::TaskDef *task_def = model_task_def_ptr->add_task();
  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetModelTaskDef(model_task_def_ptr);
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  kernel_with_handle->set_original_kernel_key("");
  kernel_with_handle->set_node_info("");
  kernel_with_handle->set_block_dim(32);
  kernel_with_handle->set_args_size(64);
  string args(64, '1');
  kernel_with_handle->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_with_handle->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  std::vector<char> kernelBin;
  TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  std::string kernel_name("kernel/Add");
  AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  ge_root_model->SetModelName("test_name");
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  ASSERT_EQ(hybrid_model_builder.Build(), INTERNAL_ERROR);
  ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), INTERNAL_ERROR);
}

TEST_F(UtestGeHybrid, parse_force_infershape_nodes) {
  const char *const kForceInfershape = "_force_infershape_when_running";
  auto graph = make_shared<ComputeGraph>("graph");
  OpDescPtr op_desc = CreateOpDesc("Conv2D", "Conv2D");
  ge::AttrUtils::SetBool(op_desc, kForceInfershape, true);
  auto node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> new_node;
  NodeItem::Create(node, new_node);
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  ASSERT_EQ(hybrid_model_builder.ParseForceInfershapeNodes(node, *new_node), SUCCESS);
}

static ComputeGraphPtr BuildDataDirectConnectGraph() {
  const char *kRefIndex = "_parent_node_index";
  ge::ut::GraphBuilder builder("subgraph");
  auto data = builder.AddNode("Data", "Data", 1, 1);
  auto netoutput = builder.AddNode("NetOutput", "NetOutput", 1, 1);
  (void)AttrUtils::SetInt(netoutput->GetOpDesc()->MutableInputDesc(0), kRefIndex, 0);
  builder.AddDataEdge(data, 0, netoutput, 0);
  return builder.GetGraph();
}

TEST_F(UtestGeHybrid, data_direct_connect) {
  std::unique_ptr<NodeItem> node_item;
  auto root_graph = make_shared<ComputeGraph>("root_graph");
  OpDescPtr op_desc = CreateOpDesc("PartitionedCall", "PartitionedCall");
  auto node = root_graph->AddNode(op_desc);
  node->SetOwnerComputeGraph(root_graph);
  auto sub_graph = BuildDataDirectConnectGraph();
  sub_graph->SetParentGraph(root_graph);
  sub_graph->SetParentNode(node);
  node->GetOpDesc()->AddSubgraphName("subgraph");
  node->GetOpDesc()->SetSubgraphInstanceName(0, "subgraph");
  root_graph->AddSubgraph("subgraph", sub_graph);
  std::unique_ptr<NodeItem> new_node;
  NodeItem::Create(node, new_node);
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(root_graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  auto ret = hybrid_model_builder.IdentifyVariableOutputs(*new_node.get(), sub_graph);
  ASSERT_EQ(ret, SUCCESS);
}

TEST_F(UtestGeHybrid, index_taskdefs_success) {
  // build aicore task
  domi::ModelTaskDef model_task_def;
  std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  domi::TaskDef *task_def = model_task_def_ptr->add_task();
  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetModelTaskDef(model_task_def_ptr);
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  kernel_with_handle->set_original_kernel_key("");
  kernel_with_handle->set_node_info("");
  kernel_with_handle->set_block_dim(32);
  kernel_with_handle->set_args_size(64);
  string args(64, '1');
  kernel_with_handle->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_with_handle->mutable_context();
  context->set_op_index(0);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  std::vector<char> kernelBin;
  TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  std::string kernel_name("kernel/Add");
  AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  NodePtr node = graph->AddNode(op_desc);
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), SUCCESS);
}

TEST_F(UtestGeHybrid, init_weight_success) {
  NpuMemoryAllocator::allocators_.emplace(make_pair(0, nullptr));
  // make graph with sub_graph
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("root_graph");
  OpDescPtr op_desc = CreateOpDesc("if", IF);
  NodePtr node = graph->AddNode(op_desc);
  // make sub graph
  ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("if_sub_graph");
  OpDescPtr const_op_desc = CreateOpDesc("const", CONSTANT);
  vector<int64_t> dims_vec_0 = {2, 1, 4, 1, 2};
  vector<int32_t> data_vec_0 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  GeTensorDesc tensor_desc_0(GeShape(dims_vec_0), FORMAT_NCHW, DT_INT32);
  (void)TensorUtils::SetRealDimCnt(tensor_desc_0, dims_vec_0.size());
  ConstGeTensorPtr constTensor_0 =
      std::make_shared<GeTensor>(tensor_desc_0, (uint8_t *)&data_vec_0[0], data_vec_0.size() * sizeof(int32_t));
  AttrUtils::SetTensor(const_op_desc, ge::ATTR_NAME_WEIGHTS, constTensor_0);
  const_op_desc->AddOutputDesc(tensor_desc_0);
  NodePtr const_node = sub_graph->AddNode(const_op_desc);
  graph->AddSubgraph("sub", sub_graph);
  GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  GeModelPtr ge_sub_model = make_shared<GeModel>();
  // Buffer weight_buffer = Buffer(128, 0);
  // ge_sub_model->SetWeight(weight_buffer);
  ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model);
  HybridModel hybrid_model(ge_root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  auto ret = hybrid_model_builder.InitWeights();
  ASSERT_EQ(ret, SUCCESS);
  Buffer weight_buffer = Buffer(128, 0);
  ge_sub_model->SetWeight(weight_buffer);
  ret = hybrid_model_builder.InitWeights();
  ASSERT_EQ(ret, PARAM_INVALID);
}

TEST_F(UtestGeHybrid, hybrid_model_executor) {
  ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("abc");
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  HybridModel model(root_model);
  HybridModel *model_ptr = &model;
  uint32_t device_id = 0;
  rtStream_t stream = nullptr;
  HybridModelExecutor executor(model_ptr, device_id, stream);
  executor.Init();
}

TEST_F(UtestGeHybrid, test_parse_parallel_group) {
  NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl",
                                                             NodeExecutorManager::ExecutorType::HCCL);
  ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  OpDescPtr op_desc = CreateOpDesc("AllReduce", "AllReduce");
  op_desc->SetId(0);
  ge::AttrUtils::SetStr(op_desc, ATTR_NAME_PARALLEL_GROUP, "group_1");
  auto node = compute_graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->node_id = 0;
  op_desc->SetOpKernelLibName("ops_kernel_info_hccl");
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  HybridModel model(root_model);
  model.root_graph_ = compute_graph;
  HybridModelBuilder builder(model);
  ASSERT_EQ(builder.CollectParallelGroups(node_item.get()), SUCCESS);
  ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
  ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);
  OpDescPtr op_desc_1 = CreateOpDesc("subgraph", "PartitionedCall");
  op_desc_1->AddSubgraphName("subgraph");
  auto node_1 = compute_graph->AddNode(op_desc_1);
  ComputeGraphPtr subgraph = MakeShared<ComputeGraph>("subgraph");
  ASSERT_EQ(NodeUtils::SetSubgraph(*node_1, 0, subgraph), GRAPH_SUCCESS);
  std::unique_ptr<NodeItem> node_item_1;
  NodeItem::Create(node_1, node_item_1);
  node_item_1->node_id = 1;
  ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
  ASSERT_EQ(builder.node_to_parallel_groups_.size(), 1);
  ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 1);
  OpDescPtr op_desc_2 = CreateOpDesc("sub_node_1", "AllReduce");
  ge::AttrUtils::SetStr(op_desc_2, ATTR_NAME_PARALLEL_GROUP, "group_1");
  auto node_2 = subgraph->AddNode(op_desc_2);
  ASSERT_TRUE(node_2 != nullptr);
  OpDescPtr op_desc_3 = CreateOpDesc("sub_node_2", "AllReduce2");
  ge::AttrUtils::SetStr(op_desc_3, ATTR_NAME_PARALLEL_GROUP, "group_2");
  auto node_3 = subgraph->AddNode(op_desc_3);
  ASSERT_TRUE(node_3 != nullptr);
  ASSERT_EQ(builder.CollectParallelGroups(node_item_1.get()), SUCCESS);
  ASSERT_EQ(builder.node_to_parallel_groups_.size(), 2);
  ASSERT_EQ(builder.parallel_group_to_nodes_.size(), 2);
  ASSERT_EQ(builder.parallel_group_to_nodes_["group_1"].size(), 2);
  ASSERT_EQ(builder.parallel_group_to_nodes_["group_2"].size(), 1);
  builder.parallel_group_to_nodes_.clear();
  builder.node_ref_inputs_.clear();
  model.node_items_[node] = std::move(node_item);
  model.node_items_[node_1] = std::move(node_item_1);
  ASSERT_FALSE(model.node_items_[node]->has_observer);
  ASSERT_TRUE(model.node_items_[node_1]->dependents_for_execution.empty());
  ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
  ASSERT_TRUE(model.node_items_[node]->has_observer);
  ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution.size(), 1);
  ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution[0], node);
  // repeat parse
  ASSERT_EQ(builder.ParseDependentByParallelGroup(), SUCCESS);
  ASSERT_TRUE(model.node_items_[node]->has_observer);
  ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution.size(), 1);
  ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution[0], node);
}

TEST_F(UtestGeHybrid, unfold_subgraphs_success) {
  ComputeGraphPtr merged_graph = nullptr;
  ComputeGraphPtr sub_sub_graph1 = std::make_shared<ComputeGraph>("while_cond");
  OpDescPtr sub_sub_graph_while_cond_data_op_desc = CreateOpDesc("cond_data", DATA);
  NodePtr sub_sub_graph_while_cond_data_node = sub_sub_graph1->AddNode(sub_sub_graph_while_cond_data_op_desc);
  ComputeGraphPtr sub_sub_graph2 = std::make_shared<ComputeGraph>("while_body");
  /*OpDescPtr sub_sub_graph_while_body_const_op_desc = CreateOpDesc("body_const", CONSTANT);
  NodePtr sub_sub_graph_while_body_const_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_const_op_desc);*/
  OpDescPtr sub_sub_graph_while_body_data_op_desc = CreateOpDesc("body_data", DATA);
  NodePtr sub_sub_graph_while_body_data_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_data_op_desc);
  sub_sub_graph2->SetGraphUnknownFlag(true);
  /*OpDescPtr sub_sub_graph_while_body_add_op_desc = CreateOpDesc("body_add", ADD);
  NodePtr sub_sub_graph_while_body_add_node = sub_sub_graph2->AddNode(sub_sub_graph_while_body_add_node);
  sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_data_node);
  sub_sub_graph_while_body_add_node->AddLinkFrom(sub_sub_graph_while_body_const_node);*/
  ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("sub_graph");
  OpDescPtr sub_graph_while_op_desc = CreateOpDesc("while", WHILE);
  NodePtr sub_graph_while_node = sub_graph->AddNode(sub_graph_while_op_desc);
  sub_graph->SetGraphUnknownFlag(true);
  sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_cond");
  sub_graph_while_node->GetOpDesc()->AddSubgraphName("while_body");
  sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(0, "while_cond");
  sub_graph_while_node->GetOpDesc()->SetSubgraphInstanceName(1, "while_body");
  ComputeGraphPtr root_graph = std::make_shared<ComputeGraph>("root_graph");
  auto partitioned_call_op_desc = MakeShared<OpDesc>("partitioned_call", PARTITIONEDCALL);
  auto partitioned_call_node = root_graph->AddNode(partitioned_call_op_desc);
  partitioned_call_node->GetOpDesc()->AddSubgraphName("sub_graph");
  partitioned_call_node->GetOpDesc()->SetSubgraphInstanceName(0, "sub_graph");
  root_graph->AddSubGraph(sub_sub_graph1);
  root_graph->AddSubGraph(sub_sub_graph2);
  sub_sub_graph1->SetParentGraph(root_graph);
  sub_sub_graph2->SetParentGraph(root_graph);
  sub_sub_graph1->SetParentNode(sub_graph_while_node);
  sub_sub_graph2->SetParentNode(sub_graph_while_node);
  root_graph->AddSubGraph(sub_graph);
  sub_graph->SetParentNode(partitioned_call_node);
  sub_graph->SetParentGraph(root_graph);
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(root_graph);
  HybridModel hybrid_model(root_model);
  HybridModelBuilder hybrid_model_builder(hybrid_model);
  EXPECT_EQ(hybrid_model_builder.UnfoldSubgraphs(root_graph, merged_graph), SUCCESS);
}

TEST_F(UtestGeHybrid, TestTaskContext) {
  auto graph = make_shared<ComputeGraph>("graph");
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  GeShape shape({2, 16});
  GeTensorDesc tensor_desc(shape);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddOutputDesc(tensor_desc);
  auto node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->input_start = 0;
  node_item->output_start = 0;
  GraphExecutionContext execution_context;
  GraphItem graph_item;
  SubgraphContext subgraph_context(&graph_item, &execution_context);
  ASSERT_EQ(subgraph_context.Init(), SUCCESS);
  subgraph_context.all_inputs_.resize(2);
  subgraph_context.all_outputs_.resize(1);
  auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
  auto task_context = node_state->GetTaskContext();
  ASSERT_TRUE(task_context != nullptr);
  auto desc = task_context->MutableInputDesc(2);
  ASSERT_TRUE(desc == nullptr);
  desc = task_context->MutableOutputDesc(0);
  ASSERT_TRUE(desc != nullptr);
  ASSERT_EQ(desc->GetShape().GetDims(), shape.GetDims());
  GeTensorDesc output_desc;
  ASSERT_EQ(task_context->GetOutputDesc(0, output_desc), SUCCESS);
  ASSERT_EQ(output_desc.GetShape().GetDims(), shape.GetDims());
  desc = task_context->MutableInputDesc(0);
  ASSERT_TRUE(desc != nullptr);
  ASSERT_EQ(desc->GetShape().GetDims(), shape.GetDims());
  GeShape new_shape({8, 2});
  tensor_desc.SetShape(new_shape);
  task_context->UpdateInputDesc(1, tensor_desc);
  GeTensorDesc new_desc;
  ASSERT_EQ(task_context->GetInputDesc(1, new_desc), SUCCESS);
  ASSERT_EQ(new_desc.GetShape().GetDims(), new_shape.GetDims());
}

TEST_F(UtestGeHybrid, hybrid_model_executor_update_args) {
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  auto graph = make_shared<ComputeGraph>("graph");
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  GeShape shape({2, 16});
  GeTensorDesc tensor_desc(shape);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddOutputDesc(tensor_desc);
  auto node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->input_start = 0;
  node_item->output_start = 0;
  GraphExecutionContext execution_context;
  GraphItem graph_item;
  SubgraphContext subgraph_context(&graph_item, &execution_context);
  ASSERT_EQ(subgraph_context.Init(), SUCCESS);
  subgraph_context.all_inputs_.resize(2);
  subgraph_context.all_outputs_.resize(1);
  auto node_state = subgraph_context.GetOrCreateNodeState(node_item.get());
  auto task_context = node_state->GetTaskContext();
  int32_t buffer[1];
  aicore_task->tiling_buffer_ = TensorBuffer::Create(buffer, sizeof(buffer));
  EXPECT_NE(aicore_task->tiling_buffer_, nullptr);
  aicore_task->max_arg_count_ = 0;
  EXPECT_EQ(aicore_task->UpdateArgs(*task_context), ACL_ERROR_GE_MEMORY_OPERATE_FAILED);
  aicore_task->args_ = std::unique_ptr<uint8_t[]>(new uint8_t[sizeof(uintptr_t) * 2]);
  EXPECT_EQ(aicore_task->UpdateArgs(*task_context), SUCCESS);
}

TEST_F(UtestGeHybrid, hybrid_model_executor_check_shape) {
  HybridModelExecutor::ExecuteArgs args;
  GeTensorDescPtr ge_tensor = make_shared<GeTensorDesc>(GeTensorDesc());
  vector<int64_t> dim = {2, 3};
  ge_tensor->SetShape(GeShape(dim));
  args.input_desc.push_back(ge_tensor);
  // create node
  ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("God");
  OpDescPtr op_desc = std::make_shared<OpDesc>("data", DATA);
  GeTensorDesc tensor_desc(GeShape({2, 3}));
  std::vector<std::pair<int64_t, int64_t>> shape_range({std::pair<int64_t, int64_t>(1, 3),
                                                        std::pair<int64_t, int64_t>(2, 4)});
  tensor_desc.SetShapeRange(shape_range);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddOutputDesc(tensor_desc);
  NodePtr node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> new_node;
  NodeItem::Create(node, new_node);
  new_node->is_dynamic = true;
  GraphItem graph_item;
  graph_item.input_nodes_.emplace_back(new_node.get());
  Status ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args);
  ASSERT_EQ(ret, ge::SUCCESS);
  HybridModelExecutor::ExecuteArgs args1;
  ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args1);
  ASSERT_EQ(ret, ge::INTERNAL_ERROR);
  HybridModelExecutor::ExecuteArgs args2;
  GeTensorDescPtr ge_tensor2 = make_shared<GeTensorDesc>(GeTensorDesc());
  vector<int64_t> dim2 = {-1, 3};
  ge_tensor2->SetShape(GeShape(dim2));
  args2.input_desc.push_back(ge_tensor2);
  ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args1);
  ASSERT_EQ(ret, ge::INTERNAL_ERROR);
  HybridModelExecutor::ExecuteArgs args3;
  ret = HybridModelExecutor::CheckInputShapeByShapeRange(&graph_item, args3);
  ASSERT_EQ(ret, ge::INTERNAL_ERROR);
}

TEST_F(UtestGeHybrid, TestOptimizeDependenciesForConstInputs) {
  ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  HybridModel model(root_model);
  model.root_graph_ = compute_graph;
  HybridModelBuilder builder(model);
  GeShape shape({2, 16});
  GeTensorDesc tensor_desc(shape);
  std::unique_ptr<NodeItem> const_node_item;
  {
    OpDescPtr const_op_desc = CreateOpDesc("Constant", "Const");
    const_op_desc->AddOutputDesc(tensor_desc);
    auto const_node = compute_graph->AddNode(const_op_desc);
    NodeItem::Create(const_node, const_node_item);
  }
  std::unique_ptr<NodeItem> non_const_node_item;
  {
    OpDescPtr op_desc = CreateOpDesc("Add", "Add");
    op_desc->AddOutputDesc(tensor_desc);
    auto const_node = compute_graph->AddNode(op_desc);
    NodeItem::Create(const_node, non_const_node_item);
  }
  std::unique_ptr<NodeItem> known_node_item;
  {
    OpDescPtr known_op_desc = CreateOpDesc("known", "PartitionedCall");
    known_op_desc->AddOutputDesc(tensor_desc);
    known_op_desc->AddOutputDesc(tensor_desc);
    auto known_node = compute_graph->AddNode(known_op_desc);
    NodeItem::Create(known_node, known_node_item);
  }
  std::unique_ptr<NodeItem> dst_node_item;
  {
    OpDescPtr known_op_desc = CreateOpDesc("SomeOp", "SomeOpType ");
    known_op_desc->AddOutputDesc(tensor_desc);
    known_op_desc->AddOutputDesc(tensor_desc);
    auto known_node = compute_graph->AddNode(known_op_desc);
    NodeItem::Create(known_node, dst_node_item);
  }
  float buffer[2 * 16];
  unique_ptr<TensorValue> tensor_value(new TensorValue(buffer, sizeof(buffer)));
  model.constant_tensors_[const_node_item->node] = std::move(tensor_value);
  // Case 1. connect to Const
  auto output_id = 1;
  builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(output_id, const_node_item.get());
  builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(0, non_const_node_item.get());
  dst_node_item->dependents_for_shape_inference.emplace_back(const_node_item->node);
  dst_node_item->dependents_for_shape_inference.emplace_back(non_const_node_item->node);
  ASSERT_EQ(builder.OptimizeDependenciesForConstantInputs(), SUCCESS);
  ASSERT_EQ(dst_node_item->dependents_for_shape_inference.size(), 1);
  ASSERT_EQ(dst_node_item->dependents_for_shape_inference[0], non_const_node_item->node);
  // Case 2. connect to known-subgraph, netoutput connect to Const
  builder.host_input_value_dependencies_.clear();
  dst_node_item->dependents_for_shape_inference.clear();
  builder.known_subgraph_constant_output_refs_[known_node_item.get()].emplace(output_id, const_node_item->node);
  builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(output_id, known_node_item.get());
  builder.host_input_value_dependencies_[dst_node_item.get()].emplace_back(0, non_const_node_item.get());
  dst_node_item->dependents_for_shape_inference.emplace_back(known_node_item->node);
  dst_node_item->dependents_for_shape_inference.emplace_back(non_const_node_item->node);
  ASSERT_EQ(builder.OptimizeDependenciesForConstantInputs(), SUCCESS);
  ASSERT_EQ(dst_node_item->dependents_for_shape_inference.size(), 1);
  ASSERT_EQ(dst_node_item->dependents_for_shape_inference[0], non_const_node_item->node);
}

TEST_F(UtestGeHybrid, test_key_for_kernel_bin) {
  auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow) hybrid::AiCoreOpTask());
  OpDesc op_desc("Sum", "Sum");
  EXPECT_EQ(aicore_task->GetKeyForTbeKernel(), OP_EXTATTR_NAME_TBE_KERNEL);
  EXPECT_EQ(aicore_task->GetKeyForTvmMagic(), TVM_ATTR_NAME_MAGIC);
  EXPECT_EQ(aicore_task->GetKeyForTvmMetaData(), TVM_ATTR_NAME_METADATA);
  EXPECT_EQ(aicore_task->GetKeyForKernelName(op_desc), "Sum_kernelname");
  auto atomic_task = std::unique_ptr<hybrid::AtomicAddrCleanOpTask>(new(std::nothrow) hybrid::AtomicAddrCleanOpTask());
  EXPECT_EQ(atomic_task->GetKeyForTbeKernel(), EXT_ATTR_ATOMIC_TBE_KERNEL);
  EXPECT_EQ(atomic_task->GetKeyForTvmMagic(), ATOMIC_ATTR_TVM_MAGIC);
  EXPECT_EQ(atomic_task->GetKeyForTvmMetaData(), ATOMIC_ATTR_TVM_METADATA);
  EXPECT_EQ(atomic_task->GetKeyForKernelName(op_desc), "Sum_atomic_kernelname");
}

TEST_F(UtestGeHybrid, TestParseDependentInputNodesForHccl) {
  NodeExecutorManager::GetInstance().engine_mapping_.emplace("ops_kernel_info_hccl",
                                                             NodeExecutorManager::ExecutorType::HCCL);
  ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>("test");
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  auto node = compute_graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->node_id = 0;
  OpDescPtr op_desc_1 = CreateOpDesc("AllReduce", "AllReduce");
  op_desc_1->SetOpKernelLibName("ops_kernel_info_hccl");
  auto node_1 = compute_graph->AddNode(op_desc_1);
  std::unique_ptr<NodeItem> node_item_1;
  NodeItem::Create(node_1, node_item_1);
  node_item_1->node_id = 1;
  node->GetOutControlAnchor()->LinkTo(node_1->GetInControlAnchor());
  OpDescPtr op_desc_2 = CreateOpDesc("net_output", NETOUTPUT);
  auto node_2 = compute_graph->AddNode(op_desc_2);
  std::unique_ptr<NodeItem> node_item_2;
  NodeItem::Create(node_2, node_item_2);
  node_item_2->node_id = 2;
  node_1->GetOutControlAnchor()->LinkTo(node_2->GetInControlAnchor());
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(compute_graph);
  HybridModel model(root_model);
  model.root_graph_ = compute_graph;
  model.node_items_.emplace(node, std::move(node_item));
  model.node_items_.emplace(node_1, std::move(node_item_1));
  model.node_items_.emplace(node_2, std::move(node_item_2));
  HybridModelBuilder builder(model);
  std::vector<std::string> deps;
  ASSERT_EQ(builder.ParseDependentInputNodes(*model.node_items_[node_1], deps), SUCCESS);
  ASSERT_EQ(builder.ParseDependentInputNodes(*model.node_items_[node_2], deps), SUCCESS);
  ASSERT_FALSE(model.GetNodeItem(node)->has_observer);
  ASSERT_TRUE(model.GetNodeItem(node_1)->has_observer);
  ASSERT_EQ(model.node_items_[node_1]->dependents_for_execution.size(), 0);
  ASSERT_EQ(model.node_items_[node_2]->dependents_for_execution.size(), 1);
}

TEST_F(UtestGeHybrid, TestParseDependencies) {
  // make graph
  ut::GraphBuilder graph_builder = ut::GraphBuilder("graph");
  auto data = graph_builder.AddNode("Data", "Data", 0, 1);
  auto netoutput = graph_builder.AddNode("Netoutput", "NetOutput", 1, 0);
  graph_builder.AddDataEdge(data, 0, netoutput, 0);
  auto graph = graph_builder.GetGraph();
  GeRootModelPtr root_model = MakeShared<ge::GeRootModel>(graph);
  HybridModel model(root_model);
  HybridModelBuilder builder(model);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(netoutput, node_item);
  std::unique_ptr<NodeItem> node_item2;
  NodeItem::Create(data, node_item2);
  model.node_items_.emplace(data, std::move(node_item2));
  std::vector<std::string> deps;
  deps.push_back("Data");
  auto op_desc = netoutput->GetOpDesc();
  op_desc->impl_->input_name_idx_["Data"] = 0;
  auto data_desc = data->GetOpDesc();
  auto tensor = std::make_shared<GeTensor>();
  auto tensor_desc = data_desc->MutableInputDesc(0);
  AttrUtils::SetTensor(tensor_desc, "_value", tensor);
  std::set<NodePtr> dependent_for_shape_inference;
  ASSERT_EQ(builder.ParseDependencies(*node_item, deps, dependent_for_shape_inference), SUCCESS);
}

TEST_F(UtestGeHybrid, TestTaskExecuteAsync) {
  auto graph = make_shared<ComputeGraph>("graph");
  OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  GeShape shape({2, 16});
  GeTensorDesc tensor_desc(shape);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddInputDesc(tensor_desc);
  op_desc->AddOutputDesc(tensor_desc);
  auto node = graph->AddNode(op_desc);
  std::unique_ptr<NodeItem> node_item;
  NodeItem::Create(node, node_item);
  node_item->input_start = 0;
  node_item->output_start = 0;
  GraphExecutionContext execution_context;
  SubgraphContext subgraph_context(nullptr, &execution_context);
  subgraph_context.all_inputs_.resize(2);
  subgraph_context.all_outputs_.resize(1);
  NodeState node_state(*node_item, &subgraph_context);
  auto task_context = TaskContext::Create(&node_state, &execution_context, &subgraph_context);
  ASSERT_NE(BuildTaskUtils::GetTaskInfo(*task_context.get()), "");
  std::unique_ptr<AiCoreOpTask> task1(new AiCoreOpTask());
  std::vector<std::unique_ptr<AiCoreOpTask>> tasks;
  AiCoreNodeTask node_task(std::move(tasks));
  ASSERT_EQ(node_task.ExecuteAsync(*(task_context.get()), nullptr), SUCCESS);
}

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the front-end module ME and the underlying hardware, serving as the bridge between the two. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE is mainly composed of two parts, GE API and GE Core.
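The unit tests above exercise GE's hybrid (dynamic-shape) execution path end to end. For orientation, the following is a minimal sketch, distilled from those tests, of the construction pattern they use: a ComputeGraph is wrapped in a GeRootModel, which backs a HybridModel that HybridModelBuilder and HybridModelExecutor operate on. The helper name BuildAndInitHybridModel and the bare demo graph are illustrative only; the include paths and member accessibility are assumed to match the test file's build setup.

#include <memory>
#include "runtime/rt.h"
#include "model/ge_root_model.h"
#include "hybrid/model/hybrid_model.h"
#include "hybrid/model/hybrid_model_builder.h"
#include "hybrid/executor/hybrid_model_executor.h"

// Illustrative helper (not part of the test file): wires an empty ComputeGraph
// into the hybrid-execution stack the same way the tests above do.
ge::Status BuildAndInitHybridModel() {
  auto compute_graph = std::make_shared<ge::ComputeGraph>("demo_graph");
  ge::GeRootModelPtr root_model = std::make_shared<ge::GeRootModel>(compute_graph);

  ge::hybrid::HybridModel model(root_model);
  ge::hybrid::HybridModelBuilder builder(model);
  // On a bare graph with no indexed task defs Build() reports an error
  // (see index_taskdefs_failed above); a fully prepared model yields SUCCESS.
  ge::Status build_ret = builder.Build();

  // The executor is constructed from the model, a device id and a stream,
  // mirroring the hybrid_model_executor test.
  uint32_t device_id = 0U;
  rtStream_t stream = nullptr;
  ge::hybrid::HybridModelExecutor executor(&model, device_id, stream);
  (void)executor.Init();
  return build_ret;
}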