You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

ge_hybrid_unittest.cc 10 kB

4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245
  1. /**
  2. * Copyright 2019-2021 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <gtest/gtest.h>
  17. #include <gmock/gmock.h>
  18. #include <vector>
  19. #include "runtime/rt.h"
  20. #define protected public
  21. #define private public
  22. #include "hybrid/model/hybrid_model_builder.h"
  23. #include "hybrid/model/hybrid_model.h"
  24. #include "model/ge_model.h"
  25. #include "model/ge_root_model.h"
  26. #include "hybrid/node_executor/aicore/aicore_op_task.h"
  27. #include "framework/common/taskdown_common.h"
  28. #include "framework/common/debug/log.h"
  29. #include "graph/ge_context.h"
  30. #include "hybrid/executor/hybrid_execution_context.h"
  31. #include "hybrid/node_executor/aicore/aicore_task_builder.h"
  32. #include "graph/load/model_manager/tbe_handle_store.h"
  33. #include "graph/manager/graph_mem_allocator.h"
  34. #include "hybrid/common/npu_memory_allocator.h"
  35. #include "graph/types.h"
  36. #include "graph/utils/tensor_utils.h"
  37. #undef private
  38. #undef protected
  39. using namespace std;
  40. using namespace testing;
  41. using namespace ge;
  42. using namespace hybrid;
  43. class UtestGeHybrid : public testing::Test {
  44. protected:
  45. void SetUp() {}
  46. void TearDown() {}
  47. };
  48. static ge::OpDescPtr CreateOpDesc(string name = "", string type = "") {
  49. auto op_desc = std::make_shared<ge::OpDesc>(name, type);
  50. op_desc->SetStreamId(0);
  51. op_desc->SetId(0);
  52. op_desc->SetWorkspace({});
  53. ;
  54. op_desc->SetWorkspaceBytes({});
  55. op_desc->SetInputOffset({});
  56. op_desc->SetOutputOffset({});
  57. ge::AttrUtils::SetStr(op_desc, ge::TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF_AIVEC");
  58. bool support_dynamic = true;
  59. ge::AttrUtils::GetBool(op_desc, "support_dynamicshape", support_dynamic);
  60. return op_desc;
  61. }
  62. TEST_F(UtestGeHybrid, aicore_op_task_init_success) {
  63. // build aicore task
  64. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  65. domi::TaskDef task_def;
  66. task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
  67. domi::KernelDefWithHandle *kernel_with_handle = task_def.mutable_kernel_with_handle();
  68. kernel_with_handle->set_original_kernel_key("");
  69. kernel_with_handle->set_node_info("");
  70. kernel_with_handle->set_block_dim(32);
  71. kernel_with_handle->set_args_size(64);
  72. string args(64, '1');
  73. kernel_with_handle->set_args(args.data(), 64);
  74. domi::KernelContext *context = kernel_with_handle->mutable_context();
  75. context->set_op_index(1);
  76. context->set_kernel_type(2); // ccKernelType::TE
  77. uint16_t args_offset[9] = {0};
  78. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  79. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  80. std::vector<char> kernelBin;
  81. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  82. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  83. std::string kernel_name("kernel/Add");
  84. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  85. ASSERT_EQ(aicore_task->InitWithTaskDef(*op_desc.get(), task_def), SUCCESS);
  86. rtStream_t stream = nullptr;
  87. rtStreamCreate(&stream, 0);
  88. ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  89. char *handle = "";
  90. aicore_task->handle_ = handle;
  91. aicore_task->tiling_key_ = 1;
  92. ASSERT_EQ(aicore_task->LaunchKernel(stream), SUCCESS);
  93. }
  94. TEST_F(UtestGeHybrid, task_update_tiling_info) {
  95. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  96. aicore_task->is_single_op_ = true;
  97. auto graph = make_shared<ComputeGraph>("graph");
  98. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  99. ge::AttrUtils::SetStr(op_desc, "compile_info_key", "key");
  100. ge::AttrUtils::SetStr(op_desc, "compile_info_json", "json");
  101. auto node = graph->AddNode(op_desc);
  102. optiling::OpRunInfo tiling_info;
  103. ASSERT_EQ(aicore_task->CalcTilingInfo(node, tiling_info), SUCCESS);
  104. }
  105. TEST_F(UtestGeHybrid, index_taskdefs_failed) {
  106. // build aicore task
  107. domi::ModelTaskDef model_task_def;
  108. std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  109. domi::TaskDef *task_def = model_task_def_ptr->add_task();
  110. GeModelPtr ge_model = make_shared<GeModel>();
  111. ge_model->SetModelTaskDef(model_task_def_ptr);
  112. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  113. task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  114. domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  115. kernel_with_handle->set_original_kernel_key("");
  116. kernel_with_handle->set_node_info("");
  117. kernel_with_handle->set_block_dim(32);
  118. kernel_with_handle->set_args_size(64);
  119. string args(64, '1');
  120. kernel_with_handle->set_args(args.data(), 64);
  121. domi::KernelContext *context = kernel_with_handle->mutable_context();
  122. context->set_op_index(1);
  123. context->set_kernel_type(2); // ccKernelType::TE
  124. uint16_t args_offset[9] = {0};
  125. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  126. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  127. std::vector<char> kernelBin;
  128. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  129. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  130. std::string kernel_name("kernel/Add");
  131. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  132. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  133. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  134. HybridModel hybrid_model(ge_root_model);
  135. HybridModelBuilder hybrid_model_builder(hybrid_model);
  136. ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), INTERNAL_ERROR);
  137. }
  138. TEST_F(UtestGeHybrid, parse_force_infershape_nodes) {
  139. const char *const kForceInfershape = "_force_infershape_when_running";
  140. auto graph = make_shared<ComputeGraph>("graph");
  141. OpDescPtr op_desc = CreateOpDesc("Conv2D", "Conv2D");
  142. ge::AttrUtils::SetBool(op_desc, kForceInfershape, true);
  143. auto node = graph->AddNode(op_desc);
  144. std::unique_ptr<NodeItem> new_node;
  145. NodeItem::Create(node, new_node);
  146. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  147. HybridModel hybrid_model(ge_root_model);
  148. HybridModelBuilder hybrid_model_builder(hybrid_model);
  149. ASSERT_EQ(hybrid_model_builder.ParseForceInfershapeNodes(node, *new_node), SUCCESS);
  150. }
  151. TEST_F(UtestGeHybrid, index_taskdefs_success) {
  152. // build aicore task
  153. domi::ModelTaskDef model_task_def;
  154. std::shared_ptr<domi::ModelTaskDef> model_task_def_ptr = make_shared<domi::ModelTaskDef>(model_task_def);
  155. domi::TaskDef *task_def = model_task_def_ptr->add_task();
  156. GeModelPtr ge_model = make_shared<GeModel>();
  157. ge_model->SetModelTaskDef(model_task_def_ptr);
  158. auto aicore_task = std::unique_ptr<hybrid::AiCoreOpTask>(new(std::nothrow)hybrid::AiCoreOpTask());
  159. task_def->set_type(RT_MODEL_TASK_ALL_KERNEL);
  160. domi::KernelDefWithHandle *kernel_with_handle = task_def->mutable_kernel_with_handle();
  161. kernel_with_handle->set_original_kernel_key("");
  162. kernel_with_handle->set_node_info("");
  163. kernel_with_handle->set_block_dim(32);
  164. kernel_with_handle->set_args_size(64);
  165. string args(64, '1');
  166. kernel_with_handle->set_args(args.data(), 64);
  167. domi::KernelContext *context = kernel_with_handle->mutable_context();
  168. context->set_op_index(0);
  169. context->set_kernel_type(2); // ccKernelType::TE
  170. uint16_t args_offset[9] = {0};
  171. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  172. OpDescPtr op_desc = CreateOpDesc("Add", "Add");
  173. std::vector<char> kernelBin;
  174. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  175. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  176. std::string kernel_name("kernel/Add");
  177. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  178. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  179. NodePtr node = graph->AddNode(op_desc);
  180. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  181. HybridModel hybrid_model(ge_root_model);
  182. HybridModelBuilder hybrid_model_builder(hybrid_model);
  183. ASSERT_EQ(hybrid_model_builder.IndexTaskDefs(graph, ge_model), SUCCESS);
  184. }
  185. TEST_F(UtestGeHybrid, init_weight_success) {
  186. NpuMemoryAllocator::allocators_.emplace(make_pair(0, nullptr));
  187. // make graph with sub_graph
  188. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("root_graph");
  189. OpDescPtr op_desc = CreateOpDesc("if", IF);
  190. NodePtr node = graph->AddNode(op_desc);
  191. // make sub graph
  192. ComputeGraphPtr sub_graph = std::make_shared<ComputeGraph>("if_sub_graph");
  193. OpDescPtr const_op_desc = CreateOpDesc("const", CONSTANT);
  194. vector<int64_t> dims_vec_0 = {2, 1, 4, 1, 2};
  195. vector<int32_t> data_vec_0 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  196. GeTensorDesc tensor_desc_0(GeShape(dims_vec_0), FORMAT_NCHW, DT_INT32);
  197. (void)TensorUtils::SetRealDimCnt(tensor_desc_0, dims_vec_0.size());
  198. ConstGeTensorPtr constTensor_0 =
  199. std::make_shared<GeTensor>(tensor_desc_0, (uint8_t *)&data_vec_0[0], data_vec_0.size() * sizeof(int32_t));
  200. AttrUtils::SetTensor(const_op_desc, ge::ATTR_NAME_WEIGHTS, constTensor_0);
  201. const_op_desc->AddOutputDesc(tensor_desc_0);
  202. NodePtr const_node = sub_graph->AddNode(const_op_desc);
  203. graph->AddSubgraph("sub", sub_graph);
  204. GeRootModelPtr ge_root_model = make_shared<GeRootModel>(graph);
  205. GeModelPtr ge_sub_model = make_shared<GeModel>();
  206. //Buffer weight_buffer = Buffer(128,0);
  207. //ge_sub_model->SetWeight(weight_buffer);
  208. ge_root_model->SetSubgraphInstanceNameToModel("sub",ge_sub_model);
  209. HybridModel hybrid_model(ge_root_model);
  210. HybridModelBuilder hybrid_model_builder(hybrid_model);
  211. auto ret = hybrid_model_builder.InitWeights();
  212. ASSERT_EQ(ret,SUCCESS);
  213. Buffer weight_buffer = Buffer(128,0);
  214. ge_sub_model->SetWeight(weight_buffer);
  215. ret = hybrid_model_builder.InitWeights();
  216. ASSERT_EQ(ret,PARAM_INVALID);
  217. }

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示