infershape_pass_unittest.cc
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

// expose protected/private members of the pass for testing
#define protected public
#define private public
#include "graph/passes/infershape_pass.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/operator_factory.h"
#include "graph/operator_reg.h"
#include "graph_builder_utils.h"

using namespace std;
using namespace testing;

namespace ge {
namespace {
// do nothing stub infer_func
const auto stub_func = [](Operator &op) { return GRAPH_SUCCESS; };
// infer from input to output stub infer_func (input size == output size)
const auto stub_mapping_func = [](Operator &op) {
  size_t in_num = op.GetInputsSize();
  for (size_t i = 0; i < in_num; ++i) {
    auto in_desc = op.GetInputDesc(i);
    auto out_desc = op.GetOutputDesc(i);
    out_desc.SetShape(in_desc.GetShape());
    out_desc.SetDataType(in_desc.GetDataType());
    op.UpdateOutputDesc(out_desc.GetName(), out_desc);
  }
  return GRAPH_SUCCESS;
};
// merge infer_func
// while infer_func
const auto while_infer_func = [](Operator &op) {
  size_t in_num = op.GetInputsSize();
  size_t out_num = op.GetOutputsSize();
  if (in_num != out_num) {
    return GRAPH_FAILED;
  }
  bool need_infer_again = false;
  for (size_t i = 0; i < in_num; ++i) {
    auto in_desc = op.GetDynamicInputDesc("input", i);
    auto out_desc = op.GetDynamicOutputDesc("output", i);
    auto data_shape = in_desc.GetShape();
    auto out_shape = out_desc.GetShape();
    if (out_shape.GetDims() == DUMMY_SHAPE) {
      return GRAPH_SUCCESS;
    }
    // check datatype between output and input
    if (in_desc.GetDataType() != out_desc.GetDataType()) {
      return GRAPH_FAILED;
    }
    if (data_shape.GetDims() != out_shape.GetDims()) {
      need_infer_again = true;
      if (data_shape.GetDimNum() != out_shape.GetDimNum()) {
        in_desc.SetUnknownDimNumShape();
      } else {
        size_t data_dim_num = data_shape.GetDimNum();
        std::vector<std::pair<int64_t, int64_t>> data_shape_range(data_dim_num, std::make_pair(1, UNKNOWN_DIM));
        for (size_t j = 0; j < data_dim_num; ++j) {
          if (data_shape.GetDim(j) != out_shape.GetDim(j)) {
            data_shape.SetDim(j, UNKNOWN_DIM);
          }
          if (data_shape.GetDim(j) != UNKNOWN_DIM) {
            data_shape_range[j] = std::make_pair(data_shape.GetDim(j), data_shape.GetDim(j));
          }
        }
        in_desc.SetShape(data_shape);
        in_desc.SetShapeRange(data_shape_range);
      }
      op.UpdateDynamicOutputDesc("output", i, in_desc);
      op.UpdateDynamicInputDesc("input", i, in_desc);
    }
  }
  return need_infer_again ? GRAPH_NODE_NEED_REPASS : GRAPH_SUCCESS;
};
}  // namespace
class UtestGraphInfershapePass : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};

static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num,
                          std::function<graphStatus(Operator &)> infer_func = stub_func) {
  OpDescPtr op_desc = std::make_shared<OpDesc>(name, type);
  op_desc->SetStreamId(0);
  static int32_t index = 0;
  op_desc->SetId(index++);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);
  vector<int64_t> input_offset;
  for (int i = 0; i < in_num; i++) {
    op_desc->AddInputDesc(tensor);
    input_offset.emplace_back(1024);
  }
  op_desc->SetInputOffset(input_offset);

  vector<int64_t> output_offset;
  for (int i = 0; i < out_num; i++) {
    op_desc->AddOutputDesc(tensor);
    output_offset.emplace_back(1024);
  }
  op_desc->SetOutputOffset(output_offset);

  op_desc->SetWorkspace({});
  op_desc->SetWorkspaceBytes({});
  op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE");
  op_desc->AddInferFunc(infer_func);

  return graph.AddNode(op_desc);
}
/*
TEST_F(UtestGraphInfershapePass, infershape_pass_failed) {
  GeTensorDesc ge_tensor_desc(GeShape({-2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
  string type = "AddN";
  auto addn_op_desc = std::make_shared<OpDesc>("AddN", type);
  addn_op_desc->AddInputDesc(ge_tensor_desc);
  addn_op_desc->AddOutputDesc(ge_tensor_desc);
  auto graph = std::make_shared<ComputeGraph>("test");
  auto addn_node = std::make_shared<Node>(addn_op_desc, graph);
  addn_node->Init();

  InferShapePass infershape_pass;
  EXPECT_EQ(infershape_pass.Run(addn_node), GE_GRAPH_INFERSHAPE_FAILED);
}
*/
TEST_F(UtestGraphInfershapePass, delete_need_infer_again) {
  auto graph = std::make_shared<ComputeGraph>("test");
  auto no_op_desc = std::make_shared<OpDesc>("No", "NoOp");
  auto no_op_node = graph->AddNode(no_op_desc);
  AttrUtils::SetBool(no_op_desc, "_need_infer_again", false);

  InferShapePass infershape_pass;
  infershape_pass.options_[kOptimizeAfterSubGraph] = "yes";
  EXPECT_EQ(infershape_pass.Run(no_op_node), SUCCESS);
}
TEST_F(UtestGraphInfershapePass, infer_from_pre_to_next) {
  /*
   *  data -> cast -> transdata
   */
  auto graph = std::make_shared<ComputeGraph>("test_infer_shape");
  auto data1 = CreateNode(*graph, "data1", DATA, 0, 1);
  auto cast1 = CreateNode(*graph, "cast1", CAST, 1, 1, stub_mapping_func);
  auto cast_in_desc = cast1->GetOpDesc()->MutableInputDesc(0);
  cast_in_desc->SetShape(GeShape({1, 2, 3}));
  cast_in_desc->SetDataType(DT_INT32);
  auto transdata1 = CreateNode(*graph, "transdata1", TRANSDATA, 1, 1, stub_mapping_func);
  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), cast1->GetInDataAnchor(0));
  GraphUtils::AddEdge(cast1->GetOutDataAnchor(0), transdata1->GetInDataAnchor(0));

  // check shapes before running infer on cast1
  auto cast_before = graph->FindNode("cast1");
  vector<int64_t> expect_cast1_shape_dim = {1, 2, 3};
  auto real_cast1_before_shape_dim = cast_before->GetOpDesc()->GetInputDesc(0).GetShape().GetDims();
  auto transdata1_before = graph->FindNode("transdata1");
  vector<int64_t> expect_transdata1_shape_dim = {};
  auto real_transdata1_before_shape_dim = transdata1_before->GetOpDesc()->GetInputDesc(0).GetShape().GetDims();
  EXPECT_EQ(real_cast1_before_shape_dim, expect_cast1_shape_dim);
  EXPECT_EQ(real_transdata1_before_shape_dim, expect_transdata1_shape_dim);

  // run infershape pass on cast1
  InferShapePass infer_shape_pass;
  infer_shape_pass.Run(cast_before);

  // check that cast1 added transdata1 to the repass-immediately list
  EXPECT_TRUE(!infer_shape_pass.GetNodesNeedRePassImmediately().empty());

  // check transdata1 input shape & datatype after infer
  auto transdata1_after = graph->FindNode("transdata1");
  auto transdata1_opdesc = transdata1_after->GetOpDesc();
  auto real_transdata1_after_shape_dim = transdata1_opdesc->GetInputDesc(0).GetShape().GetDims();
  EXPECT_EQ(real_transdata1_after_shape_dim, expect_cast1_shape_dim);
  auto transdata1_datatype_after = transdata1_opdesc->GetInputDesc(0).GetDataType();
  EXPECT_EQ(transdata1_datatype_after, DT_INT32);
}
TEST_F(UtestGraphInfershapePass, stop_node_for_while_loop) {
  /*******************************************************************************
   *            Exit           Identity
   *              \          /         \.
   *               \        /           \.
   *                Switch             Add
   *               /   |                |
   *              /    |                |
   *             /     |                |
   *     LoopCond      |                |
   *             \     |                |
   *              \    |                |
   *               \   |                |
   *              Less |                |
   *                \  |          NextIteration
   *                 \ |                |
   *                  \|                |
   *                  Merge <-----------|
   *                    |
   *                    |
   *                  Enter
   ******************************************************************************/
  auto graph = std::make_shared<ComputeGraph>("test_infer_shape");
  auto data1 = CreateNode(*graph, "data", DATA, 1, 1);
  auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1);
  auto merge1 = CreateNode(*graph, "merge", MERGE, 2, 2);
  auto less1 = CreateNode(*graph, "less", LESS, 2, 1);
  auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1);
  auto switch1 = CreateNode(*graph, "switch", SWITCH, 2, 2);
  auto ident1 = CreateNode(*graph, "identity", IDENTITY, 1, 1);
  auto add1 = CreateNode(*graph, "add", ADD, 2, 1);
  auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1);
  auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1);
  auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1);

  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0));
  GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0));
  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1));
  GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0));
  GraphUtils::AddEdge(switch1->GetOutDataAnchor(1), ident1->GetInDataAnchor(0));
  GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1));
  GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0));
  GraphUtils::AddEdge(next1->GetOutDataAnchor(0), merge1->GetInDataAnchor(1));
  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

  GEPass ge_passes(graph);
  NamesToPass names_to_passes;
  InferShapePass infer_shape_pass;
  names_to_passes.emplace_back("InferShapePass", &infer_shape_pass);

  EXPECT_EQ(ge_passes.Run(names_to_passes), SUCCESS);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
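The pass-driven optimization flow that GE Core applies to an incoming graph is exactly what the `stop_node_for_while_loop` test above exercises. Below is a minimal sketch of that flow, reusing the `ComputeGraph`, `GEPass`, `NamesToPass`, and `InferShapePass` types from the unit test; the header path for `GEPass`/`NamesToPass` and the free-standing helper `RunInferShapeOnGraph` are assumptions for illustration, not part of the file above.

```cpp
#include "graph/passes/base_pass.h"        // assumed location of GEPass / NamesToPass
#include "graph/passes/infershape_pass.h"

namespace ge {
// Minimal sketch: run only the InferShape pass over a graph, mirroring the
// GEPass usage in stop_node_for_while_loop above. RunInferShapeOnGraph is a
// hypothetical helper for illustration, not a GE API.
Status RunInferShapeOnGraph(const ComputeGraphPtr &graph) {
  GEPass ge_passes(graph);                  // drives node-level passes over the graph
  InferShapePass infer_shape_pass;          // propagates shapes/dtypes from inputs to outputs
  NamesToPass names_to_passes;
  names_to_passes.emplace_back("InferShapePass", &infer_shape_pass);
  return ge_passes.Run(names_to_passes);    // SUCCESS once every node's shape is resolved
}
}  // namespace ge
```

In the full GE pipeline additional passes would be registered in `names_to_passes` alongside shape inference; the unit test registers only `InferShapePass` so the while-loop graph exercises the re-pass and stop-node handling in isolation.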