
infershape_pass_unittest.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

#define protected public
#define private public
#include "graph/passes/infershape_pass.h"

#include "graph/utils/tensor_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/operator_factory.h"
#include "graph/operator_reg.h"
#include "graph_builder_utils.h"

using namespace std;
using namespace testing;

namespace ge {
class UtestGraphInfershapePass : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};
static NodePtr CreateNode(ComputeGraph &graph, const string &name, const string &type, int in_num, int out_num) {
  OpDescPtr op_desc = std::make_shared<OpDesc>(name, type);
  op_desc->SetStreamId(0);
  static int32_t index = 0;
  op_desc->SetId(index++);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);
  vector<int64_t> input_offset;
  for (int i = 0; i < in_num; i++) {
    op_desc->AddInputDesc(tensor);
    input_offset.emplace_back(1024);
  }
  op_desc->SetInputOffset(input_offset);

  vector<int64_t> output_offset;
  for (int i = 0; i < out_num; i++) {
    op_desc->AddOutputDesc(tensor);
    output_offset.emplace_back(1024);
  }
  op_desc->SetOutputOffset(output_offset);

  op_desc->SetWorkspace({});
  op_desc->SetWorkspaceBytes({});
  op_desc->SetOpKernelLibName("DNN_VM_RTS_OP_STORE");

  const auto stub_func = [](Operator &op) { return GRAPH_SUCCESS; };
  op_desc->AddInferFunc(stub_func);
  op_desc->AddInferFormatFunc(stub_func);
  op_desc->AddVerifierFunc(stub_func);

  return graph.AddNode(op_desc);
}
TEST_F(UtestGraphInfershapePass, infershape_pass_failed) {
  GeTensorDesc ge_tensor_desc(GeShape({-2, 2, 3, 4}), ge::FORMAT_NCHW, DT_FLOAT16);
  string type = "AddN";
  auto addn_op_desc = std::make_shared<OpDesc>("AddN", type);
  addn_op_desc->AddInputDesc(ge_tensor_desc);
  addn_op_desc->AddOutputDesc(ge_tensor_desc);
  auto graph = std::make_shared<ComputeGraph>("test");
  auto addn_node = std::make_shared<Node>(addn_op_desc, graph);
  addn_node->Init();

  InferShapePass infershape_pass;
  EXPECT_EQ(infershape_pass.Run(addn_node), GE_GRAPH_INFERSHAPE_FAILED);
}

TEST_F(UtestGraphInfershapePass, delete_need_infer_again) {
  auto graph = std::make_shared<ComputeGraph>("test");
  auto no_op_desc = std::make_shared<OpDesc>("No", "NoOp");
  auto no_op_node = graph->AddNode(no_op_desc);
  AttrUtils::SetBool(no_op_desc, "_need_infer_again", false);

  InferShapePass infershape_pass;
  infershape_pass.options_[kOptimizeAfterSubGraph] = "yes";
  EXPECT_EQ(infershape_pass.Run(no_op_node), SUCCESS);
}
TEST_F(UtestGraphInfershapePass, stop_node_for_while_loop) {
  /*******************************************************************************
   *      Exit         Identity
   *        \          /      \.
   *         \        /        \.
   *          Switch           Add
   *         /     |            |
   *        /      |            |
   *       /       |            |
   *  LoopCond     |            |
   *      \        |            |
   *       \       |            |
   *        \      |            |
   *       Less    |            |
   *          \    |     NextIteration
   *           \   |            |
   *            \  |            |
   *            Merge <---------|
   *              |
   *              |
   *            Enter
   ******************************************************************************/
  auto graph = std::make_shared<ComputeGraph>("test_infer_shape");
  auto data1 = CreateNode(*graph, "data", DATA, 1, 1);
  auto enter1 = CreateNode(*graph, "enter", ENTER, 1, 1);
  auto merge1 = CreateNode(*graph, "merge", MERGE, 2, 2);
  auto less1 = CreateNode(*graph, "less", LESS, 2, 1);
  auto loop1 = CreateNode(*graph, "loopcond", LOOPCOND, 1, 1);
  auto switch1 = CreateNode(*graph, "switch", SWITCH, 2, 2);
  auto ident1 = CreateNode(*graph, "identity", IDENTITY, 1, 1);
  auto add1 = CreateNode(*graph, "add", ADD, 2, 1);
  auto next1 = CreateNode(*graph, "next", NEXTITERATION, 1, 1);
  auto exit1 = CreateNode(*graph, "exit", EXIT, 1, 1);
  auto value0 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto value1 = CreateNode(*graph, "const", CONSTANT, 0, 1);
  auto output1 = CreateNode(*graph, "net_output", NETOUTPUT, 1, 1);

  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), enter1->GetInDataAnchor(0));
  GraphUtils::AddEdge(enter1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), loop1->GetInDataAnchor(0));
  GraphUtils::AddEdge(loop1->GetOutDataAnchor(0), switch1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), switch1->GetInDataAnchor(1));
  GraphUtils::AddEdge(switch1->GetOutDataAnchor(0), exit1->GetInDataAnchor(0));
  GraphUtils::AddEdge(switch1->GetOutDataAnchor(1), ident1->GetInDataAnchor(0));
  GraphUtils::AddEdge(ident1->GetOutDataAnchor(0), add1->GetInDataAnchor(0));
  GraphUtils::AddEdge(value1->GetOutDataAnchor(0), add1->GetInDataAnchor(1));
  GraphUtils::AddEdge(add1->GetOutDataAnchor(0), next1->GetInDataAnchor(0));
  GraphUtils::AddEdge(next1->GetOutDataAnchor(0), merge1->GetInDataAnchor(1));
  GraphUtils::AddEdge(exit1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

  GEPass ge_passes(graph);
  NamesToPass names_to_passes;
  InferShapePass infer_shape_pass;
  names_to_passes.emplace_back("InferShapePass", &infer_shape_pass);
  EXPECT_EQ(ge_passes.Run(names_to_passes), SUCCESS);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to make full use of its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE is mainly composed of two parts, GE API and GE Core (detailed architecture diagram omitted).
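For reference, the minimal sketch below shows how the GE pass driver is invoked on a compute graph, mirroring the calls already used in the stop_node_for_while_loop test above (ComputeGraph, GEPass, NamesToPass, InferShapePass). The function name RunInferShapeDemo, the graph name "demo_graph", and the include of graph/passes/base_pass.h for GEPass/NamesToPass are illustrative assumptions, not part of the test file.

#include <memory>
#include "graph/compute_graph.h"
#include "graph/passes/base_pass.h"        // assumed location of GEPass / NamesToPass
#include "graph/passes/infershape_pass.h"

// Sketch only: build (or receive) a compute graph, register the shape-inference
// pass by name, and let the GE pass driver walk the graph, as the unit test does.
void RunInferShapeDemo() {
  // A real caller would add nodes first (e.g. via a helper like CreateNode above).
  auto graph = std::make_shared<ge::ComputeGraph>("demo_graph");

  ge::GEPass ge_passes(graph);            // pass driver bound to this graph
  ge::NamesToPass names_to_passes;        // list of (name, pass*) pairs to execute
  ge::InferShapePass infer_shape_pass;    // propagates tensor shapes node by node
  names_to_passes.emplace_back("InferShapePass", &infer_shape_pass);

  // Returns SUCCESS when shape inference completes for every node.
  auto status = ge_passes.Run(names_to_passes);
  (void)status;
}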