
dimension_adjust_pass_unittest.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#define protected public
#define private public
#include "graph/passes/dimension_adjust_pass.h"

#include "common/debug/log.h"
#include "common/debug/memory_dumper.h"
#include "common/ge_inner_error_codes.h"
#include "common/op/ge_op_utils.h"
#include "common/types.h"
#include "graph/types.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "inc/kernel.h"
#include "inc/kernel_factory.h"
#undef protected
#undef private

using namespace std;
using namespace testing;

namespace ge {
class TestExpandDimKernel : public Kernel {
 public:
  Status Compute(const NodePtr &node_ptr) override {
    return SUCCESS;
  }
};
REGISTER_KERNEL(EXPANDDIMS, TestExpandDimKernel);

class TestExpandDimKernelNotChange : public Kernel {
 public:
  Status Compute(const NodePtr &node_ptr) override {
    return NOT_CHANGED;
  }
};

class UtestGraphPassesDimensionAdjustPass : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {
    KernelFactory::Instance().creator_map_.clear();
  }
};

TEST_F(UtestGraphPassesDimensionAdjustPass, succ) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");

  ge::OpDescPtr data_op_desc = make_shared<ge::OpDesc>("data", CONSTANTOP);
  int64_t dims_size = 1;
  vector<int64_t> data_vec = {1, 2, 3};
  for_each(data_vec.begin(), data_vec.end(), [&](int64_t &data) { dims_size *= data; });
  vector<int32_t> data_value_vec(dims_size, 1);
  GeTensorDesc data_tensor_desc(GeShape(data_vec), FORMAT_NCHW, DT_INT32);
  GeTensorPtr data_tensor = make_shared<GeTensor>(data_tensor_desc, (uint8_t *)data_value_vec.data(),
                                                  data_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(data_op_desc, data_tensor);
  data_op_desc->AddOutputDesc(data_tensor_desc);
  NodePtr data_node = graph->AddNode(data_op_desc);
  data_node->Init();

  // add dim node
  ge::OpDescPtr dim_op_desc = make_shared<ge::OpDesc>("dim", CONSTANTOP);
  vector<int32_t> dim_value_vec = {0};
  GeTensorDesc dim_tensor_desc(ge::GeShape(), FORMAT_NCHW, DT_INT32);
  GeTensorPtr dim_tensor =
      make_shared<GeTensor>(dim_tensor_desc, (uint8_t *)dim_value_vec.data(), dim_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(dim_op_desc, dim_tensor);
  dim_op_desc->AddOutputDesc(dim_tensor_desc);
  NodePtr dim_node = graph->AddNode(dim_op_desc);
  dim_node->Init();

  // add expanddims node
  OpDescPtr expanddims_op_desc = std::make_shared<OpDesc>("Expanddims", EXPANDDIMS);
  vector<int64_t> expanddims_vec = {1, 1, 2, 3};
  GeTensorDesc expanddims_tensor_desc(ge::GeShape(expanddims_vec), FORMAT_NCHW, DT_INT32);
  GeTensorPtr expanddims_tensor = make_shared<GeTensor>(expanddims_tensor_desc, (uint8_t *)data_value_vec.data(),
                                                        data_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(expanddims_op_desc, expanddims_tensor);
  expanddims_op_desc->AddInputDesc(data_tensor_desc);
  expanddims_op_desc->AddInputDesc(dim_tensor_desc);
  expanddims_op_desc->AddOutputDesc(expanddims_tensor_desc);
  NodePtr op_node = graph->AddNode(expanddims_op_desc);
  op_node->Init();

  // add output node
  OpDescPtr netoutput_op_desc = std::make_shared<OpDesc>("NetOutput", "NetOutput");
  netoutput_op_desc->AddInputDesc(expanddims_tensor_desc);
  NodePtr netoutput_node = graph->AddNode(netoutput_op_desc);
  netoutput_node->Init();

  // add edge
  GraphUtils::AddEdge(data_node->GetOutDataAnchor(0), op_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(dim_node->GetOutDataAnchor(0), op_node->GetInDataAnchor(1));
  GraphUtils::AddEdge(op_node->GetOutDataAnchor(0), netoutput_node->GetInDataAnchor(0));

  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  NamesToPass names_to_passes;
  EXPECT_EQ(4, graph->GetDirectNodesSize());
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(SUCCESS, ret);
  EXPECT_EQ(2, op_node->GetOwnerComputeGraph()->GetDirectNodesSize());
}

TEST_F(UtestGraphPassesDimensionAdjustPass, input_node_is_nullptr) {
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::NodePtr node = nullptr;
  ge::Status ret = pass->Run(node);
  EXPECT_EQ(PARAM_INVALID, ret);
}

TEST_F(UtestGraphPassesDimensionAdjustPass, node_op_desc_is_nullptr) {
  NodePtr op_node = make_shared<Node>(nullptr, nullptr);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(PARAM_INVALID, ret);
}

TEST_F(UtestGraphPassesDimensionAdjustPass, node_get_original_type_failed) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", FRAMEWORKOP);
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
}

TEST_F(UtestGraphPassesDimensionAdjustPass, node_not_register_op) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", FRAMEWORKOP);
  AttrUtils::SetStr(expanddim_op_desc, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, "expanddims_fake");
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(SUCCESS, ret);
}

TEST_F(UtestGraphPassesDimensionAdjustPass, node_compute_failed) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", EXPANDDIMS);
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(SUCCESS, ret);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the frontend module ME and the underlying hardware, acting as the bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its computing power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
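As a code-level complement to that overview, the following minimal sketch shows how one of GE Core's node-level optimization passes is driven, mirroring the pattern in dimension_adjust_pass_unittest.cc above. The wrapper function RunDimensionAdjust and its comments are illustrative assumptions, not part of GE's public API; only DimensionAdjustPass, NodePtr, and Run come from the test file itself.

#include <memory>
#include "graph/passes/dimension_adjust_pass.h"

// Illustrative sketch: drive a single node-level optimization pass, as the
// unit test above does. DimensionAdjustPass looks up a kernel for the node's
// op type and, when the kernel computes successfully, removes the redundant
// ExpandDims node from its owner ComputeGraph.
ge::Status RunDimensionAdjust(const ge::NodePtr &node) {
  auto pass = std::make_shared<ge::DimensionAdjustPass>();
  // Returns PARAM_INVALID when the node or its OpDesc is null (see the
  // input_node_is_nullptr / node_op_desc_is_nullptr tests above).
  return pass->Run(node);
}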