
dimension_adjust_pass_unittest.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <algorithm>  // std::for_each
#include <memory>
#include <vector>

#define protected public
#define private public
#include "graph/passes/dimension_adjust_pass.h"

#include "common/debug/log.h"
#include "common/debug/memory_dumper.h"
#include "common/ge_inner_error_codes.h"
#include "common/op/ge_op_utils.h"
#include "common/types.h"
#include "graph/types.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "inc/kernel_factory.h"
#undef protected
#undef private

using namespace std;
using namespace testing;
using namespace domi;

namespace ge {
class UTEST_graph_passes_dimension_adjust_pass : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};

// Build a small graph (const data / const dim -> ExpandDims -> NetOutput) and expect the pass to succeed.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, succ) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");

  // add data node
  ge::OpDescPtr data_op_desc = make_shared<ge::OpDesc>("data", CONSTANTOP);
  int64_t dims_size = 1;
  vector<int64_t> data_vec = {1, 2, 3};
  for_each(data_vec.begin(), data_vec.end(), [&](int64_t &data) { dims_size *= data; });
  vector<int32_t> data_value_vec(dims_size, 1);
  GeTensorDesc data_tensor_desc(GeShape(data_vec), FORMAT_NCHW, DT_INT32);
  GeTensorPtr data_tensor = make_shared<GeTensor>(data_tensor_desc, (uint8_t *)data_value_vec.data(),
                                                  data_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(data_op_desc, data_tensor);
  data_op_desc->AddOutputDesc(data_tensor_desc);
  NodePtr data_node = graph->AddNode(data_op_desc);
  data_node->Init();

  // add dim node
  ge::OpDescPtr dim_op_desc = make_shared<ge::OpDesc>("dim", CONSTANTOP);
  vector<int32_t> dim_value_vec = {0};
  GeTensorDesc dim_tensor_desc(ge::GeShape(), FORMAT_NCHW, DT_INT32);
  GeTensorPtr dim_tensor =
      make_shared<GeTensor>(dim_tensor_desc, (uint8_t *)dim_value_vec.data(), dim_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(dim_op_desc, dim_tensor);
  dim_op_desc->AddOutputDesc(dim_tensor_desc);
  NodePtr dim_node = graph->AddNode(dim_op_desc);
  dim_node->Init();

  // add expanddims node
  OpDescPtr expanddims_op_desc = std::make_shared<OpDesc>("Expanddims", EXPANDDIMS);
  vector<int64_t> expanddims_vec = {1, 1, 2, 3};
  GeTensorDesc expanddims_tensor_desc(ge::GeShape(expanddims_vec), FORMAT_NCHW, DT_INT32);
  GeTensorPtr expanddims_tensor = make_shared<GeTensor>(expanddims_tensor_desc, (uint8_t *)data_value_vec.data(),
                                                        data_value_vec.size() * sizeof(int32_t));
  OpDescUtils::SetWeights(expanddims_op_desc, expanddims_tensor);
  expanddims_op_desc->AddInputDesc(data_tensor_desc);
  expanddims_op_desc->AddInputDesc(dim_tensor_desc);
  expanddims_op_desc->AddOutputDesc(expanddims_tensor_desc);
  NodePtr op_node = graph->AddNode(expanddims_op_desc);
  op_node->Init();

  // add output node
  OpDescPtr netoutput_op_desc = std::make_shared<OpDesc>("NetOutput", "NetOutput");
  netoutput_op_desc->AddInputDesc(expanddims_tensor_desc);
  NodePtr netoutput_node = graph->AddNode(netoutput_op_desc);
  netoutput_node->Init();

  // add edges
  GraphUtils::AddEdge(data_node->GetOutDataAnchor(0), op_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(dim_node->GetOutDataAnchor(0), op_node->GetInDataAnchor(1));
  GraphUtils::AddEdge(op_node->GetOutDataAnchor(0), netoutput_node->GetInDataAnchor(0));

  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(domi::SUCCESS, ret);
}

// Running the pass on a null node must be rejected as an invalid parameter.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, input_node_is_nullptr) {
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::NodePtr node = nullptr;
  ge::Status ret = pass->Run(node);
  EXPECT_EQ(PARAM_INVALID, ret);
}

// A node whose OpDesc is null must also be rejected.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, node_op_desc_is_nullptr) {
  NodePtr op_node = make_shared<Node>(nullptr, nullptr);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(PARAM_INVALID, ret);
}

// A framework op without the original-type attribute cannot be resolved to a concrete op type.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, node_get_original_type_failed) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", FRAMEWORKOP);
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  // EXPECT_EQ(ge::SUCCESS, ret);
}

// An original type with no registered kernel is skipped and the pass still reports success.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, node_not_register_op) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", FRAMEWORKOP);
  AttrUtils::SetStr(expanddim_op_desc, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, "expanddims_fake");
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(SUCCESS, ret);
}

// An ExpandDims node with no inputs makes the kernel computation fail; the pass still reports success.
TEST_F(UTEST_graph_passes_dimension_adjust_pass, node_compute_failed) {
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  OpDescPtr expanddim_op_desc = std::make_shared<OpDesc>("Expanddims", EXPANDDIMS);
  NodePtr op_node = make_shared<Node>(expanddim_op_desc, graph);
  std::shared_ptr<DimensionAdjustPass> pass = make_shared<DimensionAdjustPass>();
  ge::Status ret = pass->Run(op_node);
  EXPECT_EQ(SUCCESS, ret);
}
}  // namespace ge
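The file above defines test cases only and relies on a gtest entry point supplied by the unit-test build (typically by linking gtest_main or an equivalent shared main). For reference, a minimal standalone driver would look like the sketch below; the file name and the filter value are illustrative only and are not part of the repository.

#include <gtest/gtest.h>

// Hypothetical standalone driver for local iteration on this pass.
// In the actual build the main() is usually provided by gtest_main.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Restrict the run to the DimensionAdjustPass cases defined above.
  ::testing::GTEST_FLAG(filter) = "UTEST_graph_passes_dimension_adjust_pass.*";
  return RUN_ALL_TESTS();
}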

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts: GE API and GE Core.
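As a rough illustration of the GE API side described above, the sketch below initializes GE, creates a Session, registers a graph, and runs it. It assumes the public header ge/ge_api.h and the GEInitialize / Session / RunGraph entry points; option keys, graph construction, and exact signatures vary between GE versions, so treat this as a sketch rather than a verified example.

#include <map>
#include <string>
#include <vector>

#include "ge/ge_api.h"  // assumed public GE API header

int main() {
  // Global initialization of GE; options (e.g. the SoC version) are passed as string key/value pairs.
  std::map<std::string, std::string> options;
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  {
    // A Session owns graphs: AddGraph registers a graph under a user-chosen id,
    // and RunGraph executes it with the given input tensors.
    ge::Session session(options);
    ge::Graph graph("demo_graph");  // normally built with the graph construction API
    uint32_t graph_id = 0;
    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }  // the Session is destroyed before GE is finalized

  ge::GEFinalize();
  return 0;
}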