dynamic_shape_partition_unittest.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

// Expose private/protected members (e.g. ComputeGraph::sub_graph_) for test assertions.
#define private public
#define protected public
#include "graph/partition/dynamic_shape_partition.h"
#include "compute_graph.h"
#include "inc/framework/common/types.h"
#include "utils/graph_utils.h"
#include "graph/debug/ge_attr_define.h"

namespace ge {
namespace {
GeTensorDescPtr CreateTensorDesc(std::initializer_list<int64_t> shape, Format format = FORMAT_NCHW,
                                 DataType data_type = DT_FLOAT) {
  GeShape ge_shape{std::vector<int64_t>(shape)};
  GeTensorDescPtr tensor_desc = std::make_shared<GeTensorDesc>();
  tensor_desc->SetShape(ge_shape);
  tensor_desc->SetFormat(format);
  tensor_desc->SetDataType(data_type);
  return tensor_desc;
}

// Fluent helper for building nodes with input/output tensor descriptors.
class NodeBuilder {
 public:
  NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared<OpDesc>(name, type); }

  NodeBuilder &AddInputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
                            DataType data_type = DT_FLOAT) {
    op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
    return *this;
  }

  NodeBuilder &AddOutputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
                             DataType data_type = DT_FLOAT) {
    op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
    return *this;
  }

  NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) {
    op_desc_->AddOutputDesc(tensor_desc->Clone());
    return *this;
  }

  NodePtr Build(const ComputeGraphPtr &graph) {
    NodePtr node = graph->AddNode(op_desc_);
    return node;
  }

 private:
  OpDescPtr op_desc_;
};
}  // namespace

class UtestDynamicShapePartition : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};

// Single-op scene: Const -> AddN -> Relu, with ATTR_SINGLE_OP_SCENE set on the AddN node;
// partitioning is expected to succeed.
TEST_F(UtestDynamicShapePartition, single_op_scene_success) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");

  NodePtr node1 =
      NodeBuilder("node1", CONSTANTOP).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  NodePtr add_n_node =
      NodeBuilder("add_n_node", ADDN).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  NodePtr node2 =
      NodeBuilder("node2", RELU).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);

  GraphUtils::AddEdge(node1->GetOutDataAnchor(0), add_n_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(add_n_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0));

  (void)AttrUtils::SetBool(add_n_node->GetOpDesc(), ATTR_SINGLE_OP_SCENE, true);

  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);
}

// Control-flow scene: two Data nodes feeding a Merge node, all marked force-unknown-shape
// and assigned to the same control flow group; partitioning should produce one subgraph.
TEST_F(UtestDynamicShapePartition, merge_control_flow_group) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");
  AttrUtils::SetStr(*graph, ATTR_NAME_SESSION_GRAPH_ID, "session_graph_id");

  NodePtr data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  NodePtr data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  NodePtr merge = NodeBuilder("node2", MERGE).AddInputDesc({1}).AddInputDesc({1})
                      .AddOutputDesc({1}).AddOutputDesc({}).Build(graph);
  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), merge->GetInDataAnchor(0));
  GraphUtils::AddEdge(data2->GetOutDataAnchor(0), merge->GetInDataAnchor(1));

  (void)AttrUtils::SetBool(data1->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(data1->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
  (void)AttrUtils::SetBool(data2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(data2->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
  (void)AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(merge->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);

  EXPECT_EQ(graph->sub_graph_.size(), 0);

  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);

  EXPECT_EQ(graph->sub_graph_.size(), 1);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
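As a rough illustration of how the GE API side of that architecture is driven, the following is a minimal sketch of a typical client flow, assuming the public interface declared in ge/ge_api.h (GEInitialize, Session, AddGraph, RunGraph, GEFinalize). The option keys, the hand-built graph, and the empty input tensors are placeholders rather than a working model.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

#include "ge/ge_api.h"     // GEInitialize / GEFinalize / Session (assumed public GE API header)
#include "graph/graph.h"   // ge::Graph
#include "graph/tensor.h"  // ge::Tensor

int main() {
  // Global initialization options; the keys used in a real deployment
  // (SoC version, graph run mode, ...) are omitted here.
  std::map<std::string, std::string> options;
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  {
    ge::Session session(options);

    // In practice this graph is produced by the front end (ME); an empty
    // named graph stands in for it here.
    ge::Graph graph("demo_graph");

    const uint32_t graph_id = 0U;
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      std::vector<ge::Tensor> inputs;   // placeholder: no real input data
      std::vector<ge::Tensor> outputs;
      // RunGraph drives the GE Core pipeline: graph optimization, partitioning
      // (including the dynamic-shape partitioning exercised by the unit tests
      // above), compilation, and execution on the Ascend device.
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }  // The Session must be destroyed before GEFinalize is called.

  (void)ge::GEFinalize();
  return 0;
}

In real use, the graph handed to Session::AddGraph comes from the front end rather than being constructed by hand, and RunGraph is what ultimately invokes the GE Core optimization passes.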