
print_op_pass_unittest.cc
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "graph/passes/print_op_pass.h"

#include <gtest/gtest.h>

#include "omg/omg_inner_types.h"
#include "utils/op_desc_utils.h"

using namespace domi;

namespace ge {
class UTEST_graph_passes_print_op_pass : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}

 public:
  // Builds a small test graph: two Data nodes feed the two inputs of a
  // Print node, and the Print output feeds a RetVal node.
  void make_graph(ComputeGraphPtr graph, bool match = true, int flag = 0) {
    auto data = std::make_shared<OpDesc>("Data", DATA);
    GeTensorDesc tensorDescData(GeShape({1, 1, 1, 1}));
    data->AddInputDesc(tensorDescData);
    data->AddOutputDesc(tensorDescData);
    auto dataNode = graph->AddNode(data);

    auto data1 = std::make_shared<OpDesc>("Data", DATA);
    data1->AddInputDesc(tensorDescData);
    data1->AddOutputDesc(tensorDescData);
    auto dataNode1 = graph->AddNode(data1);

    auto printDesc = std::make_shared<OpDesc>("Print", "Print");
    printDesc->AddInputDesc(tensorDescData);
    printDesc->AddInputDesc(tensorDescData);
    printDesc->AddOutputDesc(tensorDescData);
    auto printNode = graph->AddNode(printDesc);

    auto retValDesc = std::make_shared<OpDesc>("RetVal", "RetVal");
    retValDesc->AddInputDesc(tensorDescData);
    retValDesc->AddOutputDesc(tensorDescData);
    auto retValNode = graph->AddNode(retValDesc);

    // Wire the nodes: Data -> Print(in0), Data -> Print(in1), Print -> RetVal.
    auto ret = GraphUtils::AddEdge(dataNode->GetOutDataAnchor(0), printNode->GetInDataAnchor(0));
    ret = GraphUtils::AddEdge(dataNode1->GetOutDataAnchor(0), printNode->GetInDataAnchor(1));
    ret = GraphUtils::AddEdge(printNode->GetOutDataAnchor(0), retValNode->GetInDataAnchor(0));
  }
};

// Running PrintOpPass over the constructed graph should succeed.
TEST_F(UTEST_graph_passes_print_op_pass, apply_success) {
  GetContext().out_nodes_map.clear();
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test_graph");
  make_graph(graph);

  ge::PrintOpPass applyPass;
  NamesToPass names_to_pass;
  names_to_pass.emplace_back("Test", &applyPass);
  GEPass pass(graph);
  domi::Status status = pass.Run(names_to_pass);
  EXPECT_EQ(domi::SUCCESS, status);
}

// Running the pass on a null node should report PARAM_INVALID.
TEST_F(UTEST_graph_passes_print_op_pass, param_invalid) {
  ge::NodePtr node = nullptr;
  ge::PrintOpPass applyPass;
  domi::Status status = applyPass.Run(node);
  EXPECT_EQ(ge::PARAM_INVALID, status);
}
}  // namespace ge
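Both tests drive PrintOpPass through GE's generic pass driver rather than calling the pass on each node by hand. The sketch below isolates that invocation pattern using the same calls as apply_success above; the graph name "demo_graph" and the registration name "PrintOpPass" are illustrative, not fixed by the framework.

// Minimal sketch of the pass-invocation pattern, assuming the same headers as the test above.
ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("demo_graph");  // graph to be optimized
ge::PrintOpPass print_pass;                                  // the pass to apply
ge::NamesToPass names_to_pass;
names_to_pass.emplace_back("PrintOpPass", &print_pass);      // register the pass under a readable name
ge::GEPass ge_pass(graph);
domi::Status status = ge_pass.Run(names_to_pass);            // SUCCESS when every registered pass applies cleanly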

Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, bridging the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE is optimized specifically for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.