You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

dynamic_single_op_reset_shape_pass.cc 6.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/dynamic_single_op_reset_shape_pass.h"
  17. #include "common/ge_inner_error_codes.h"
  18. #include "graph/utils/node_utils.h"
  19. #include "graph/utils/graph_utils.h"
  20. #include "graph/utils/tensor_utils.h"
  21. #include "graph/utils/op_desc_utils.h"
  22. #include "graph/utils/type_utils.h"
  23. #include "graph/debug/ge_attr_define.h"
namespace ge {
namespace {
// Sentinel dim value marking a tensor shape as unknown-rank (dynamic); used
// below to overwrite input shapes of dynamic single-op AICPU nodes.
const int64_t kDynamicShapeDim = -2;
// Name of the AICPU (TF) kernel library; only nodes assigned to this kernel
// lib get their shapes reset by this pass.
const char *const kAICPUKernelLibName = "aicpu_tf_kernel";
}  // namespace
  29. Status DynamicSingleOpResetShapePass::Run(ComputeGraphPtr graph) {
  30. GE_CHECK_NOTNULL(graph);
  31. std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  32. if (instance == nullptr || !instance->InitFlag()) {
  33. GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Run CompileNodesPass failed.");
  34. return ge::GE_CLI_GE_NOT_INITIALIZED;
  35. }
  36. for (const auto &node : graph->GetDirectNode()) {
  37. GE_CHECK_NOTNULL(node->GetOpDesc());
  38. // pass input node
  39. if (node->GetType() == DATA || node->GetType() == CONSTANT || node->GetType() == CONSTANTOP) {
  40. continue;
  41. }
  42. // pass output node
  43. if (node->GetType() == NETOUTPUT) {
  44. continue;
  45. }
  46. bool single_aicpu_unknown = false;
  47. if (!AttrUtils::GetBool(node->GetOpDesc(), ATTR_DYNAMIC_SHAPE_SINGLE_AICPU, single_aicpu_unknown) ||
  48. !single_aicpu_unknown) {
  49. continue;
  50. }
  51. // pass node aicpu node.
  52. string kernel_lib_name;
  53. if (GetSupportedKernel(node, instance, kernel_lib_name) != GRAPH_SUCCESS) {
  54. GELOGE(GRAPH_FAILED, "Get kernel lib failed of node[%s].", node->GetName().c_str());
  55. return GRAPH_FAILED;
  56. }
  57. if (kernel_lib_name != kAICPUKernelLibName) {
  58. continue;
  59. }
  60. // reset aicpu shape to unknown shape
  61. auto op_desc = node->GetOpDesc();
  62. std::vector<int64_t> dynamic_shape_dims = {kDynamicShapeDim};
  63. GeShape dynamic_shape(dynamic_shape_dims);
  64. for (size_t i = 0; i < op_desc->GetAllInputsDesc().size(); i++) {
  65. auto input_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(i));
  66. GE_CHECK_NOTNULL(input_desc);
  67. // pass scalar input desc
  68. auto dims_ori = input_desc->GetShape().GetDims();
  69. if (dims_ori.size() == 0) {
  70. continue;
  71. }
  72. input_desc->SetShape(dynamic_shape);
  73. }
  74. GELOGD("Reset dynamic aicpu node [%s] shape success!", node->GetName().c_str());
  75. }
  76. GELOGD("Reset dynamic aicpu nodes shape of graph [%s] success!", graph->GetName().c_str());
  77. return SUCCESS;
  78. }
  79. graphStatus DynamicSingleOpResetShapePass::GetSupportedKernel(const NodePtr &node,
  80. const std::shared_ptr<GELib> instance,
  81. string &kernel_lib_name) {
  82. auto op_desc = node->GetOpDesc();
  83. if (op_desc == nullptr) {
  84. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s opdesc failed", node->GetName().c_str());
  85. return ge::GE_GRAPH_PARAM_NULLPTR;
  86. }
  87. // reset op kernel lib, find supported kernel
  88. kernel_lib_name = op_desc->GetOpKernelLibName();
  89. if (kernel_lib_name.empty()) {
  90. (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
  91. kernel_lib_name = op_desc->GetOpKernelLibName();
  92. if (kernel_lib_name.empty()) {
  93. GELOGE(GRAPH_FAILED, "Get node:%s, type:%s kernel lib failed.", node->GetName().c_str(),
  94. op_desc->GetType().c_str());
  95. return GRAPH_FAILED;
  96. }
  97. }
  98. OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
  99. if (kernel_info == nullptr) {
  100. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
  101. return ge::GE_GRAPH_PARAM_NULLPTR;
  102. }
  103. // begin accuracy supported check
  104. if (!CheckAccuracySupport(kernel_info, instance, op_desc)) {
  105. // if check accuracy support failed , try to go to other engine.
  106. GELOGD("Check Accuracy Supported return not support, node name is %s. Try to go to other engine.",
  107. op_desc->GetName().c_str());
  108. string kernel_name_origin = kernel_lib_name;
  109. OpsKernelManager &ops_kernel_manager = instance->OpsKernelManagerObj();
  110. auto kernel_map = ops_kernel_manager.GetAllOpsKernelInfoStores();
  111. for (auto it = kernel_map.begin(); it != kernel_map.end(); ++it) {
  112. string tmp_kernel_name = it->first;
  113. if (tmp_kernel_name == kernel_name_origin) {
  114. continue;
  115. }
  116. OpsKernelInfoStorePtr tmp_kernel_info = it->second;
  117. if (CheckAccuracySupport(tmp_kernel_info, instance, op_desc)) {
  118. kernel_lib_name = tmp_kernel_name;
  119. GELOGD("Find kernel lib %s support node:%s, type:%s , get kernel lib success.", tmp_kernel_name.c_str(),
  120. node->GetName().c_str(), op_desc->GetType().c_str());
  121. return GRAPH_SUCCESS;
  122. }
  123. }
  124. GELOGE(GRAPH_FAILED, "Cannot find kernel lib support node:%s, type:%s , get kernel lib failed.",
  125. node->GetName().c_str(), op_desc->GetType().c_str());
  126. return GRAPH_FAILED;
  127. }
  128. return GRAPH_SUCCESS;
  129. }
  130. bool DynamicSingleOpResetShapePass::CheckAccuracySupport(const OpsKernelInfoStorePtr &kernel_info,
  131. const std::shared_ptr<GELib> instance, OpDescPtr &op_desc) {
  132. auto ge_desc = MakeShared<ge::OpDescPtr>(op_desc);
  133. if (ge_desc == nullptr) {
  134. GELOGE(GE_GRAPH_MEMORY_ALLOC_FAILED, "Fail to malloc op desc.");
  135. return false;
  136. }
  137. string reason;
  138. if (!(kernel_info->CheckAccuracySupported(*ge_desc, reason, true))) {
  139. return false;
  140. }
  141. return true;
  142. }
  143. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示