You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

node_item.cc 10 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "node_item.h"
  17. #include <sstream>
  18. #include "common/debug/log.h"
  19. #include "graph/common/omg_util.h"
  20. #include "graph/compute_graph.h"
  21. #include "graph/debug/ge_attr_define.h"
  22. #include "graph/utils/node_utils.h"
  23. #include "hybrid/node_executor/node_executor.h"
  24. #include "hybrid/executor/worker/shape_inference_engine.h"
  25. namespace ge {
  26. namespace hybrid {
  27. namespace {
  28. const char *const kAttrNameOriginalFusionGraph = "_original_fusion_graph";
  29. const char *const kNodeTypeRetVal = "_RetVal";
  30. std::set<std::string> kControlOpTypes{
  31. IF, STATELESSIF, CASE, WHILE, STATELESSWHILE
  32. };
  33. Status ParseInputMapping(Node &node, OpDesc &op_desc, FusedSubgraph &fused_subgraph) {
  34. uint32_t parent_index = 0;
  35. if (!AttrUtils::GetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
  36. GELOGE(FAILED,
  37. "[%s] Failed to get attr [%s]",
  38. op_desc.GetName().c_str(),
  39. ATTR_NAME_PARENT_NODE_INDEX.c_str());
  40. return FAILED;
  41. }
  42. for (auto &node_and_anchor : node.GetOutDataNodesAndAnchors()) {
  43. auto dst_op_desc = node_and_anchor.first->GetOpDesc();
  44. GE_CHECK_NOTNULL(dst_op_desc);
  45. auto in_idx = node_and_anchor.second->GetIdx();
  46. auto tensor_desc = dst_op_desc->MutableInputDesc(in_idx);
  47. fused_subgraph.input_mapping[static_cast<int>(parent_index)].emplace_back(tensor_desc);
  48. GELOGD("Input[%u] mapped to [%s:%u]", parent_index, dst_op_desc->GetName().c_str(), in_idx);
  49. }
  50. return SUCCESS;
  51. }
  52. Status ParseOutputMapping(const OpDescPtr &op_desc, FusedSubgraph &fused_subgraph) {
  53. uint32_t parent_index = 0;
  54. if (!AttrUtils::GetInt(op_desc, ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
  55. GELOGE(FAILED,
  56. "[%s] Failed to get attr [%s]",
  57. op_desc->GetName().c_str(),
  58. ATTR_NAME_PARENT_NODE_INDEX.c_str());
  59. return FAILED;
  60. }
  61. fused_subgraph.output_mapping.emplace(static_cast<int>(parent_index), op_desc);
  62. return SUCCESS;
  63. }
// Parses the fused subgraph stored under "_original_fusion_graph" (if any)
// into node_item.fused_subgraph: Data nodes become input mappings, _RetVal
// nodes become output mappings, and everything else is kept as compute nodes.
// Returns SUCCESS immediately when the attribute is absent.
Status ParseFusedSubgraph(NodeItem &node_item) {
  if (!node_item.op_desc->HasAttr(kAttrNameOriginalFusionGraph)) {
    // No fused subgraph attached to this node - nothing to do.
    return SUCCESS;
  }
  GELOGI("[%s] Start to parse fused subgraph.", node_item.node_name.c_str());
  auto fused_subgraph = std::unique_ptr<FusedSubgraph>(new(std::nothrow)FusedSubgraph());
  GE_CHECK_NOTNULL(fused_subgraph);
  ComputeGraphPtr fused_graph;
  // Retrieval failure is surfaced by the null check below, so the return
  // value is deliberately ignored here.
  (void) AttrUtils::GetGraph(*node_item.op_desc, kAttrNameOriginalFusionGraph, fused_graph);
  GE_CHECK_NOTNULL(fused_graph);
  // The fused graph is always executed through the unknown-shape path.
  fused_graph->SetGraphUnknownFlag(true);
  fused_subgraph->graph = fused_graph;
  GE_CHK_GRAPH_STATUS_RET(fused_graph->TopologicalSorting());
  for (auto &node : fused_graph->GetAllNodes()) {
    GE_CHECK_NOTNULL(node);
    auto op_desc = node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    std::string node_type;
    GE_CHK_STATUS_RET(GetOriginalType(node, node_type));
    if (node_type == DATA) {
      // NOTE(review): ParseInputMapping/ParseOutputMapping return Status, yet
      // are checked with the graph-status macro GE_CHK_GRAPH_STATUS_RET -
      // confirm the two macros are interchangeable for Status values.
      GE_CHK_GRAPH_STATUS_RET(ParseInputMapping(*node, *op_desc, *fused_subgraph));
    } else if (node_type == kNodeTypeRetVal) {
      GE_CHK_GRAPH_STATUS_RET(ParseOutputMapping(op_desc, *fused_subgraph));
    } else {
      fused_subgraph->nodes.emplace_back(node);
    }
  }
  node_item.fused_subgraph = std::move(fused_subgraph);
  GELOGI("[%s] Done parsing fused subgraph successfully.", node_item.NodeName().c_str());
  return SUCCESS;
}
  95. } // namespace
  96. bool IsControlOp(const std::string &op_type) {
  97. return kControlOpTypes.count(op_type) > 0;
  98. }
  99. NodeItem::NodeItem(NodePtr node) : node(std::move(node)) {
  100. this->op_desc = this->node->GetOpDesc().get();
  101. this->node_name = this->node->GetName();
  102. this->node_type = this->node->GetType();
  103. }
  104. Status NodeItem::Create(const NodePtr &node, std::unique_ptr<NodeItem> &node_item) {
  105. GE_CHECK_NOTNULL(node);
  106. GE_CHECK_NOTNULL(node->GetOpDesc());
  107. std::unique_ptr<NodeItem> instance(new(std::nothrow)NodeItem(node));
  108. GE_CHECK_NOTNULL(instance);
  109. GE_CHK_STATUS_RET(instance->Init(), "Failed to init NodeItem [%s] .", node->GetName().c_str());
  110. node_item = std::move(instance);
  111. return SUCCESS;
  112. }
  113. void NodeItem::ResolveOptionalInputs() {
  114. if (op_desc->GetAllInputsSize() != op_desc->GetInputsSize()) {
  115. has_optional_inputs = true;
  116. for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
  117. const auto &input_desc = op_desc->MutableInputDesc(i);
  118. if (input_desc == nullptr) {
  119. GELOGD("[%s] Input[%zu] is optional and invalid", NodeName().c_str(), i);
  120. } else {
  121. input_desc_indices_.emplace_back(static_cast<uint32_t>(i));
  122. }
  123. }
  124. }
  125. }
  126. Status NodeItem::InitInputsAndOutputs() {
  127. GE_CHECK_LE(op_desc->GetInputsSize(), INT32_MAX);
  128. GE_CHECK_LE(op_desc->GetOutputsSize(), INT32_MAX);
  129. num_inputs = static_cast<int>(op_desc->GetInputsSize());
  130. num_outputs = static_cast<int>(op_desc->GetOutputsSize());
  131. ResolveOptionalInputs();
  132. return SUCCESS;
  133. }
  134. Status NodeItem::ResolveDynamicState() {
  135. (void) AttrUtils::GetBool(op_desc, ATTR_NAME_FORCE_UNKNOWN_SHAPE, is_dynamic);
  136. GELOGD("node name = %s, is_dynamic = %d.", this->node_name.c_str(), is_dynamic);
  137. if (!is_dynamic) {
  138. GE_CHK_STATUS_RET(NodeUtils::GetNodeUnknownShapeStatus(*node, is_dynamic),
  139. "[%s] Failed to get shape status.",
  140. node->GetName().c_str());
  141. }
  142. return SUCCESS;
  143. }
// Classifies each input shape as static or dynamic, counts the static ones,
// and - when every output shape is static - pre-computes output tensor sizes.
Status NodeItem::ResolveStaticInputsAndOutputs() {
  for (int i = 0; i < num_inputs; ++i) {
    const auto &input_desc = MutableInputDesc(i);
    GE_CHECK_NOTNULL(input_desc);
    if (input_desc->MutableShape().IsUnknownShape()) {
      is_input_shape_static_.push_back(false);
    } else {
      num_static_input_shapes++;
      is_input_shape_static_.push_back(true);
      GELOGD("[%s] The shape of input[%d] is static. shape = [%s]",
             NodeName().c_str(), i, input_desc->MutableShape().ToString().c_str());
    }
  }
  // A single unknown output shape makes the whole output side dynamic.
  for (int i = 0; i < num_outputs; ++i) {
    const auto &output_desc = op_desc->MutableOutputDesc(i);
    GE_CHECK_NOTNULL(output_desc);
    if (output_desc->MutableShape().IsUnknownShape()) {
      is_output_shape_static = false;
      break;
    }
  }
  // NOTE(review): relies on is_output_shape_static defaulting to true when no
  // output is unknown - confirm the member initializer in node_item.h.
  if (is_output_shape_static) {
    GE_CHK_STATUS_RET_NOLOG(ShapeInferenceEngine::CalcOutputTensorSizes(*this));
  }
  return SUCCESS;
}
  170. void NodeItem::ResolveUnknownShapeType() {
  171. if (IsControlOp() || node_type == PARTITIONEDCALL) {
  172. shape_inference_type = DEPEND_COMPUTE;
  173. } else {
  174. int32_t unknown_shape_type_val = 0;
  175. (void) AttrUtils::GetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val);
  176. shape_inference_type = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
  177. }
  178. }
// Initializes the NodeItem: caches input/output counts, resolves the dynamic
// flag, and - for dynamic nodes only - resolves the shape inference strategy,
// static-shape bookkeeping, and any fused subgraph.
Status NodeItem::Init() {
  GE_CHK_STATUS_RET_NOLOG(InitInputsAndOutputs());
  GE_CHK_STATUS_RET_NOLOG(ResolveDynamicState());
  if (is_dynamic) {
    ResolveUnknownShapeType();
    GE_CHK_STATUS_RET_NOLOG(ResolveStaticInputsAndOutputs());
    GE_CHK_STATUS_RET(ParseFusedSubgraph(*this), "[%s] Failed to parse fused subgraph", node_name.c_str());
  }
  return SUCCESS;
}
  189. bool NodeItem::IsControlOp() const {
  190. return ge::hybrid::IsControlOp(op_desc->GetType());
  191. }
// Builds a human-readable summary of this node for logging: identity,
// dynamic flags, input/output layout, shape-inference dependencies, and the
// fan-out of each output.
std::string NodeItem::DebugString() const {
  std::stringstream ss;
  ss << "Node: ";
  ss << "id = " << node_id;
  ss << ", name = [" << node->GetName();
  ss << "], type = " << node->GetType();
  ss << ", is_dynamic = " << (is_dynamic ? "True" : "False");
  ss << ", is_output_static = " << (is_output_shape_static ? "True" : "False");
  ss << ", unknown_shape_op_type = " << shape_inference_type;
  ss << ", input_start = " << input_start;
  ss << ", num_inputs = " << num_inputs;
  ss << ", output_start = " << output_start;
  ss << ", num_outputs = " << num_outputs;
  // Nodes whose execution this node's shape inference depends on.
  ss << ", dependent_nodes = [";
  for (const auto &dep_node : dependents_for_shape_inference) {
    ss << dep_node->GetName() << ", ";
  }
  ss << "]";
  // For each output, list the (consumer node, consumer input index) pairs.
  int index = 0;
  for (auto &items : outputs) {
    ss << ", output[" << index++ << "]: ";
    for (auto &item : items) {
      ss << "(" << item.second->NodeName() << ":" << item.first << "), ";
    }
  }
  return ss.str();
}
  219. void NodeItem::SetToDynamic() {
  220. num_static_input_shapes = 0;
  221. is_dynamic = true;
  222. for (size_t i = 0; i < is_input_shape_static_.size(); ++i) {
  223. is_input_shape_static_[i] = false;
  224. }
  225. if (kernel_task != nullptr && !kernel_task->IsSupportDynamicShape()) {
  226. GELOGD("[%s] Dynamic shape is not supported, clear node task.", node_name.c_str());
  227. kernel_task = nullptr;
  228. }
  229. }
  230. GeTensorDescPtr NodeItem::MutableInputDesc(int index) const {
  231. if (!has_optional_inputs) {
  232. return op_desc->MutableInputDesc(static_cast<uint32_t>(index));
  233. }
  234. if (index < 0 || index >= num_inputs) {
  235. GELOGE(PARAM_INVALID,
  236. "[%s] Invalid input index, num inputs = %d, index = %d",
  237. node_name.c_str(),
  238. num_inputs,
  239. index);
  240. return nullptr;
  241. }
  242. return op_desc->MutableInputDesc(input_desc_indices_[index]);
  243. }
  244. Status NodeItem::GetCanonicalInputIndex(uint32_t index, int &canonical_index) const {
  245. if (!has_optional_inputs) {
  246. canonical_index = index;
  247. return SUCCESS;
  248. }
  249. auto iter = std::find(input_desc_indices_.begin(), input_desc_indices_.end(), index);
  250. if (iter == input_desc_indices_.end()) {
  251. GELOGE(INTERNAL_ERROR, "[%s] Invalid input index: %u", node_name.c_str(), index);
  252. return INTERNAL_ERROR;
  253. }
  254. canonical_index = static_cast<int>(iter - input_desc_indices_.begin());
  255. GELOGD("[%s] Canonicalize input index from [%u] to [%d]", node_name.c_str(), index, canonical_index);
  256. return SUCCESS;
  257. }
  258. bool NodeItem::IsInputShapeStatic(int index) const {
  259. if (!is_dynamic) {
  260. return true;
  261. }
  262. if (static_cast<size_t>(index) >= is_input_shape_static_.size()) {
  263. GELOGE(PARAM_INVALID, "Input index(%d) out of range: [0, %zu)", index, is_input_shape_static_.size());
  264. return false;
  265. }
  266. return is_input_shape_static_[index];
  267. }
  268. } // namespace hybrid
  269. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示