You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

graph_optimize.cc 13 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/optimize/graph_optimize.h"
  17. #include <utility>
  18. #include "framework/common/debug/ge_log.h"
  19. #include "graph/anchor.h"
  20. #include "graph/passes/dimension_adjust_pass.h"
  21. #include "graph/utils/graph_utils.h"
  22. #include "inc/pass_manager.h"
  23. #include "init/gelib.h"
  24. #include "opskernel_manager/ops_kernel_manager.h"
  25. namespace {
  26. const char *const kVectorCore = "VectorCore";
  27. const char *const kVectorEngine = "VectorEngine";
  28. const char *const kAicoreEngine = "AIcoreEngine";
  29. } // namespace
  30. namespace ge {
  31. GraphOptimize::GraphOptimize()
  32. : optimize_type_(domi::FrameworkType::FMK_TYPE_T),
  33. cal_config_(""),
  34. insert_op_config_(""),
  35. parse_out_node_(""),
  36. core_type_(""),
  37. graph_context_(nullptr) {}
// Populates input-provenance properties on every direct node's OpDesc:
//  - SrcName: a single ":"-joined string of control-edge source node names,
//    followed by one entry per data-edge source node name;
//  - SrcIndex: the peer output-anchor index for each data edge;
//  - InputName: "<peer_name>" or "<peer_name>: <idx>" per data edge, skipped
//    for NETOUTPUT nodes when the context framework type is FMK_TYPE_T.
// Logs and returns without touching the graph when compute_graph is null.
void AddNodeInputProperty(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[AddNodeInputProperty]: compute_graph is nullptr.");
    return;
  }
  for (ge::NodePtr &node : compute_graph->GetDirectNode()) {
    auto node_op_desc = node->GetOpDesc();
    // NOTE(review): this `return` aborts the whole traversal on the first node
    // with a null OpDesc, leaving later nodes without input properties --
    // confirm whether `continue` was intended.
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return );
    auto in_control_anchor = node->GetInControlAnchor();
    vector<string> src_name_list;
    vector<string> input_name_list;
    vector<int64_t> src_index_list;
    // Control edges: concatenate all peer control-source names into one
    // ":"-separated string and record it as the first SrcName entry.
    GE_IF_BOOL_EXEC(
        in_control_anchor != nullptr, string src_name_temp; for (auto &out_control_anchor
                                                                 : in_control_anchor->GetPeerOutControlAnchors()) {
          ge::NodePtr src_node = out_control_anchor->GetOwnerNode();
          GE_IF_BOOL_EXEC(src_node == nullptr, GELOGW("src_node is nullptr!"); continue);
          src_name_temp = src_name_temp == "" ? src_node->GetName() : src_name_temp + ":" + src_node->GetName();
        } GE_IF_BOOL_EXEC(src_name_temp != "", src_name_list.emplace_back(src_name_temp);
                          node_op_desc->SetSrcName(src_name_list);))
    // Data edges: append source name/index per connected input anchor and
    // write the accumulated lists back after each edge.
    for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
      auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(
          peer_out_anchor == nullptr, GELOGW("peer_out_anchor is nullptr! node: %s", node->GetName().c_str()); continue);
      ge::NodePtr src_node = peer_out_anchor->GetOwnerNode();
      // Re-read SrcIndex each iteration so previously stored indices (if any)
      // are preserved and extended.
      src_index_list = node_op_desc->GetSrcIndex();
      src_name_list.emplace_back(src_node->GetName());
      src_index_list.emplace_back(peer_out_anchor->GetIdx());
      node_op_desc->SetSrcName(src_name_list);
      node_op_desc->SetSrcIndex(src_index_list);
      // InputName entries are skipped only for NETOUTPUT under FMK_TYPE_T;
      // index 0 omits the ": <idx>" suffix.
      GE_IF_BOOL_EXEC(!(node_op_desc->GetType() == NETOUTPUT && domi::GetContext().type == domi::FMK_TYPE_T),
                      ge::NodePtr peer_owner_node = peer_out_anchor->GetOwnerNode();
                      input_name_list.emplace_back(
                          peer_owner_node->GetName() +
                          (peer_out_anchor->GetIdx() == 0 ? "" : ": " + to_string(peer_out_anchor->GetIdx())));
                      node_op_desc->SetInputName(input_name_list);)
    }
  }
}
  77. Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const std::string &engine_name) {
  78. if (compute_graph == nullptr) {
  79. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeSubGraph]: compute_graph is nullptr.");
  80. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  81. }
  82. Status ret = SUCCESS;
  83. vector<GraphOptimizerPtr> graph_optimizer;
  84. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  85. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  86. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GraphOptimzer: GE is not initialized");
  87. return GE_CLI_GE_NOT_INITIALIZED;
  88. }
  89. if (instance_ptr->DNNEngineManagerObj().IsEngineRegistered(engine_name)) {
  90. instance_ptr->OpsKernelManagerObj().GetGraphOptimizerByEngine(engine_name, graph_optimizer);
  91. AddNodeInputProperty(compute_graph);
  92. if (compute_graph->GetDirectNode().size() == 0) {
  93. GELOGW("[OptimizeSubGraph] compute_graph do not has any node.");
  94. return SUCCESS;
  95. }
  96. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  97. ret = (*iter)->OptimizeFusedGraph(*(compute_graph));
  98. if (ret != SUCCESS) {
  99. GELOGE(ret, "[OptimizeSubGraph][OptimizeFusedGraph]: graph optimize failed, ret:%d", ret);
  100. return ret;
  101. }
  102. }
  103. } else {
  104. GELOGI("Engine: %s is not registered. do nothing in subGraph Optimize by ATC.", engine_name.c_str());
  105. }
  106. return ret;
  107. }
  108. Status GraphOptimize::OptimizeOriginalGraph(ComputeGraphPtr &compute_graph) {
  109. if (compute_graph == nullptr) {
  110. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeOriginalGraph]: compute_graph is nullptr.");
  111. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  112. }
  113. Status ret = SUCCESS;
  114. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  115. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  116. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
  117. return GE_CLI_GE_NOT_INITIALIZED;
  118. }
  119. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  120. GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %lu.",
  121. graph_optimizer.size());
  122. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  123. GELOGD("[OptimizeOriginalGraph]: engine type will exclude: %s", exclude_core_Type.c_str());
  124. if (graph_optimizer.size() != 0) {
  125. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  126. if (iter->first == exclude_core_Type) {
  127. continue;
  128. }
  129. ret = (iter->second)->OptimizeOriginalGraph(*compute_graph);
  130. if (ret != SUCCESS) {
  131. GELOGE(ret, "[OptimizeOriginalGraph]: graph optimize failed, ret:%d", ret);
  132. return ret;
  133. }
  134. }
  135. }
  136. return ret;
  137. }
  138. Status GraphOptimize::OptimizeOriginalGraphJudgeInsert(ComputeGraphPtr &compute_graph) {
  139. GELOGD("OptimizeOriginalGraphJudgeInsert in");
  140. GE_CHECK_NOTNULL(compute_graph);
  141. Status ret = SUCCESS;
  142. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  143. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  144. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
  145. return GE_CLI_GE_NOT_INITIALIZED;
  146. }
  147. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  148. GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %lu.",
  149. graph_optimizer.size());
  150. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  151. if (graph_optimizer.size() != 0) {
  152. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  153. if (iter->first == exclude_core_Type) {
  154. GELOGI("[OptimizeOriginalGraphJudgeInsert]: engine type will exclude: %s", exclude_core_Type.c_str());
  155. continue;
  156. }
  157. GELOGI("Begin to refine running format by engine %s", iter->first.c_str());
  158. ret = (iter->second)->OptimizeOriginalGraphJudgeInsert(*compute_graph);
  159. if (ret != SUCCESS) {
  160. GELOGE(ret, "[OptimizeOriginalGraphJudgeInsert]: graph optimize failed, ret:%d", ret);
  161. return ret;
  162. }
  163. }
  164. }
  165. return ret;
  166. }
  167. Status GraphOptimize::NewOptimizeOriginalGraph(ComputeGraphPtr &compute_graph) {
  168. GELOGD("NewOptimizeOriginalGraph in");
  169. if (compute_graph == nullptr) {
  170. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeOriginalGraph]: compute_graph is nullptr.");
  171. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  172. }
  173. Status ret = SUCCESS;
  174. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  175. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  176. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
  177. return GE_CLI_GE_NOT_INITIALIZED;
  178. }
  179. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  180. GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %lu.",
  181. graph_optimizer.size());
  182. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  183. GELOGD("[OptimizeOriginalGraph]: engine type will exclude: %s", exclude_core_Type.c_str());
  184. if (graph_optimizer.size() != 0) {
  185. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  186. if (iter->first == exclude_core_Type) {
  187. continue;
  188. }
  189. ret = (iter->second)->OptimizeOriginalGraph(*compute_graph);
  190. if (ret != SUCCESS) {
  191. GELOGE(ret, "[OptimizeOriginalGraph]: graph optimize failed, ret:%d", ret);
  192. return ret;
  193. }
  194. // call fe
  195. ret = (iter->second)->OptimizeOriginalGraphJudgeInsert(*compute_graph);
  196. if (ret != SUCCESS) {
  197. GELOGE(ret, "[OptimizeOriginalGraphForInsert]: graph optimize failed, ret:%d", ret);
  198. return ret;
  199. }
  200. }
  201. }
  202. return ret;
  203. }
  204. Status GraphOptimize::OptimizeOriginalGraphForQuantize(ComputeGraphPtr &compute_graph) {
  205. if (compute_graph == nullptr) {
  206. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeOriginalGraph]: compute_graph is nullptr.");
  207. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  208. }
  209. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  210. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  211. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
  212. return GE_CLI_GE_NOT_INITIALIZED;
  213. }
  214. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  215. GELOGI("optimize by opskernel in original graph optimize quantize phase. num of graph_optimizer is %zu.",
  216. graph_optimizer.size());
  217. Status ret = SUCCESS;
  218. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  219. GELOGD("[OptimizeOriginalGraphForQuantize]: engine type will exclude: %s", exclude_core_Type.c_str());
  220. if (graph_optimizer.size() != 0) {
  221. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  222. if (iter->first == exclude_core_Type || iter->second == nullptr) {
  223. continue;
  224. }
  225. ret = iter->second->OptimizeGraphPrepare(*compute_graph);
  226. if (ret != SUCCESS) {
  227. GELOGE(ret, "[OptimizeOriginalGraphForQuantize]: graph optimize failed, ret:%u", ret);
  228. return ret;
  229. }
  230. }
  231. }
  232. return ret;
  233. }
  234. Status GraphOptimize::SetOptions(const ge::GraphManagerOptions &options) {
  235. if (options.framework_type >= static_cast<int32_t>(domi::FrameworkType::FMK_TYPE_RESERVED)) {
  236. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Optimize Type %d invalid.", options.framework_type);
  237. return GE_GRAPH_OPTIONS_INVALID;
  238. }
  239. optimize_type_ = static_cast<domi::FrameworkType>(options.framework_type);
  240. cal_config_ = options.calibration_conf_file;
  241. insert_op_config_ = options.insert_op_file;
  242. train_graph_flag_ = options.train_graph_flag;
  243. local_fmk_op_flag_ = options.local_fmk_op_flag;
  244. func_bin_path_ = options.func_bin_path;
  245. core_type_ = options.core_type;
  246. return SUCCESS;
  247. }
  248. void GraphOptimize::TranFrameOp(ComputeGraphPtr &compute_graph) {
  249. GE_CHECK_NOTNULL_JUST_RETURN(compute_graph);
  250. vector<string> local_framework_op_vec = {
  251. "TensorDataset", "QueueDataset", "DeviceQueueDataset", "ParallelMapDataset", "BatchDatasetV2",
  252. "IteratorV2", "MakeIterator", "IteratorGetNext", "FilterDataset", "MapAndBatchDatasetV2"};
  253. for (auto &nodePtr : compute_graph->GetAllNodes()) {
  254. OpDescPtr op = nodePtr->GetOpDesc();
  255. GE_IF_BOOL_EXEC(op == nullptr, GELOGW("op is nullptr!"); continue);
  256. // fwkop black-white sheet
  257. vector<string>::iterator iter =
  258. std::find(local_framework_op_vec.begin(), local_framework_op_vec.end(), op->GetType());
  259. if (iter != local_framework_op_vec.end()) {
  260. // set - original_type
  261. if (!AttrUtils::SetStr(op, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, op->GetType())) {
  262. GELOGW("TranFrameOp SetStr ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE failed");
  263. }
  264. // set - framework_type
  265. // [No need to verify return value]
  266. op->SetType("FrameworkOp");
  267. if (!AttrUtils::SetInt(op, ATTR_NAME_FRAMEWORK_FWK_TYPE, domi::FrameworkType::FMK_TYPE_T)) {
  268. GELOGW("TranFrameOp SetInt ATTR_NAME_FRAMEWORK_FWK_TYPE failed");
  269. }
  270. }
  271. }
  272. }
  273. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。