You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

hccl_continuous_memcpy_pass.cc 16 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/hccl_continuous_memcpy_pass.h"
  17. #include <string>
  18. #include "common/debug/log.h"
  19. #include "framework/common/debug/ge_log.h"
  20. #include "common/ge_inner_error_codes.h"
  21. #include "common/ge/ge_util.h"
  22. #include "framework/common/types.h"
  23. #include "graph/utils/graph_utils.h"
namespace {
// Index of the single data input/output anchor on an inserted Identity node.
const int kAnchorNum = 0;
// Input indices of the Assign op: "ref" (the variable being written back)
// and "value" (the data written into it).
const int32_t kAnchorAssignRefIndex = 0;
const int32_t kAnchorAssignValueIndex = 1;
}  // namespace
  29. namespace ge {
  30. Status HcclContinuousMemcpyPass::Run(ge::ComputeGraphPtr graph) {
  31. GE_CHECK_NOTNULL(graph);
  32. for (const auto &node : graph->GetDirectNode()) {
  33. auto op_desc = node->GetOpDesc();
  34. if (op_desc == nullptr) {
  35. GELOGE(INTERNAL_ERROR, "node has no op_desc, node_name : %s.", node->GetName().c_str());
  36. return INTERNAL_ERROR;
  37. }
  38. Status ret = ContinuousInputProcess(graph, node);
  39. if (ret != SUCCESS) {
  40. GELOGE(INTERNAL_ERROR, "failed ProcessBroadcastMemcpy, node_name:%s.", node->GetName().c_str());
  41. return ret;
  42. }
  43. ret = P2pmemInputProcess(graph, node);
  44. if (ret != SUCCESS) {
  45. GELOGE(INTERNAL_ERROR, "failed P2pmemInputProcess, node_name:%s.", node->GetName().c_str());
  46. return ret;
  47. }
  48. }
  49. return SUCCESS;
  50. }
  51. // If broadcast input size is bigger than 1, and input from variable,
  52. // cause by broadcast input memory should be continuous,
  53. // another featuremap mem will be allocated for broadcast input.
  54. // In this condition, move data from variable mem to broadcast input featuremap mem will be executed each step.
  55. // In order to avoid move action out of model, use memcpy node instead of move action code.
  56. Status HcclContinuousMemcpyPass::ContinuousInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
  57. auto op_desc = node->GetOpDesc();
  58. bool is_input_continuous = false;
  59. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);
  60. if (is_input_continuous && op_desc->GetInputsSize() > 1) {
  61. GELOGI("continuous input op is:%s.", op_desc->GetName().c_str());
  62. // if input size bigger than one, insert memcpy between var data for support continous mem alloc
  63. for (auto &hccl_in_anchor : node->GetAllInDataAnchors()) {
  64. if (hccl_in_anchor == nullptr) {
  65. continue;
  66. }
  67. auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
  68. if (src_out_anchor == nullptr) {
  69. GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
  70. return INTERNAL_ERROR;
  71. }
  72. if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
  73. Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
  74. if (ret != SUCCESS) {
  75. GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
  76. return ret;
  77. }
  78. }
  79. }
  80. }
  81. return SUCCESS;
  82. }
  83. // if input is var type, and node input need p2p mem, then memcpy should be insert between the two
  84. Status HcclContinuousMemcpyPass::P2pmemInputProcess(const ComputeGraphPtr &graph, const NodePtr node) {
  85. auto op_desc = node->GetOpDesc();
  86. vector<int64_t> input_memory_types;
  87. (void) ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, input_memory_types);
  88. if (input_memory_types.empty()) {
  89. return SUCCESS;
  90. }
  91. for (uint32_t index = 0; index < input_memory_types.size() && index < op_desc->GetInputsSize(); index++) {
  92. if (input_memory_types[index] != RT_MEMORY_P2P_DDR) {
  93. continue;
  94. }
  95. GELOGD("p2p input op is:%s.", op_desc->GetName().c_str());
  96. auto hccl_in_anchor = node->GetInDataAnchor(index);
  97. if (hccl_in_anchor == nullptr) {
  98. continue;
  99. }
  100. auto src_out_anchor = hccl_in_anchor->GetPeerOutAnchor();
  101. if (src_out_anchor == nullptr) {
  102. GELOGE(INTERNAL_ERROR, "hcom op input has no peer anchor, node_name:%s", node->GetName().c_str());
  103. return INTERNAL_ERROR;
  104. }
  105. if (IsDataNode(src_out_anchor->GetOwnerNode()->GetType())) {
  106. Status ret = ModifyEdgeConnection(graph, src_out_anchor, hccl_in_anchor);
  107. if (ret != SUCCESS) {
  108. GELOGE(INTERNAL_ERROR, "Failed to modify the connection.");
  109. return ret;
  110. }
  111. }
  112. }
  113. return SUCCESS;
  114. }
  115. bool HcclContinuousMemcpyPass::IsDataNode(const std::string& node_type) {
  116. return (node_type == CONSTANTOP) || (node_type == VARIABLE) || (node_type == DATA) || (node_type == CONSTANT);
  117. }
  118. ///
  119. /// @brief Add Identity Node
  120. /// @param [in] ge::ComputeGraphPtr graph
  121. /// @param [in] ge::OutDataAnchorPtr in_node
  122. /// @return ge::NodePtr
  123. ///
  124. NodePtr HcclContinuousMemcpyPass::CreateIdentityNode(const ComputeGraphPtr &graph,
  125. const OutDataAnchorPtr &out_data_anchor) {
  126. GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
  127. NodePtr pre_node = out_data_anchor->GetOwnerNode();
  128. OpDescPtr pre_op_desc = pre_node->GetOpDesc();
  129. if (pre_op_desc == nullptr) {
  130. GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid.");
  131. return nullptr;
  132. }
  133. std::string node_name = pre_node->GetName() + "_" + IDENTITY;
  134. node_name = CheckDuplicateName(node_name);
  135. OpDescPtr op_desc = MakeShared<OpDesc>(node_name.c_str(), IDENTITY);
  136. if (op_desc == nullptr) {
  137. GELOGE(INTERNAL_ERROR, "Create Identity op: MakeShared op_desc fail.");
  138. return nullptr;
  139. }
  140. GELOGI("Create Identity op:%s.", op_desc->GetName().c_str());
  141. graphStatus ret = op_desc->AddInputDesc("x", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  142. if (ret != GRAPH_SUCCESS) {
  143. GELOGE(INTERNAL_ERROR, "Create Identity op: add input desc fail.");
  144. return nullptr;
  145. }
  146. ret = op_desc->AddOutputDesc("y", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  147. if (ret != GRAPH_SUCCESS) {
  148. GELOGE(INTERNAL_ERROR, "Create Identity op: add output desc fail.");
  149. return nullptr;
  150. }
  151. // because history reason ,this pass can not do work after constant fold so mark it
  152. (void)AttrUtils::SetBool(op_desc, ATTR_NO_NEED_CONSTANT_FOLDING, false);
  153. NodePtr memcpy_node = graph->AddNode(op_desc);
  154. if (memcpy_node == nullptr) {
  155. GELOGE(INTERNAL_ERROR, "Insert Identity node fail.");
  156. return nullptr;
  157. }
  158. return memcpy_node;
  159. }
  160. ///
  161. /// @brief Check duplicate node_name
  162. /// @param [in] std::string& node_name
  163. /// @return std::string
  164. ///
  165. std::string HcclContinuousMemcpyPass::CheckDuplicateName(const std::string &node_name) {
  166. std::string tmp_name = node_name;
  167. auto iter = node_num_map_.find(tmp_name);
  168. if (iter != node_num_map_.end()) {
  169. tmp_name = tmp_name + "_" + std::to_string(iter->second);
  170. (iter->second)++;
  171. } else {
  172. node_num_map_[tmp_name] = 1;
  173. }
  174. return tmp_name;
  175. }
  176. ///
  177. /// @brief Modify edge connection
  178. /// @param [in] ComputeGraphPtr graph
  179. /// @param [in] OutDataAnchorPtr src_out_anchor
  180. /// @param [in] InDataAnchorPtr hccl_in_anchor
  181. /// @return status
  182. ///
  183. Status HcclContinuousMemcpyPass::ModifyEdgeConnection(const ComputeGraphPtr &graph,
  184. const OutDataAnchorPtr &src_out_anchor,
  185. const InDataAnchorPtr &hccl_in_anchor) {
  186. GE_CHECK_NOTNULL(src_out_anchor->GetOwnerNode());
  187. GE_CHECK_NOTNULL(hccl_in_anchor->GetOwnerNode());
  188. Status ret = InsertIdentityBeforeHccl(graph, src_out_anchor, hccl_in_anchor);
  189. if (ret != SUCCESS) {
  190. GELOGE(INTERNAL_ERROR, "add identity failed, var_node:%s, hccl_node:%s.",
  191. src_out_anchor->GetOwnerNode()->GetName().c_str(),
  192. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  193. return ret;
  194. }
  195. ret = InsertAssignAfterBroadcastIfNeed(graph, src_out_anchor, hccl_in_anchor);
  196. if (ret != SUCCESS) {
  197. GELOGE(INTERNAL_ERROR, "add assign failed, var_node:%s, hccl_node:%s.",
  198. src_out_anchor->GetOwnerNode()->GetName().c_str(),
  199. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  200. return ret;
  201. }
  202. return SUCCESS;
  203. }
  204. ///
  205. /// @brief Insert Identity node Between Hccl node and variable
  206. /// @param [in] ComputeGraphPtr graph
  207. /// @param [in] OutDataAnchorPtr src_out_anchor
  208. /// @param [in] InDataAnchorPtr hccl_in_anchor
  209. /// @return status
  210. ///
  211. Status HcclContinuousMemcpyPass::InsertIdentityBeforeHccl(const ComputeGraphPtr &graph,
  212. const OutDataAnchorPtr &src_out_anchor,
  213. const InDataAnchorPtr &hccl_in_anchor) {
  214. GELOGI("Between op %s and op %s need insert memcpy async op.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
  215. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  216. NodePtr memcpy_node = CreateIdentityNode(graph, src_out_anchor);
  217. GE_CHECK_NOTNULL(memcpy_node);
  218. Status ret1 = src_out_anchor->Unlink(hccl_in_anchor);
  219. if (ret1 != SUCCESS) {
  220. GELOGE(INTERNAL_ERROR, "The op %s Unlink anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
  221. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  222. return FAILED;
  223. }
  224. auto out_data_anchor_0 = memcpy_node->GetOutDataAnchor(kAnchorNum);
  225. GE_CHECK_NOTNULL(out_data_anchor_0);
  226. ret1 = out_data_anchor_0->LinkTo(hccl_in_anchor);
  227. if (ret1 != SUCCESS) {
  228. GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", memcpy_node->GetName().c_str(),
  229. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  230. return FAILED;
  231. }
  232. Status ret = src_out_anchor->LinkTo(memcpy_node->GetInDataAnchor(kAnchorNum));
  233. if (ret != SUCCESS) {
  234. GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", src_out_anchor->GetOwnerNode()->GetName().c_str(),
  235. memcpy_node->GetName().c_str());
  236. return FAILED;
  237. }
  238. return SUCCESS;
  239. }
  240. ///
  241. /// @brief Insert assign node after broadcast node and variable to refresh variable data
  242. /// @param [in] ComputeGraphPtr graph
  243. /// @param [in] OutDataAnchorPtr var_out_anchor
  244. /// @param [in] InDataAnchorPtr hccl_in_anchor
  245. /// @return status
  246. ///
  247. Status HcclContinuousMemcpyPass::InsertAssignAfterBroadcastIfNeed(const ComputeGraphPtr &graph,
  248. const OutDataAnchorPtr &var_out_anchor,
  249. const InDataAnchorPtr &hccl_in_anchor) {
  250. if (hccl_in_anchor->GetOwnerNode()->GetType() != HCOMBROADCAST) {
  251. GELOGD("%s not broadcast, no need to insert assign node", hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  252. return SUCCESS;
  253. }
  254. if (var_out_anchor->GetOwnerNode()->GetType() != VARIABLE) {
  255. GELOGD("%s not variable, no need to insert assign node", var_out_anchor->GetOwnerNode()->GetName().c_str());
  256. return SUCCESS;
  257. }
  258. GELOGI("after op %s and op %s need insert assign op.", var_out_anchor->GetOwnerNode()->GetName().c_str(),
  259. hccl_in_anchor->GetOwnerNode()->GetName().c_str());
  260. for (auto peer_in_anchor : var_out_anchor->GetPeerInDataAnchors()) {
  261. if (peer_in_anchor->GetOwnerNode()->GetType() == ASSIGN) {
  262. GELOGD("variable %s out assign node is exist.", var_out_anchor->GetOwnerNode()->GetName().c_str());
  263. return SUCCESS;
  264. }
  265. }
  266. NodePtr assign_node = CreateAssignNode(graph, var_out_anchor);
  267. GE_CHECK_NOTNULL(assign_node);
  268. OutDataAnchorPtr hccl_out_anchor = hccl_in_anchor->GetOwnerNode()->GetOutDataAnchor(hccl_in_anchor->GetIdx());
  269. GE_CHECK_NOTNULL(hccl_out_anchor);
  270. Status ret = hccl_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignValueIndex));
  271. if (ret != SUCCESS) {
  272. GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", hccl_out_anchor->GetOwnerNode()->GetName().c_str(),
  273. assign_node->GetName().c_str());
  274. return FAILED;
  275. }
  276. ret = var_out_anchor->LinkTo(assign_node->GetInDataAnchor(kAnchorAssignRefIndex));
  277. if (ret != SUCCESS) {
  278. GELOGE(INTERNAL_ERROR, "The op %s link anchor %s fail.", var_out_anchor->GetOwnerNode()->GetName().c_str(),
  279. assign_node->GetName().c_str());
  280. return FAILED;
  281. }
  282. // add control edge between assign node and node after broadcast node
  283. OutControlAnchorPtr assign_out_control_anchor = assign_node->GetOutControlAnchor();
  284. GE_CHECK_NOTNULL(assign_out_control_anchor);
  285. for (auto in_data_anchor : hccl_out_anchor->GetPeerInDataAnchors()) {
  286. if (in_data_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) {
  287. continue;
  288. }
  289. ret = assign_out_control_anchor->LinkTo(in_data_anchor->GetOwnerNode()->GetInControlAnchor());
  290. if (ret != SUCCESS) {
  291. GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.",
  292. assign_out_control_anchor->GetOwnerNode()->GetName().c_str(),
  293. in_data_anchor->GetOwnerNode()->GetName().c_str());
  294. return FAILED;
  295. }
  296. }
  297. for (auto in_control_anchor : hccl_out_anchor->GetOwnerNode()->GetOutControlAnchor()->GetPeerInControlAnchors()) {
  298. if (in_control_anchor->GetOwnerNode()->GetName() == assign_node->GetName()) {
  299. continue;
  300. }
  301. ret = assign_out_control_anchor->LinkTo(in_control_anchor);
  302. if (ret != SUCCESS) {
  303. GELOGE(INTERNAL_ERROR, "The op %s link control anchor %s fail.",
  304. assign_out_control_anchor->GetOwnerNode()->GetName().c_str(),
  305. in_control_anchor->GetOwnerNode()->GetName().c_str());
  306. return FAILED;
  307. }
  308. }
  309. return SUCCESS;
  310. }
  311. ///
  312. /// @brief create assign Node, add to graph
  313. /// @param [in] ge::ComputeGraphPtr graph
  314. /// @param [in] ge::OutDataAnchorPtr variable node out anchor
  315. /// @return ge::NodePtr
  316. ///
  317. NodePtr HcclContinuousMemcpyPass::CreateAssignNode(const ComputeGraphPtr &graph,
  318. const OutDataAnchorPtr &out_data_anchor) {
  319. GE_CHECK_NOTNULL_EXEC(graph, return nullptr);
  320. NodePtr pre_node = out_data_anchor->GetOwnerNode();
  321. OpDescPtr pre_op_desc = pre_node->GetOpDesc();
  322. if (pre_op_desc == nullptr) {
  323. GELOGE(INTERNAL_ERROR, "OpDesc of pre node is invalid.");
  324. return nullptr;
  325. }
  326. std::string node_name = pre_node->GetName() + "_" + ASSIGN;
  327. node_name = CheckDuplicateName(node_name);
  328. OpDescPtr op_desc = MakeShared<OpDesc>(node_name.c_str(), ASSIGN);
  329. if (op_desc == nullptr) {
  330. GELOGE(INTERNAL_ERROR, "Create Assign op: MakeShared op_desc fail.");
  331. return nullptr;
  332. }
  333. GELOGI("Create Assign op:%s.", op_desc->GetName().c_str());
  334. if (!AttrUtils::SetBool(op_desc, ATTR_NEED_COMPILE, true)) {
  335. GELOGE(INTERNAL_ERROR, "Set ATTR_NEED_COMPILE Attr for node:%s fail.", op_desc->GetName().c_str());
  336. return nullptr;
  337. }
  338. graphStatus ret = op_desc->AddInputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  339. if (ret != GRAPH_SUCCESS) {
  340. GELOGE(INTERNAL_ERROR, "Create Assign op: add ref input desc fail.");
  341. return nullptr;
  342. }
  343. ret = op_desc->AddInputDesc("value", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  344. if (ret != GRAPH_SUCCESS) {
  345. GELOGE(INTERNAL_ERROR, "Create Assign op: add value input desc fail.");
  346. return nullptr;
  347. }
  348. ret = op_desc->AddOutputDesc("ref", pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx()));
  349. if (ret != GRAPH_SUCCESS) {
  350. GELOGE(INTERNAL_ERROR, "Create Assign op: add output desc fail.");
  351. return nullptr;
  352. }
  353. NodePtr assign_node = graph->AddNode(op_desc);
  354. if (assign_node == nullptr) {
  355. GELOGE(INTERNAL_ERROR, "Insert Identity node fail.");
  356. return nullptr;
  357. }
  358. return assign_node;
  359. }
///
/// @brief Clear Status, used for subgraph pass
/// @return SUCCESS
///
Status HcclContinuousMemcpyPass::ClearStatus() {
  // Reset the duplicate-name counters so a later run on another (sub)graph
  // starts its node naming from scratch.
  node_num_map_.clear();
  return SUCCESS;
}
  368. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示