
task_generator.cc 47 kB

  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/build/task_generator.h"
  17. #include <string>
  18. #include <utility>
  19. #include "common/profiling/profiling_manager.h"
  20. #include "common/types.h"
  21. #include "common/util.h"
  22. #include "framework/common/debug/ge_log.h"
  23. #include "graph/debug/ge_attr_define.h"
  24. #include "graph/ge_context.h"
  25. #include "graph/manager/graph_var_manager.h"
  26. #include "graph/model_serialize.h"
  27. #include "graph/utils/node_utils.h"
  28. #include "graph/utils/tensor_utils.h"
  29. #include "graph/utils/type_utils.h"
  30. #include "graph/common/ge_call_wrapper.h"
  31. #include "init/gelib.h"
  32. #include "graph/ge_local_context.h"
  33. #include "ge/ge_api_types.h"
  34. #include "opskernel_manager/ops_kernel_builder_manager.h"
  35. using domi::LogTimeStampDef;
  36. using domi::ModelTaskDef;
  37. using domi::TaskDef;
  38. using std::map;
  39. using std::set;
  40. using std::string;
  41. using std::vector;
  42. namespace {
  43. const char *const kIsFirstNode = "is_first_node";
  44. const char *const kIsLastNode = "is_last_node";
  45. const char *const kIsInputVar = "INPUT_IS_VAR";
  46. const char *const kIsOutputVar = "OUTPUT_IS_VAR";
  47. const char *const kProfilingMode = "PROFILING_MODE";
  48. const uint32_t kProfilingArStep = 2;
  49. const uint64_t kProfilingFpStartLogid = 1;
  50. const uint64_t kProfilingBpEndLogid = 2;
  51. const uint64_t kProfilingArStartLogid = 3;
  52. const uint64_t kProfilingArEndLogid = 4;
  53. const uint64_t kProfilingIterEndLogid = 65535;
  54. const int64_t kHashFactor = 100000;
  55. const int64_t kInvalidGroupId = -1;
  56. } // namespace
  57. namespace ge {
  58. TaskGenerator::TaskGenerator(uint8_t *var_mem_base, uint64_t var_mem_size) {
  59. var_mem_base_ = var_mem_base;
  60. var_mem_size_ = var_mem_size;
  61. }
  62. TaskGenerator::~TaskGenerator() {}
  63. Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t session_id, RunContext &run_context) {
  64. GELOGD("Begin to Get TaskInfo. session_id=%lu", session_id);
  65. // Check params
  66. if (graph == nullptr) {
  67. GELOGE(PARAM_INVALID, "GetTaskInfo param graph is null. session_id=%lu", session_id);
  68. return PARAM_INVALID;
  69. }
  70. std::vector<TaskDef> task_def_list;
  71. std::map<uint32_t, string> op_name_map;
  72. GE_DUMP(graph, "GenerateTaskBefore");
  73. Status ret = GenerateTask(run_context, graph, task_def_list, op_name_map);
  74. GE_DUMP(graph, "GenerateTaskAfter");
  75. if (ret != SUCCESS) {
  76. GELOGE(ret, "GenerateTask failed. session_id=%lu", session_id);
  77. return ret;
  78. }
  79. // op_name_map is used when the graph is loaded
  80. graph->SetGraphOpName(op_name_map);
  81. // Set op_name for infer profiling
  82. vector<string> op_name;
  83. for (auto &iter : op_name_map) {
  84. op_name.push_back(iter.second);
  85. }
  86. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(model, ATTR_MODEL_TASK_INDEX_OP_NAME, op_name),
  87. GELOGE(FAILED, "SetListStr failed.");
  88. return FAILED);
  89. GELOGI("GenerateTask Success, task list:%zu, op map:%zu, logic mem base:%p, logic weight base:%p, logic var base:%p",
  90. task_def_list.size(), op_name_map.size(), run_context.dataMemBase, run_context.weightMemBase, var_mem_base_);
  91. // Init and serialize model_task_def
  92. ModelTaskDef model_task_def;
  93. model_task_def.set_memory_size(run_context.dataMemSize);
  94. model_task_def.set_weight_size(run_context.weightMemSize);
  95. for (const TaskDef &task_def_temp : task_def_list) {
  96. TaskDef *task_def = model_task_def.add_task();
  97. if (task_def == nullptr) {
  98. GELOGE(FAILED, "task_def is nullptr.");
  99. return FAILED;
  100. }
  101. *task_def = task_def_temp;
  102. }
  103. ret = AddModelTaskToModel(model_task_def, session_id, model, run_context);
  104. if (ret != SUCCESS) {
  105. GELOGE(ret, "AddModelTaskToModel failed. session_id=%lu", session_id);
  106. return ret;
  107. }
  108. GELOGD("Get TaskInfo success. session_id=%lu", session_id);
  109. return SUCCESS;
  110. }
  111. Status TaskGenerator::AddModelTaskToModel(const ModelTaskDef &model_task_def, uint64_t session_id, ge::Model &model,
  112. RunContext &run_context) {
  113. GE_CHK_BOOL_EXEC(
  114. AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_BASE_ADDR, reinterpret_cast<uintptr_t>(run_context.dataMemBase)),
  115. GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_BASE_ADDR failed.");
  116. return FAILED);
  117. GE_CHK_BOOL_EXEC(
  118. AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, reinterpret_cast<uintptr_t>(run_context.weightMemBase)),
  119. GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_WEIGHT_ADDR failed.");
  120. return FAILED);
  121. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_TASK_GEN_VAR_ADDR, reinterpret_cast<uintptr_t>(var_mem_base_)),
  122. GELOGE(FAILED, "SetInt ATTR_MODEL_TASK_GEN_VAR_ADDR failed.");
  123. return FAILED);
  124. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_VAR_SIZE, var_mem_size_),
  125. GELOGE(FAILED, "SetInt ATTR_MODEL_VAR_SIZE failed.");
  126. return FAILED);
  127. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, MODEL_ATTR_SESSION_ID, session_id),
  128. GELOGE(FAILED, "SetInt MODEL_ATTR_SESSION_ID failed.");
  129. return FAILED);
  130. size_t task_size = model_task_def.ByteSizeLong();
  131. ge::Buffer serial_buff(task_size);
  132. if (!model_task_def.SerializePartialToArray(serial_buff.GetData(), static_cast<int>(task_size))) {
  133. GELOGE(FAILED, "model_task_def's serialize failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
  134. task_size);
  135. return FAILED;
  136. }
  137. if (!AttrUtils::SetZeroCopyBytes(model, MODEL_ATTR_TASKS, std::move(serial_buff))) {
  138. GELOGE(FAILED, "Set model task to model failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
  139. task_size);
  140. return FAILED;
  141. }
  142. return SUCCESS;
  143. }
  144. Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t session_id) {
  145. vector<int64_t> input_offsets = op_desc->GetInputOffset();
  146. GELOGD("Update is var attr, node[name:%s(%s), id:%ld, stream_id:%ld].", op_desc->GetName().c_str(),
  147. op_desc->GetType().c_str(), op_desc->GetId(), op_desc->GetStreamId());
  148. if (!(input_offsets.empty())) {
  149. vector<bool> input_var;
  150. for (int64_t input : input_offsets) {
  151. input_var.push_back(VarManager::Instance(session_id)->IsVarAddr(input));
  152. }
  153. GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsInputVar, input_var), GELOGE(FAILED, "SetListBool failed.");
  154. return FAILED);
  155. }
  156. vector<int64_t> output_offsets = op_desc->GetOutputOffset();
  157. if (!(output_offsets.empty())) {
  158. vector<bool> output_var;
  159. for (int64_t output : output_offsets) {
  160. output_var.push_back(VarManager::Instance(session_id)->IsVarAddr(output));
  161. }
  162. GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsOutputVar, output_var), GELOGE(FAILED, "SetListBool failed.");
  163. return FAILED);
  164. }
  165. return SUCCESS;
  166. }
  167. Status TaskGenerator::SaveFusionNodes(map<int64_t, std::vector<NodePtr>> &fusion_nodes, ComputeGraphPtr &graph) {
  168. std::map<NodePtr, int64_t> nodes_with_group_attr;
  169. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  170. OpDescPtr op_desc = node->GetOpDesc();
  171. GE_CHECK_NOTNULL(op_desc);
  172. int64_t group_id = kInvalidGroupId;
  173. string name = node->GetName();
  174. string type = node->GetType();
  175. // For the fusion ddb pass, task defs must be continuous.
  176. // Part1: store
  177. // If op_desc has this tag, store the node in the map first,
  178. // then call GenerateTask for the stored elements at the end.
  179. // Only l1 and l2 fusion are handled for now.
  180. if (ge::AttrUtils::GetInt(op_desc, ATTR_NAME_L1_FUSION_GROUP_ID, group_id) ||
  181. ge::AttrUtils::GetInt(op_desc, ATTR_NAME_L2_FUSION_GROUP_ID, group_id)) {
  182. auto stream_id = op_desc->GetStreamId();
  183. auto group_key = group_id + stream_id * kHashFactor;
  184. (void)ge::AttrUtils::SetInt(op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key);
  185. GELOGD("Fusion: store node[name:%s(%s), group id:%ld, group key:%ld, stream_id:%ld] task.", name.c_str(),
  186. type.c_str(), group_id, group_key, op_desc->GetStreamId());
  187. fusion_nodes[group_key].push_back(node);
  188. nodes_with_group_attr.insert({node, group_id});
  189. }
  190. // If all of this node's input nodes share the same group attr,
  191. // but this node has no group attr or a different one,
  192. // that is a bad case: log a warning.
  193. bool call_check = true;
  194. std::unordered_set<int64_t> input_group_ids;
  195. for (const auto &input_node : node->GetInNodes()) {
  196. auto iter = nodes_with_group_attr.find(input_node);
  197. if (iter == nodes_with_group_attr.end()) {
  198. call_check = false;
  199. break;
  200. } else {
  201. input_group_ids.insert(iter->second);
  202. }
  203. }
  204. call_check = (call_check && (input_group_ids.size() == 1));
  205. if (call_check) {
  206. auto input_group_id = *input_group_ids.begin();
  207. if (group_id != input_group_id) {
  208. GELOGW("Fusion: node[name:%s(%s) with group id:%ld and diff from it's input nodes's group id:%ld ",
  209. name.c_str(), type.c_str(), group_id, input_group_id);
  210. }
  211. }
  212. }
  213. GELOGD("Fusion: get fusion group numbers [%zu].", fusion_nodes.size());
  214. return SUCCESS;
  215. }
  216. Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &graph,
  217. vector<domi::TaskDef> &task_def_list, map<uint32_t, string> &op_name_map) {
  218. GELOGD("Beign to generate task, graph name is %s.", graph->GetName().c_str());
  219. std::shared_ptr<GELib> ge_lib = GELib::GetInstance();
  220. if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
  221. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GenerateTask failed.");
  222. return GE_CLI_GE_NOT_INITIALIZED;
  223. }
  224. GE_CHK_STATUS_RET(MarkNodeAndSetIndex(graph), "MarkNodeAndSetIndex failed.");
  225. ProfilingPoint profiling_point;
  226. vector<uint32_t> all_reduce_nodes;
  227. GE_CHK_STATUS_RET(FindProfilingTaskIndex(graph, profiling_point, all_reduce_nodes));
  228. const OpsKernelManager &ops_kernel_manager = ge_lib->OpsKernelManagerObj();
  229. GE_TIMESTAMP_CALLNUM_START(GenerateTask);
  230. // map store fusion nodes
  231. map<int64_t, std::vector<NodePtr>> fusion_nodes;
  232. string buffer_optimize = "off_optimize";
  233. (void)ge::GetContext().GetOption(BUFFER_OPTIMIZE, buffer_optimize);
  234. if (buffer_optimize != "off_optimize") {
  235. GE_CHK_STATUS_RET(SaveFusionNodes(fusion_nodes, graph));
  236. }
  237. std::unordered_set<Node *> fusion_nodes_seen;
  238. int64_t group_key;
  239. uint32_t node_index = 0;
  240. rtStream_t stream = nullptr;
  241. bool is_unknown_shape = graph->GetGraphUnknownFlag() || GetContext().GetHostExecFlag();
  242. if (is_unknown_shape) {
  243. GE_CHK_STATUS_RET(SetUnknownShapeStream(run_context, stream), "Set unknown shape stream failed.");
  244. }
  245. std::function<void()> callback = [&]() {
  246. if (is_unknown_shape) {
  247. if (DestroyUnknownShapeStream(run_context, stream) != SUCCESS) {
  248. GELOGE(FAILED, "Destory unknown shape stream failed.");
  249. }
  250. }
  251. };
  252. GE_MAKE_GUARD(release, callback);
  253. uint64_t all_reduce_node_idx = 0;
  254. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  255. OpDescPtr op_desc = node->GetOpDesc();
  256. GE_CHECK_NOTNULL(op_desc);
  257. node_index++;
  258. string name = node->GetName();
  259. string type = node->GetType();
  260. bool attr_notask = false;
  261. bool get_attr_notask_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask);
  262. GE_IF_BOOL_EXEC(get_attr_notask_flag && attr_notask,
  263. GELOGI("Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str());
  264. continue);
  265. GE_CHK_STATUS_RET(UpdateOpIsVarAttr(op_desc, graph->GetSessionID()));
  266. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  267. // For the fusion ddb pass, task defs must be continuous.
  268. // Part2: Call
  269. auto fusion_task_info =
  270. FusionTaskInfo{run_context, graph, node, op_desc, node_index, ge_lib,
  271. ops_kernel_manager, task_def_list, op_name_map, profiling_point, all_reduce_nodes, all_reduce_node_idx};
  272. GE_CHK_STATUS_RET(GenerateTaskForFusionNode(fusion_task_info, fusion_nodes, fusion_nodes_seen),
  273. "Call GenerateTaskForFusionNode node:%s(%s) failed", name.c_str(), type.c_str());
  274. // continue directly
  275. if (ge::AttrUtils::GetInt(op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key)) {
  276. GELOGI("Fusion node[name:%s, type:%s] do not need generate task again.", name.c_str(), type.c_str());
  277. continue;
  278. }
  279. if (op_kernel_lib_name.empty()) {
  280. GELOGI("Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str());
  281. continue;
  282. }
  283. auto kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
  284. if (kernel_info_store == nullptr) {
  285. GELOGE(INTERNAL_ERROR,
  286. "No ops kernel store or ops kernel builder found. node:%s(%s), op_kernel_lib_name=%s.",
  287. name.c_str(),
  288. type.c_str(), op_kernel_lib_name.c_str());
  289. return INTERNAL_ERROR;
  290. }
  291. GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "Call UpdateAnchorStatus node:%s(%s) failed", name.c_str(),
  292. type.c_str());
  293. // Profiling task
  294. size_t task_list_size_before = task_def_list.size();
  295. GE_CHK_STATUS_RET(InsertProfilingTaskBefore(op_desc, profiling_point, all_reduce_nodes,
  296. node_index, task_def_list, all_reduce_node_idx));
  297. int64_t op_id = op_desc->GetId();
  298. // To be compatible with dynamic shape scenarios, the default is 0
  299. int64_t stream_id = 0;
  300. if (!is_unknown_shape) {
  301. stream_id = op_desc->GetStreamId();
  302. GE_CHK_STATUS_RET(SetKnownShapeStream(run_context, stream_id), "node[name:%s(%s), id:%ld] stream id is invalid.",
  303. name.c_str(), type.c_str(), op_id);
  304. }
  305. GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task.", op_kernel_lib_name.c_str(),
  306. name.c_str(), type.c_str(), op_id, stream_id);
  307. GE_TIMESTAMP_RESTART(GenerateTask);
  308. auto ret = OpsKernelBuilderManager::Instance().GenerateTask(*node, run_context, task_def_list);
  309. GE_TIMESTAMP_ADD(GenerateTask);
  310. if (ret != SUCCESS) {
  311. GELOGE(ret, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task failed.",
  312. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id);
  313. return ret;
  314. }
  315. // Profiling task
  316. GE_CHK_STATUS_RET(InsertProfilingTaskAfter(op_desc, profiling_point, all_reduce_nodes,
  317. node_index, task_def_list, all_reduce_node_idx));
  318. size_t task_list_size_after = task_def_list.size();
  319. // If the number of tasks decreased
  320. if (task_list_size_after < task_list_size_before) {
  321. GELOGE(FAILED, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
  322. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id, task_list_size_before,
  323. task_list_size_after);
  324. return FAILED;
  325. }
  326. // Reset stream id to ge stream id, as graph load must use ge stream to reassign stream
  327. void *ops_kernel_info_store_ptr = kernel_info_store.get();
  328. for (size_t idx = task_list_size_before; idx < task_list_size_after; ++idx) {
  329. task_def_list[idx].set_stream_id(static_cast<uint32_t>(stream_id));
  330. op_name_map[idx] = name;
  331. // Set opsKernelInfoStorePtr and op_index; the two fields are used in DistributeTask and InitTaskInfo
  332. TaskDef *task_def_ptr = &task_def_list[idx];
  333. GE_CHECK_NOTNULL(task_def_ptr);
  334. task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr));
  335. }
  336. GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task finished, generate %zu task(s).",
  337. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id,
  338. task_list_size_after - task_list_size_before);
  339. }
  340. GE_TIMESTAMP_CALLNUM_EVENT_END(GenerateTask, "GraphBuild::GenerateTask");
  341. return SUCCESS;
  342. }
  343. Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info,
  344. std::map<int64_t, std::vector<NodePtr>> &fusion_nodes,
  345. std::unordered_set<Node *> &fusion_nodes_seen) {
  346. Status ret = SUCCESS;
  347. int64_t group_key;
  348. auto &run_context = fusion_task_info.run_context;
  349. auto &graph = fusion_task_info.graph;
  350. auto &node = fusion_task_info.node;
  351. auto &fusion_op_desc = fusion_task_info.fusion_op_desc;
  352. auto &node_index = fusion_task_info.node_index;
  353. const auto &ops_kernel_manager = fusion_task_info.ops_kernel_manager;
  354. auto &task_def_list = fusion_task_info.task_def_list;
  355. auto &op_name_map = fusion_task_info.op_name_map;
  356. auto &profiling_point = fusion_task_info.profiling_point;
  357. auto &all_reduce_nodes = fusion_task_info.all_reduce_nodes;
  358. auto &all_reduce_idx = fusion_task_info.all_reduce_node_idx;
  359. // If op_desc has this attr, generate tasks for all nodes with the same group key on one stream together
  360. if (ge::AttrUtils::GetInt(fusion_op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key) &&
  361. (fusion_nodes_seen.count(node.get()) == 0)) {
  362. GELOGI("Fusion: start fusion group index[%ld], nodes size[%zu].", group_key, fusion_nodes[group_key].size());
  363. for (auto &fusion_node : fusion_nodes[group_key]) {
  364. OpDescPtr op_desc = fusion_node->GetOpDesc();
  365. UpdateOpIsVarAttr(op_desc, graph->GetSessionID());
  366. std::string fusion_node_name = fusion_node->GetName();
  367. std::string fusion_node_type = fusion_node->GetType();
  368. std::string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  369. if (op_kernel_lib_name.empty()) {
  370. GELOGI("Fusion: fusion_node[name:%s(%s)] task no need to generate task.", fusion_node_name.c_str(),
  371. fusion_node_type.c_str());
  372. continue;
  373. }
  374. bool attr_notask = false;
  375. GE_IF_BOOL_EXEC(ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask) && attr_notask,
  376. GELOGI("Fusion: fusion_node[name:%s, type:%s] does not need to generate task.",
  377. fusion_node_name.c_str(), fusion_node_type.c_str());
  378. continue);
  379. size_t task_list_size_before = task_def_list.size();
  380. OpsKernelInfoStorePtr kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
  381. if (kernel_info_store == nullptr) {
  382. GELOGE(INTERNAL_ERROR,
  383. "Fusion: No ops kernel store or ops kernel builder found. fusion_node:%s(%s), op_kernel_lib_name=%s.",
  384. fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str());
  385. return INTERNAL_ERROR;
  386. }
  387. ret = UpdateAnchorStatus(fusion_node);
  388. if (ret != SUCCESS) {
  389. GELOGE(ret, "Fusion: Call UpdateAnchorStatus fusion_node:%s(%s) failed", fusion_node_name.c_str(),
  390. fusion_node_type.c_str());
  391. return ret;
  392. }
  393. int64_t op_id = op_desc->GetId();
  394. int64_t stream_id = op_desc->GetStreamId();
  395. if (stream_id < 0 || stream_id >= (int64_t)run_context.graphStreamList.size()) {
  396. GELOGE(INTERNAL_ERROR, "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, stream list size=%zu",
  397. fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, run_context.graphStreamList.size());
  398. return INTERNAL_ERROR;
  399. }
  400. // profiling task
  401. (void)InsertProfilingTaskBefore(op_desc, profiling_point, all_reduce_nodes,
  402. node_index, task_def_list, all_reduce_idx);
  403. run_context.stream = run_context.graphStreamList[stream_id];
  404. GELOGI("Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld] task.",
  405. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
  406. ret = OpsKernelBuilderManager::Instance().GenerateTask(*fusion_node, run_context, task_def_list);
  407. if (ret != SUCCESS) {
  408. GELOGE(ret,
  409. "Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
  410. "id:%ld, stream_id:%ld] task failed.",
  411. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
  412. return ret;
  413. }
  414. // profiling task
  415. (void)InsertProfilingTaskAfter(op_desc, profiling_point, all_reduce_nodes,
  416. node_index, task_def_list, all_reduce_idx);
  417. size_t task_list_size_after = task_def_list.size();
  418. // If the number of tasks decreased
  419. if (task_list_size_after < task_list_size_before) {
  420. GELOGE(FAILED,
  421. "Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
  422. "id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
  423. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
  424. task_list_size_before, task_list_size_after);
  425. return FAILED;
  426. }
  427. // reset stream id to ge stream id, as graph load must use ge stream to reassign stream
  428. void *ops_kernel_info_store_ptr = kernel_info_store.get();
  429. for (size_t idx = task_list_size_before; idx < task_list_size_after; ++idx) {
  430. task_def_list[idx].set_stream_id(static_cast<uint32_t>(stream_id));
  431. op_name_map[idx] = fusion_node_name;
  432. // Set opsKernelInfoStorePtr and op_index; the two fields are used in DistributeTask and InitTaskInfo
  433. TaskDef *task_def_ptr = &task_def_list[idx];
  434. task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr));
  435. }
  436. GELOGI("Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld]"
  437. " task finished, generate %zu task(s).",
  438. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
  439. task_list_size_after - task_list_size_before);
  440. // Record nodes for which GenerateTask has been called successfully
  441. fusion_nodes_seen.insert(fusion_node.get());
  442. node_index++;
  443. }
  444. }
  445. // Without the fusion tag, or already handled: skip directly
  446. return ret;
  447. }
  448. Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
  449. if (NodeUtils::SetAllAnchorStatus(node) != GRAPH_SUCCESS) {
  450. GELOGE(INTERNAL_ERROR, "NodeUtils::SetAllAnchorStatus failed.");
  451. return INTERNAL_ERROR;
  452. }
  453. for (auto &anchor : node->GetAllInDataAnchors()) {
  454. auto peer_anchor = anchor->GetPeerOutAnchor();
  455. if (peer_anchor == nullptr) {
  456. if (AnchorUtils::SetStatus(anchor, ANCHOR_SUSPEND) != GRAPH_SUCCESS) {
  457. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  458. return INTERNAL_ERROR;
  459. }
  460. continue;
  461. }
  462. std::string const_type;
  463. bool is_const = NodeUtils::GetConstOpType(peer_anchor->GetOwnerNode(), const_type);
  464. if (is_const && (const_type == CONSTANT)) {
  465. if (AnchorUtils::SetStatus(anchor, ANCHOR_CONST) != GRAPH_SUCCESS) {
  466. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  467. return INTERNAL_ERROR;
  468. }
  469. } else {
  470. if (AnchorUtils::SetStatus(anchor, ANCHOR_DATA) != GRAPH_SUCCESS) {
  471. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  472. return INTERNAL_ERROR;
  473. }
  474. }
  475. }
  476. return SUCCESS;
  477. }
  478. Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
  479. auto ge_lib = GELib::GetInstance();
  480. if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
  481. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GE is not initialized or is finalized.");
  482. return GE_CLI_GE_NOT_INITIALIZED;
  483. }
  484. const auto all_nodes = graph->GetNodes(graph->GetGraphUnknownFlag());
  485. if (all_nodes.empty()) {
  486. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "Graph's node is empty");
  487. return GE_GRAPH_GRAPH_NODE_NULL;
  488. }
  489. int64_t node_index = 0;
  490. for (auto &node : all_nodes) {
  491. OpDescPtr op_desc = node->GetOpDesc();
  492. GE_CHECK_NOTNULL(op_desc);
  493. op_desc->SetId(node_index++);
  494. }
  495. map<int64_t, vector<OpDescPtr>> all_stream_ops;
  496. for (auto &node : all_nodes) {
  497. OpDescPtr op_desc = node->GetOpDesc();
  498. GE_CHECK_NOTNULL(op_desc);
  499. // Reset op kernel lib name
  500. if (op_desc->GetOpKernelLibName().empty()) {
  501. (void)ge_lib->DNNEngineManagerObj().GetDNNEngineName(node);
  502. }
  503. (void)op_desc->DelAttr(kIsFirstNode);
  504. (void)op_desc->DelAttr(kIsLastNode);
  505. all_stream_ops[op_desc->GetStreamId()].emplace_back(op_desc);
  506. }
  507. bool is_single_stream = all_stream_ops.size() == 1;
  508. for (const auto &stream_ops : all_stream_ops) {
  509. Status status = MarkFirstAndLastOps(stream_ops.second, is_single_stream);
  510. if (status != SUCCESS) {
  511. GELOGE(status, "Mark first and last nodes failed.");
  512. return status;
  513. }
  514. }
  515. return SUCCESS;
  516. }
  517. Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_single_stream) const {
  518. vector<vector<OpDescPtr>> continuous_op_lists(1);
  519. const set<string> separator_types(
  520. {LABELSET, LABELGOTO, LABELGOTOEX, LABELSWITCH, LABELSWITCHBYINDEX, STREAMSWITCH, STREAMSWITCHN});
  521. for (auto &op_desc : ops) {
  522. bool attr_notask = false;
  523. if (ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask) && attr_notask) {
  524. continue;
  525. }
  526. string op_type = op_desc->GetType();
  527. if ((!is_single_stream && !op_desc->GetSubgraphInstanceNames().empty()) || separator_types.count(op_type) != 0) {
  528. continuous_op_lists.emplace_back(vector<OpDescPtr>());
  529. } else {
  530. continuous_op_lists.back().emplace_back(op_desc);
  531. }
  532. }
  533. GELOGD("Number of continuous node lists is %zu.", continuous_op_lists.size());
  534. for (const auto &continuous_ops : continuous_op_lists) {
  535. map<string, std::pair<OpDescPtr, OpDescPtr>> first_and_last_ops;
  536. for (auto &op_desc : continuous_ops) {
  537. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  538. if (op_kernel_lib_name.empty()) {
  539. GELOGE(INTERNAL_ERROR, "node:%s(%s) get op kernel lib failed.", op_desc->GetName().c_str(),
  540. op_desc->GetType().c_str());
  541. return INTERNAL_ERROR;
  542. }
  543. auto it = first_and_last_ops.find(op_kernel_lib_name);
  544. if (it == first_and_last_ops.end()) {
  545. first_and_last_ops.emplace(op_kernel_lib_name, std::make_pair(op_desc, op_desc));
  546. } else {
  547. it->second.second = op_desc;
  548. }
  549. }
  550. for (auto &it : first_and_last_ops) {
  551. auto &op_pair = it.second;
  552. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.first, kIsFirstNode, true), GELOGE(FAILED, "SetBool failed.");
  553. return FAILED);
  554. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.second, kIsLastNode, true), GELOGE(FAILED, "SetBool failed.");
  555. return FAILED);
  556. }
  557. }
  558. return SUCCESS;
  559. }
  560. Status TaskGenerator::AutoFindFpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point) const {
  561. GELOGI("Start AutoFindFpOpIndex");
  562. OpDescPtr fp_op_desc = nullptr;
  563. uint32_t current_idx = 0;
  564. uint32_t first_fp = 0;
  565. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  566. OpDescPtr op_desc = node->GetOpDesc();
  567. GE_CHECK_NOTNULL(op_desc);
  568. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  569. if (op_kernel_lib_name.empty()) {
  570. continue;
  571. }
  572. if (op_desc->GetType() == GETNEXT || op_desc->GetType() == DATA) {
  573. auto out_anchor = node->GetOutDataAnchor(0);
  574. for (auto &peer_in_anchor : out_anchor->GetPeerInDataAnchors()) {
  575. GE_CHECK_NOTNULL(peer_in_anchor);
  576. auto in_node_desc = peer_in_anchor->GetOwnerNode()->GetOpDesc();
  577. GE_CHECK_NOTNULL(in_node_desc);
  578. if (fp_op_desc == nullptr || ((in_node_desc->GetId()) < (fp_op_desc->GetId()))) {
  579. fp_op_desc = in_node_desc;
  580. }
  581. }
  582. break;
  583. }
  584. }
  585. if (fp_op_desc == nullptr) {
  586. GELOGW("not find fp_op_desc.");
  587. return SUCCESS;
  588. }
  589. GELOGI("Find fp_op_desc is %s, id is %ld", fp_op_desc->GetName().c_str(), fp_op_desc->GetId());
  590. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  591. OpDescPtr op_desc = node->GetOpDesc();
  592. GE_CHECK_NOTNULL(op_desc);
  593. current_idx++;
  594. if (op_desc->GetName() == fp_op_desc->GetName()) {
  595. first_fp = current_idx;
  596. GELOGI("First fp name is %s, idx is %u", op_desc->GetName().c_str(), first_fp);
  597. break;
  598. }
  599. }
  600. profiling_point.fp_index = first_fp;
  601. return SUCCESS;
  602. }
  603. Status TaskGenerator::AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  604. vector<uint32_t> &all_reduce_nodes) const {
  605. GELOGI("Start AutoFindBpOpIndex");
  606. NodePtr bp_node = nullptr;
  607. uint32_t current_idx = 0;
  608. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  609. OpDescPtr op_desc = node->GetOpDesc();
  610. GE_CHECK_NOTNULL(op_desc);
  611. current_idx++;
  612. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  613. if (op_kernel_lib_name.empty()) {
  614. continue;
  615. }
  616. if (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE) {
  617. bp_node = node;
  618. all_reduce_nodes.emplace_back(current_idx);
  619. GELOGI("Allreduce name %s, idx %u", op_desc->GetName().c_str(), current_idx);
  620. }
  621. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  622. if (bp_node == nullptr) {
  623. bp_node = node;
  624. }
  625. }
  626. if (graph->GetNeedIteration()) {
  627. if (op_desc->GetName() == NODE_NAME_FLOWCTRL_LOOP_ASSIGNADD) {
  628. profiling_point.end_index.insert(current_idx);
  629. GELOGI("Iter end name %s, idx %u, from Node_Output_IteratorCtrl_StreamSwitch_StreamActive",
  630. op_desc->GetName().c_str(), current_idx);
  631. }
  632. if (op_desc->GetName() == NODE_NAME_FLOWCTRL_LOOP_ASSIGN) {
  633. profiling_point.end_index.insert(current_idx);
  634. GELOGI("Iter end name %s, idx %u, from FlowCtrl_LoopCond_ASSIGN",
  635. op_desc->GetName().c_str(), current_idx);
  636. }
  637. } else {
  638. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  639. profiling_point.end_index.insert(current_idx);
  640. GELOGI("Iter end name %s, idx %u, from NETOUTPUT", op_desc->GetName().c_str(), current_idx);
  641. }
  642. }
  643. }
  644. if (bp_node == nullptr) {
  645. GELOGW("not find bp_node.");
  646. return SUCCESS;
  647. }
  648. profiling_point.bp_index = FindLastBpFromBpNode(graph, bp_node);
  649. return SUCCESS;
  650. }
  651. uint32_t TaskGenerator::FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &bp_node) const {
  652. uint32_t last_bp = 0;
  653. OpDescPtr bp_op_desc = nullptr;
  654. for (auto &in_anchor : bp_node->GetAllInDataAnchors()) {
  655. auto out_anchor = in_anchor->GetPeerOutAnchor();
  656. if (out_anchor == nullptr || out_anchor->GetOwnerNode() == nullptr) {
  657. continue;
  658. }
  659. auto out_node_desc = out_anchor->GetOwnerNode()->GetOpDesc();
  660. GE_CHECK_NOTNULL(out_node_desc);
  661. if (bp_op_desc == nullptr || ((out_node_desc->GetId()) > (bp_op_desc->GetId()))) {
  662. bp_op_desc = out_node_desc;
  663. }
  664. GELOGI("bp_op_desc is %s, id is %ld", bp_op_desc->GetName().c_str(), bp_op_desc->GetId());
  665. }
  666. GE_CHECK_NOTNULL(bp_op_desc);
  667. uint32_t current_idx = 0;
  668. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  669. OpDescPtr op_desc = node->GetOpDesc();
  670. GE_CHECK_NOTNULL(op_desc);
  671. current_idx++;
  672. if (op_desc->GetName() == bp_op_desc->GetName()) {
  673. last_bp = current_idx;
  674. GELOGI("First bp name %s, idx %u", op_desc->GetName().c_str(), last_bp);
  675. break;
  676. }
  677. }
  678. return last_bp;
  679. }
  680. Status TaskGenerator::FindFpOfEnv(const ComputeGraphPtr &graph, const std::string &fp_point_str,
  681. ProfilingPoint &profiling_point) const {
  682. GELOGI("Start FindFpOfEnv");
  683. uint32_t current_idx = 0;
  684. uint32_t first_fp = 0;
  685. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  686. OpDescPtr op_desc = node->GetOpDesc();
  687. GE_CHECK_NOTNULL(node->GetOpDesc());
  688. current_idx++;
  689. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  690. if (op_kernel_lib_name.empty()) {
  691. continue;
  692. }
  693. if (first_fp == 0 && IsProfPoint(op_desc, fp_point_str)) {
  694. first_fp = current_idx;
  695. GELOGI("First fp name from env is %s, idx %u", op_desc->GetName().c_str(), first_fp);
  696. }
  697. }
  698. profiling_point.fp_index = first_fp;
  699. return SUCCESS;
  700. }
  701. Status TaskGenerator::FindBpOfEnv(const ComputeGraphPtr &graph, const std::string &bp_point_str,
  702. ProfilingPoint &profiling_point, vector<uint32_t> &all_reduce_nodes) const {
  703. GELOGI("Start FindBpOfEnv");
  704. uint32_t current_idx = 0;
  705. uint32_t last_bp = 0;
  706. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  707. OpDescPtr op_desc = node->GetOpDesc();
  708. GE_CHECK_NOTNULL(node->GetOpDesc());
  709. current_idx++;
  710. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  711. if (op_kernel_lib_name.empty()) {
  712. continue;
  713. }
  714. if (graph->GetNeedIteration()) {
  715. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT + '_' + NODE_NAME_STREAM_SWITCH + "_StreamActive") {
  716. profiling_point.end_index.insert(current_idx);
  717. GELOGI("Iter end name %s, idx %u, from Node_Output_IteratorCtrl_StreamSwitch_StreamActive",
  718. op_desc->GetName().c_str(), current_idx);
  719. }
  720. if (op_desc->GetName() == NODE_NAME_FLOWCTRL_LOOP_ASSIGN) {
  721. profiling_point.end_index.insert(current_idx);
  722. GELOGI("Iter end name %s, idx %u, from FlowCtrl_LoopCond_ASSIGN",
  723. op_desc->GetName().c_str(), current_idx);
  724. }
  725. } else {
  726. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  727. profiling_point.end_index.insert(current_idx);
  728. GELOGI("Iter end name %s, idx %u, from NETOUTPUT", op_desc->GetName().c_str(), current_idx);
  729. }
  730. }
  731. if (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE) {
  732. all_reduce_nodes.emplace_back(current_idx);
  733. GELOGI("Allreduce name %s, idx %u", op_desc->GetName().c_str(), current_idx);
  734. }
  735. if (IsProfPoint(op_desc, bp_point_str)) {
  736. last_bp = current_idx;
  737. GELOGI("Last bp name from env is %s, idx %u", op_desc->GetName().c_str(), last_bp);
  738. }
  739. }
  740. profiling_point.bp_index = last_bp;
  741. return SUCCESS;
  742. }
  743. Status TaskGenerator::GetFpBpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  744. vector<uint32_t> &all_reduce_nodes, std::string &fp_point_str,
  745. std::string &bp_point_str) const {
  746. ProfilingManager::Instance().GetFpBpPoint(fp_point_str, bp_point_str);
  747. Status ret = SUCCESS;
  748. if (fp_point_str.empty()) {
  749. ret = AutoFindFpOpIndex(graph, profiling_point);
  750. if (ret != SUCCESS) {
  751. GELOGW("First forward profiling op_index not set and FindFpOpIndex failed.");
  752. return FAILED;
  753. }
  754. }
  755. if (bp_point_str.empty()) {
  756. ret = AutoFindBpOpIndex(graph, profiling_point, all_reduce_nodes);
  757. if (ret != SUCCESS) {
  758. GELOGW("Last backward profiling op_index not set and FindBpOpIndex failed.");
  759. return FAILED;
  760. }
  761. }
  762. return SUCCESS;
  763. }
  764. Status TaskGenerator::FindProfilingNodeIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  765. std::vector<uint32_t> &all_reduce_nodes) {
  766. return FindProfilingTaskIndex(graph, profiling_point, all_reduce_nodes);
  767. }
  768. Status TaskGenerator::FindProfilingTaskIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  769. vector<uint32_t> &all_reduce_nodes) const {
  770. GE_CHECK_NOTNULL(graph);
  771. const char *profiling_mode = std::getenv(kProfilingMode);
  772. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  773. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  774. if (!is_profiling) {
  775. GELOGD("Profiling is not open.");
  776. return SUCCESS;
  777. }
  778. GELOGI("Start get FP/BP index.");
  779. std::string fp_point_str;
  780. std::string bp_point_str;
  781. Status ret = GetFpBpIndex(graph, profiling_point, all_reduce_nodes, fp_point_str, bp_point_str);
  782. if (ret != SUCCESS) {
  783. GELOGW("Get FP_POINT BP_POINT failed.");
  784. return SUCCESS;
  785. }
  786. GELOGI("fp_point_str:%s, bp_point_str:%s.", fp_point_str.c_str(), bp_point_str.c_str());
  787. if (!fp_point_str.empty()) {
  788. ret = FindFpOfEnv(graph, fp_point_str, profiling_point);
  789. if (ret != SUCCESS) {
  790. GELOGW("First backward profiling op name set but FindFpOfEnv failed.");
  791. return SUCCESS;
  792. }
  793. }
  794. if (!bp_point_str.empty()) {
  795. ret = FindBpOfEnv(graph, bp_point_str, profiling_point, all_reduce_nodes);
  796. if (ret != SUCCESS) {
  797. GELOGW("Last backward profiling op name set but FindBpOfEnv failed.");
  798. return SUCCESS;
  799. }
  800. }
  801. bool train_graph = graph->GetNeedIteration();
  802. if (profiling_point.fp_index == 0 && train_graph) {
  803. GELOGW("First forward op name can't be found in graph for training trace.");
  804. }
  805. if (profiling_point.bp_index == 0 && train_graph) {
  806. GELOGW("Last backward op name can't be found in graph for training trace.");
  807. }
  808. return SUCCESS;
  809. }
  810. Status TaskGenerator::InsertProfilingTaskBefore(const OpDescPtr &op_desc, const ProfilingPoint &profiling_point,
  811. vector<uint32_t> &all_reduce_nodes, uint32_t node_index,
  812. vector<domi::TaskDef> &task_def_list, uint64_t &all_reduce_node_idx) {
  813. const char *profiling_mode = std::getenv(kProfilingMode);
  814. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  815. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  816. bool is_insert_fp_profiling_task = false;
  817. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_FP_PROFILILNG_TASK, is_insert_fp_profiling_task);
  818. bool is_insert_bp_profiling_task = false;
  819. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_BP_PROFILILNG_TASK, is_insert_bp_profiling_task);
  820. bool no_insert_profiling_task = ((profiling_point.fp_index == 0) || (profiling_point.bp_index == 0) ||
  821. (profiling_point.end_index.empty())) &&
  822. (!(is_insert_fp_profiling_task || is_insert_bp_profiling_task));
  823. if (!is_profiling || no_insert_profiling_task) {
  824. return SUCCESS;
  825. }
  826. GELOGD("Insert fp profiling task: %d, insert bp profiling task: %d, fp index: %u, bp index: %u, end index size: %zu",
  827. is_insert_fp_profiling_task, is_insert_bp_profiling_task, profiling_point.fp_index, profiling_point.bp_index,
  828. profiling_point.end_index.size());
  829. if ((profiling_point.fp_index == node_index) || is_insert_fp_profiling_task) {
  830. uint64_t jobid_log_id = ge::GetContext().TraceId();
  831. GELOGI("The first FP operator is %s, idx %u, job_id %lu", op_desc->GetName().c_str(), node_index, jobid_log_id);
  832. TaskDef job_task_def;
  833. job_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  834. job_task_def.set_stream_id(op_desc->GetStreamId());
  835. LogTimeStampDef *job_log_def = job_task_def.mutable_log_timestamp();
  836. if (job_log_def != nullptr) {
  837. job_log_def->set_logid(jobid_log_id);
  838. job_log_def->set_notify(false);
  839. }
  840. task_def_list.emplace_back(job_task_def);
  841. TaskDef fp_task_def;
  842. fp_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  843. fp_task_def.set_stream_id(op_desc->GetStreamId());
  844. LogTimeStampDef *fp_log_def = fp_task_def.mutable_log_timestamp();
  845. if (fp_log_def != nullptr) {
  846. fp_log_def->set_logid(kProfilingFpStartLogid);
  847. fp_log_def->set_notify(false);
  848. }
  849. task_def_list.emplace_back(fp_task_def);
  850. }
  851. bool is_all_reduce = (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE);
  852. uint64_t all_reduce_task_idx = 0;
  853. bool is_insert_all_reduce_task = false;
  854. if (is_all_reduce && is_insert_bp_profiling_task) {
  855. all_reduce_task_idx = all_reduce_node_idx;
  856. is_insert_all_reduce_task = true;
  857. }
  858. if (is_all_reduce) {
  859. all_reduce_node_idx++;
  860. }
  861. if (!is_insert_all_reduce_task) {
  862. for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
  863. if (all_reduce_nodes[i] == node_index) {
  864. all_reduce_task_idx = i;
  865. is_insert_all_reduce_task = true;
  866. break;
  867. }
  868. }
  869. }
  870. if (is_insert_all_reduce_task) {
  871. GELOGI("The start allreduce operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  872. TaskDef ar_task_def;
  873. ar_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  874. ar_task_def.set_stream_id(op_desc->GetStreamId());
  875. LogTimeStampDef *ar_log_def = ar_task_def.mutable_log_timestamp();
  876. if (ar_log_def != nullptr) {
  877. GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(all_reduce_task_idx, kProfilingArStep),
  878. GELOGE(FAILED, "Multiply result is out of range.");
  879. return FAILED);
  880. auto log_id = all_reduce_task_idx * kProfilingArStep + kProfilingArStartLogid;
  881. ar_log_def->set_logid(log_id);
  882. ar_log_def->set_notify(false);
  883. (void)ge::AttrUtils::SetInt(op_desc, ATTR_NAME_INSERT_PROFILILNG_TASK_LOG_ID, log_id);
  884. }
  885. task_def_list.push_back(ar_task_def);
  886. }
  887. return SUCCESS;
  888. }
  889. Status TaskGenerator::InsertProfilingTaskAfter(const OpDescPtr &op_desc, const ProfilingPoint &profiling_point,
  890. vector<uint32_t> &all_reduce_nodes, uint32_t node_index,
  891. vector<domi::TaskDef> &task_def_list, uint64_t all_reduce_node_idx) {
  892. GE_CHECK_NOTNULL(op_desc);
  893. const char *profiling_mode = std::getenv(kProfilingMode);
  894. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  895. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  896. bool is_insert_bp_profiling_task = false;
  897. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_BP_PROFILILNG_TASK, is_insert_bp_profiling_task);
  898. bool is_insert_end_profiling_task = false;
  899. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_END_PROFILILNG_TASK, is_insert_end_profiling_task);
  900. bool no_insert_profiling_task = ((profiling_point.fp_index == 0) || (profiling_point.bp_index == 0) ||
  901. (profiling_point.end_index.empty())) &&
  902. (!(is_insert_bp_profiling_task || is_insert_end_profiling_task));
  903. if (!is_profiling || no_insert_profiling_task) {
  904. return SUCCESS;
  905. }
  906. GELOGD("Insert bp profiling task: %d, insert end profiling task: %d, fp index: %u, bp index: %u, end index size: %zu",
  907. is_insert_bp_profiling_task, is_insert_end_profiling_task, profiling_point.fp_index, profiling_point.bp_index,
  908. profiling_point.end_index.size());
  909. bool is_all_reduce = (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE);
  910. if ((profiling_point.bp_index == node_index) || (!is_all_reduce && is_insert_bp_profiling_task)) {
  911. GELOGI("The last BP operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  912. TaskDef bp_task_def;
  913. bp_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  914. bp_task_def.set_stream_id(op_desc->GetStreamId());
  915. LogTimeStampDef *bp_log_def = bp_task_def.mutable_log_timestamp();
  916. GE_CHECK_NOTNULL(bp_log_def);
  917. bp_log_def->set_logid(kProfilingBpEndLogid);
  918. bp_log_def->set_notify(false);
  919. task_def_list.emplace_back(bp_task_def);
  920. }
  921. if (profiling_point.end_index.find(node_index) != profiling_point.end_index.end() ||
  922. is_insert_end_profiling_task) {
  923. GELOGI("The iteration end operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  924. TaskDef end_task_def;
  925. end_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  926. end_task_def.set_stream_id(op_desc->GetStreamId());
  927. LogTimeStampDef *end_log_def = end_task_def.mutable_log_timestamp();
  928. GE_CHECK_NOTNULL(end_log_def);
  929. end_log_def->set_logid(kProfilingIterEndLogid);
  930. end_log_def->set_notify(true);
  931. task_def_list.emplace_back(end_task_def);
  932. }
  933. uint32_t all_reduce_task_idx = 0;
  934. bool is_insert_all_reduce_task = false;
  935. if (is_all_reduce && is_insert_bp_profiling_task) {
  936. all_reduce_task_idx = all_reduce_node_idx;
  937. is_insert_all_reduce_task = true;
  938. }
  939. for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
  940. if (all_reduce_nodes[i] == node_index) {
  941. all_reduce_task_idx = i;
  942. is_insert_all_reduce_task = true;
  943. break;
  944. }
  945. }
  946. if (is_insert_all_reduce_task) {
  947. GELOGI("The end allreduce operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  948. TaskDef ar_task_def;
  949. ar_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  950. ar_task_def.set_stream_id(op_desc->GetStreamId());
  951. LogTimeStampDef *ar_log_def = ar_task_def.mutable_log_timestamp();
  952. GE_CHECK_NOTNULL(ar_log_def);
  953. GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(all_reduce_task_idx, kProfilingArStep),
  954. GELOGE(FAILED, "Multiply result is out of range.");
  955. return FAILED);
  956. auto log_id = all_reduce_task_idx * kProfilingArStep + kProfilingArEndLogid;
  957. ar_log_def->set_logid(log_id);
  958. ar_log_def->set_notify(false);
  959. task_def_list.emplace_back(ar_task_def);
  960. }
  961. return SUCCESS;
  962. }
  963. bool TaskGenerator::IsProfPoint(const OpDescPtr &op, const std::string &name) {
  964. if (op == nullptr) {
  965. return false;
  966. }
  967. if (op->GetName() == name) {
  968. return true;
  969. }
  970. std::vector<std::string> original_op_names;
  971. bool ret = AttrUtils::GetListStr(op, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_op_names);
  972. if (!ret) {
  973. return false;
  974. }
  975. for (auto &origin_name : original_op_names) {
  976. if (origin_name == name) {
  977. return true;
  978. }
  979. }
  980. return false;
  981. }
  982. Status TaskGenerator::SetUnknownShapeStream(RunContext &run_context, rtStream_t &stream) {
  983. GE_CHK_RT_RET(rtStreamCreate(&stream, 0));
  984. run_context.stream = stream;
  985. rtError_t rt_ret = rtModelBindStream(run_context.model, stream, 0);
  986. if (rt_ret != RT_ERROR_NONE) {
  987. GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  988. GE_CHK_RT_RET(rtStreamDestroy(stream));
  989. return FAILED;
  990. }
  991. return SUCCESS;
  992. }
  993. Status TaskGenerator::DestroyUnknownShapeStream(RunContext &run_context, rtStream_t &stream) {
  994. GE_CHK_RT(rtModelUnbindStream(run_context.model, stream));
  995. GE_CHK_RT_RET(rtStreamDestroy(stream));
  996. return SUCCESS;
  997. }
  998. Status TaskGenerator::SetKnownShapeStream(RunContext &run_context, int64_t stream_id) {
  999. if (stream_id < 0 || stream_id >= static_cast<int64_t>(run_context.graphStreamList.size())) {
  1000. GELOGE(INTERNAL_ERROR, "Stream id[%ld] is invalid, stream list size=%zu", stream_id,
  1001. run_context.graphStreamList.size());
  1002. return INTERNAL_ERROR;
  1003. }
  1004. run_context.stream = run_context.graphStreamList[stream_id];
  1005. return SUCCESS;
  1006. }
  1007. } // namespace ge
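
The profiling trace tasks above use a fixed log-id scheme: the FP start id is 1, the BP end id is 2, the idx-th all-reduce node gets start and end ids of idx * kProfilingArStep + 3 and idx * kProfilingArStep + 4, and the iteration-end id is 65535. A minimal standalone sketch of that arithmetic, with hypothetical helper names that are not part of the file:

#include <cstdint>
#include <iostream>

constexpr uint32_t kProfilingArStep = 2;
constexpr uint64_t kProfilingFpStartLogid = 1;
constexpr uint64_t kProfilingBpEndLogid = 2;
constexpr uint64_t kProfilingArStartLogid = 3;
constexpr uint64_t kProfilingArEndLogid = 4;
constexpr uint64_t kProfilingIterEndLogid = 65535;

// Hypothetical helpers (not in the file): log ids for the idx-th all-reduce
// node, matching the expressions used in InsertProfilingTaskBefore/After.
uint64_t ArStartLogId(uint64_t idx) { return idx * kProfilingArStep + kProfilingArStartLogid; }
uint64_t ArEndLogId(uint64_t idx) { return idx * kProfilingArStep + kProfilingArEndLogid; }

int main() {
  std::cout << "FP start: " << kProfilingFpStartLogid      // 1
            << ", BP end: " << kProfilingBpEndLogid        // 2
            << ", iter end: " << kProfilingIterEndLogid    // 65535
            << '\n';
  for (uint64_t idx = 0; idx < 3; ++idx) {
    // Prints start/end pairs 3/4, 5/6, 7/8 for the first three all-reduce nodes.
    std::cout << "AR #" << idx << ": start " << ArStartLogId(idx)
              << ", end " << ArEndLogId(idx) << '\n';
  }
  return 0;
}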

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph produced by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user.
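
Within that flow, the TaskGenerator in this file is the graph-build step that turns an optimized graph into an executable task list. A minimal sketch of how a caller might drive it, assuming the Model, ComputeGraphPtr, RunContext and variable-memory information have already been prepared by earlier build stages (the wrapper function below is hypothetical, not GE's actual call site):

#include "graph/build/task_generator.h"

// Hypothetical wrapper: shown only to illustrate the entry point of this file.
ge::Status BuildTaskInfo(ge::Model &model, ge::ComputeGraphPtr &graph, uint64_t session_id,
                         ge::RunContext &run_context, uint8_t *var_mem_base, uint64_t var_mem_size) {
  // TaskGenerator only needs the variable memory base and size at construction time.
  ge::TaskGenerator task_generator(var_mem_base, var_mem_size);
  // GetTaskInfo generates the per-node tasks, records the task-index-to-op-name map
  // on the model, and serializes the resulting ModelTaskDef into MODEL_ATTR_TASKS.
  return task_generator.GetTaskInfo(model, graph, session_id, run_context);
}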