
task_generator.cc 47 kB

  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/build/task_generator.h"
  17. #include <string>
  18. #include <utility>
  19. #include "common/profiling/profiling_manager.h"
  20. #include "common/types.h"
  21. #include "common/util.h"
  22. #include "framework/common/debug/ge_log.h"
  23. #include "graph/debug/ge_attr_define.h"
  24. #include "graph/ge_context.h"
  25. #include "graph/manager/graph_var_manager.h"
  26. #include "graph/model_serialize.h"
  27. #include "graph/utils/node_utils.h"
  28. #include "graph/utils/tensor_utils.h"
  29. #include "graph/utils/type_utils.h"
  30. #include "graph/common/ge_call_wrapper.h"
  31. #include "init/gelib.h"
  32. #include "graph/ge_local_context.h"
  33. #include "ge/ge_api_types.h"
  34. #include "opskernel_manager/ops_kernel_builder_manager.h"
  35. using domi::LogTimeStampDef;
  36. using domi::ModelTaskDef;
  37. using domi::TaskDef;
  38. using std::map;
  39. using std::set;
  40. using std::string;
  41. using std::vector;
  42. namespace {
  43. const char *const kIsFirstNode = "is_first_node";
  44. const char *const kIsLastNode = "is_last_node";
  45. const char *const kIsInputVar = "INPUT_IS_VAR";
  46. const char *const kIsOutputVar = "OUTPUT_IS_VAR";
  47. const char *const kProfilingMode = "PROFILING_MODE";
  48. const uint32_t kProfilingArStep = 2;
  49. const uint64_t kProfilingFpStartLogid = 1;
  50. const uint64_t kProfilingBpEndLogid = 2;
  51. const uint64_t kProfilingArStartLogid = 3;
  52. const uint64_t kProfilingArEndLogid = 4;
  53. const uint64_t kProfilingIterEndLogid = 65535;
  54. const int64_t kHashFactor = 100000;
  55. const int64_t kInvalidGroupId = -1;
  56. } // namespace
  57. namespace ge {
  58. TaskGenerator::TaskGenerator(uint8_t *var_mem_base, uint64_t var_mem_size) {
  59. var_mem_base_ = var_mem_base;
  60. var_mem_size_ = var_mem_size;
  61. }
  62. TaskGenerator::~TaskGenerator() {}
  63. Status TaskGenerator::GetTaskInfo(Model &model, ComputeGraphPtr &graph, uint64_t session_id, RunContext &run_context) {
  64. GELOGD("Begin to Get TaskInfo. session_id=%lu", session_id);
  65. // Check params
  66. if (graph == nullptr) {
  67. GELOGE(PARAM_INVALID, "GetTaskInfo param graph is null. session_id=%lu", session_id);
  68. return PARAM_INVALID;
  69. }
  70. std::vector<TaskDef> task_def_list;
  71. std::map<uint32_t, string> op_name_map;
  72. GE_DUMP(graph, "GenerateTaskBefore");
  73. Status ret = GenerateTask(run_context, graph, task_def_list, op_name_map);
  74. GE_DUMP(graph, "GenerateTaskAfter");
  75. if (ret != SUCCESS) {
  76. GELOGE(ret, "GenerateTask failed. session_id=%lu", session_id);
  77. return ret;
  78. }
  79. // op_name_map used when graph load
  80. graph->SetGraphOpName(op_name_map);
  81. // Set op_name for infer profiling
  82. vector<string> op_name;
  83. for (auto &iter : op_name_map) {
  84. op_name.push_back(iter.second);
  85. }
  86. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(model, ATTR_MODEL_TASK_INDEX_OP_NAME, op_name),
  87. GELOGE(FAILED, "SetListStr failed.");
  88. return FAILED);
  89. GELOGI("GenerateTask Success, task list:%zu, op map:%zu, logic mem base:%p, logic weight base:%p, logic var base:%p",
  90. task_def_list.size(), op_name_map.size(), run_context.dataMemBase, run_context.weightMemBase, var_mem_base_);
  91. // Init and serialize model_task_def
  92. ModelTaskDef model_task_def;
  93. model_task_def.set_memory_size(run_context.dataMemSize);
  94. model_task_def.set_weight_size(run_context.weightMemSize);
  95. for (const TaskDef &task_def_temp : task_def_list) {
  96. TaskDef *task_def = model_task_def.add_task();
  97. if (task_def == nullptr) {
  98. GELOGE(FAILED, "task_def is nullptr.");
  99. return FAILED;
  100. }
  101. *task_def = task_def_temp;
  102. }
  103. ret = AddModelTaskToModel(model_task_def, session_id, model, run_context);
  104. if (ret != SUCCESS) {
  105. GELOGE(ret, "AddModelTaskToModel failed. session_id=%lu", session_id);
  106. return ret;
  107. }
  108. GELOGD("Get TaskInfo success. session_id=%lu", session_id);
  109. return SUCCESS;
  110. }
  111. Status TaskGenerator::AddModelTaskToModel(const ModelTaskDef &model_task_def, uint64_t session_id, ge::Model &model,
  112. RunContext &run_context) {
  113. GE_CHK_BOOL_EXEC(
  114. AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_BASE_ADDR, reinterpret_cast<uintptr_t>(run_context.dataMemBase)),
  115. GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_BASE_ADDR failed.");
  116. return FAILED);
  117. GE_CHK_BOOL_EXEC(
  118. AttrUtils::SetInt(model, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, reinterpret_cast<uintptr_t>(run_context.weightMemBase)),
  119. GELOGE(FAILED, "SetInt MODEL_ATTR_TASK_GEN_WEIGHT_ADDR failed.");
  120. return FAILED);
  121. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_TASK_GEN_VAR_ADDR, reinterpret_cast<uintptr_t>(var_mem_base_)),
  122. GELOGE(FAILED, "SetInt ATTR_MODEL_TASK_GEN_VAR_ADDR failed.");
  123. return FAILED);
  124. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, ATTR_MODEL_VAR_SIZE, var_mem_size_),
  125. GELOGE(FAILED, "SetInt ATTR_MODEL_VAR_SIZE failed.");
  126. return FAILED);
  127. GE_CHK_BOOL_EXEC(AttrUtils::SetInt(model, MODEL_ATTR_SESSION_ID, session_id),
  128. GELOGE(FAILED, "SetInt MODEL_ATTR_SESSION_ID failed.");
  129. return FAILED);
  130. size_t task_size = model_task_def.ByteSizeLong();
  131. ge::Buffer serial_buff(task_size);
  132. if (!model_task_def.SerializePartialToArray(serial_buff.GetData(), static_cast<int>(task_size))) {
  133. GELOGE(FAILED, "model_task_def's serialize failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
  134. task_size);
  135. return FAILED;
  136. }
  137. if (!AttrUtils::SetZeroCopyBytes(model, MODEL_ATTR_TASKS, std::move(serial_buff))) {
  138. GELOGE(FAILED, "Set model task to model failed, model name = %s, task_size=%zu.", model.GetName().c_str(),
  139. task_size);
  140. return FAILED;
  141. }
  142. return SUCCESS;
  143. }
  144. Status TaskGenerator::UpdateOpIsVarAttr(const OpDescPtr &op_desc, uint64_t session_id) {
  145. vector<int64_t> input_offsets = op_desc->GetInputOffset();
  146. GELOGD("Update is var attr, node[name:%s(%s), id:%ld, stream_id:%ld].", op_desc->GetName().c_str(),
  147. op_desc->GetType().c_str(), op_desc->GetId(), op_desc->GetStreamId());
  148. if (!(input_offsets.empty())) {
  149. vector<bool> input_var;
  150. for (int64_t input : input_offsets) {
  151. input_var.push_back(VarManager::Instance(session_id)->IsVarAddr(input));
  152. }
  153. GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsInputVar, input_var), GELOGE(FAILED, "SetListBool failed.");
  154. return FAILED);
  155. }
  156. vector<int64_t> output_offsets = op_desc->GetOutputOffset();
  157. if (!(output_offsets.empty())) {
  158. vector<bool> output_var;
  159. for (int64_t output : output_offsets) {
  160. output_var.push_back(VarManager::Instance(session_id)->IsVarAddr(output));
  161. }
  162. GE_CHK_BOOL_EXEC(AttrUtils::SetListBool(op_desc, kIsOutputVar, output_var), GELOGE(FAILED, "SetListBool failed.");
  163. return FAILED);
  164. }
  165. return SUCCESS;
  166. }
  167. Status TaskGenerator::SaveFusionNodes(map<int64_t, std::vector<NodePtr>> &fusion_nodes, ComputeGraphPtr &graph) {
  168. std::map<NodePtr, int64_t> nodes_with_group_attr;
  169. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  170. OpDescPtr op_desc = node->GetOpDesc();
  171. GE_CHECK_NOTNULL(op_desc);
  172. int64_t group_id = kInvalidGroupId;
  173. string name = node->GetName();
  174. string type = node->GetType();
  175. // For the fusion ddb pass, task defs must be continuous.
  176. // Part 1: store
  177. // If op_desc carries this tag, store the node in the map first and
  178. // call GenerateTask for the stored elements at the end.
  179. // Only L1 and L2 fusion are handled for now.
  180. if (ge::AttrUtils::GetInt(op_desc, ATTR_NAME_L1_FUSION_GROUP_ID, group_id) ||
  181. ge::AttrUtils::GetInt(op_desc, ATTR_NAME_L2_FUSION_GROUP_ID, group_id)) {
  182. auto stream_id = op_desc->GetStreamId();
  183. auto group_key = group_id + stream_id * kHashFactor;
  184. (void)ge::AttrUtils::SetInt(op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key);
  185. GELOGD("Fusion: store node[name:%s(%s), group id:%ld, group key:%ld, stream_id:%ld] task.", name.c_str(),
  186. type.c_str(), group_id, group_key, op_desc->GetStreamId());
  187. fusion_nodes[group_key].push_back(node);
  188. nodes_with_group_attr.insert({node, group_id});
  189. }
  190. // If all of this node's input nodes share the same group attr
  191. // but this node has no group attr or a different one,
  192. // treat it as a suspicious case and log a warning.
  193. bool call_check = true;
  194. std::unordered_set<int64_t> input_group_ids;
  195. for (const auto &input_node : node->GetInNodes()) {
  196. auto iter = nodes_with_group_attr.find(input_node);
  197. if (iter == nodes_with_group_attr.end()) {
  198. call_check = false;
  199. break;
  200. } else {
  201. input_group_ids.insert(iter->second);
  202. }
  203. }
  204. call_check = (call_check && (input_group_ids.size() == 1));
  205. if (call_check) {
  206. auto input_group_id = *input_group_ids.begin();
  207. if (group_id != input_group_id) {
  208. GELOGW("Fusion: node[name:%s(%s)] group id:%ld differs from its input nodes' group id:%ld.",
  209. name.c_str(), type.c_str(), group_id, input_group_id);
  210. }
  211. }
  212. }
  213. GELOGD("Fusion: get fusion group numbers [%zu].", fusion_nodes.size());
  214. return SUCCESS;
  215. }
  216. Status TaskGenerator::GenerateTask(RunContext &run_context, ComputeGraphPtr &graph,
  217. vector<domi::TaskDef> &task_def_list, map<uint32_t, string> &op_name_map) {
  218. GELOGD("Beign to generate task, graph name is %s.", graph->GetName().c_str());
  219. std::shared_ptr<GELib> ge_lib = GELib::GetInstance();
  220. if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
  221. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GenerateTask failed.");
  222. return GE_CLI_GE_NOT_INITIALIZED;
  223. }
  224. GE_CHK_STATUS_RET(MarkNodeAndSetIndex(graph), "MarkNodeAndSetIndex failed.");
  225. ProfilingPoint profiling_point;
  226. vector<uint32_t> all_reduce_nodes;
  227. GE_CHK_STATUS_RET(FindProfilingTaskIndex(graph, profiling_point, all_reduce_nodes));
  228. const OpsKernelManager &ops_kernel_manager = ge_lib->OpsKernelManagerObj();
  229. GE_TIMESTAMP_CALLNUM_START(GenerateTask);
  230. // map store fusion nodes
  231. map<int64_t, std::vector<NodePtr>> fusion_nodes;
  232. string buffer_optimize = "off_optimize";
  233. (void)ge::GetContext().GetOption(BUFFER_OPTIMIZE, buffer_optimize);
  234. if (buffer_optimize != "off_optimize") {
  235. GE_CHK_STATUS_RET(SaveFusionNodes(fusion_nodes, graph));
  236. }
  237. std::unordered_set<Node *> fusion_nodes_seen;
  238. int64_t group_key;
  239. uint32_t node_index = 0;
  240. rtStream_t stream = nullptr;
  241. bool is_unknown_shape = graph->GetGraphUnknownFlag() || GetContext().GetHostExecFlag();
  242. if (is_unknown_shape) {
  243. GE_CHK_STATUS_RET(SetUnknownShapeStream(run_context, stream), "Set unknown shape stream failed.");
  244. }
  245. std::function<void()> callback = [&]() {
  246. if (is_unknown_shape) {
  247. if (DestroyUnknownShapeStream(run_context, stream) != SUCCESS) {
  248. GELOGE(FAILED, "Destory unknown shape stream failed.");
  249. }
  250. }
  251. };
  252. GE_MAKE_GUARD(release, callback);
  253. uint64_t all_reduce_node_idx = 0;
  254. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  255. OpDescPtr op_desc = node->GetOpDesc();
  256. GE_CHECK_NOTNULL(op_desc);
  257. node_index++;
  258. string name = node->GetName();
  259. string type = node->GetType();
  260. bool attr_notask = false;
  261. bool get_attr_notask_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask);
  262. GE_IF_BOOL_EXEC(get_attr_notask_flag && attr_notask,
  263. GELOGI("Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str());
  264. continue);
  265. GE_CHK_STATUS_RET(UpdateOpIsVarAttr(op_desc, graph->GetSessionID()));
  266. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  267. // For the fusion ddb pass, task defs must be continuous.
  268. // Part 2: call
  269. auto fusion_task_info =
  270. FusionTaskInfo{run_context, graph, node, op_desc, node_index, ge_lib,
  271. ops_kernel_manager, task_def_list, op_name_map, profiling_point, all_reduce_nodes, all_reduce_node_idx};
  272. GE_CHK_STATUS_RET(GenerateTaskForFusionNode(fusion_task_info, fusion_nodes, fusion_nodes_seen),
  273. "Call GenerateTaskForFusionNode node:%s(%s) failed", name.c_str(), type.c_str());
  274. // continue directly
  275. if (ge::AttrUtils::GetInt(op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key)) {
  276. GELOGI("Fusion node[name:%s, type:%s] do not need generate task again.", name.c_str(), type.c_str());
  277. continue;
  278. }
  279. if (op_kernel_lib_name.empty()) {
  280. GELOGI("Node[name:%s, type:%s] does not need to generate task.", name.c_str(), type.c_str());
  281. continue;
  282. }
  283. auto kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
  284. if (kernel_info_store == nullptr) {
  285. GELOGE(INTERNAL_ERROR,
  286. "No ops kernel store or ops kernel builder found. node:%s(%s), op_kernel_lib_name=%s.",
  287. name.c_str(),
  288. type.c_str(), op_kernel_lib_name.c_str());
  289. return INTERNAL_ERROR;
  290. }
  291. GE_CHK_STATUS_RET(UpdateAnchorStatus(node), "Call UpdateAnchorStatus node:%s(%s) failed", name.c_str(),
  292. type.c_str());
  293. // Profiling task
  294. size_t task_list_size_before = task_def_list.size();
  295. GE_CHK_STATUS_RET(InsertProfilingTaskBefore(op_desc, profiling_point, all_reduce_nodes,
  296. node_index, task_def_list, all_reduce_node_idx));
  297. int64_t op_id = op_desc->GetId();
  298. // Compatible with dynamic shape scenes, the default is 0
  299. int64_t stream_id = 0;
  300. if (!is_unknown_shape) {
  301. stream_id = op_desc->GetStreamId();
  302. GE_CHK_STATUS_RET(SetKnownShapeStream(run_context, stream_id), "node[name:%s(%s), id:%ld] stream id is invalid.",
  303. name.c_str(), type.c_str(), op_id);
  304. }
  305. GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task.", op_kernel_lib_name.c_str(),
  306. name.c_str(), type.c_str(), op_id, stream_id);
  307. GE_TIMESTAMP_RESTART(GenerateTask);
  308. auto ret = OpsKernelBuilderManager::Instance().GenerateTask(*node, run_context, task_def_list);
  309. GE_TIMESTAMP_ADD(GenerateTask);
  310. if (ret != SUCCESS) {
  311. GELOGE(ret, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task failed.",
  312. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id);
  313. return ret;
  314. }
  315. // Profiling task
  316. GE_CHK_STATUS_RET(InsertProfilingTaskAfter(op_desc, profiling_point, all_reduce_nodes,
  317. node_index, task_def_list, all_reduce_node_idx));
  318. size_t task_list_size_after = task_def_list.size();
  319. // If the task count decreased
  320. if (task_list_size_after < task_list_size_before) {
  321. GELOGE(FAILED, "Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
  322. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id, task_list_size_before,
  323. task_list_size_after);
  324. return FAILED;
  325. }
  326. // Reset stream id to ge stream id, as graph load must use ge stream to reassign stream
  327. void *ops_kernel_info_store_ptr = kernel_info_store.get();
  328. for (size_t idx = task_list_size_before; idx < task_list_size_after; ++idx) {
  329. task_def_list[idx].set_stream_id(static_cast<uint32_t>(stream_id));
  330. op_name_map[idx] = name;
  331. // Set opsKernelInfoStorePtr and op_index; these two fields are used in DistributeTask and InitTaskInfo
  332. TaskDef *task_def_ptr = &task_def_list[idx];
  333. GE_CHECK_NOTNULL(task_def_ptr);
  334. task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr));
  335. }
  336. GELOGD("Call %s to generate node[name:%s(%s), id:%ld, stream_id:%ld] task finished, generate %zu task(s).",
  337. op_kernel_lib_name.c_str(), name.c_str(), type.c_str(), op_id, stream_id,
  338. task_list_size_after - task_list_size_before);
  339. }
  340. GE_TIMESTAMP_CALLNUM_EVENT_END(GenerateTask, "GraphBuild::GenerateTask");
  341. return SUCCESS;
  342. }
  343. Status TaskGenerator::GenerateTaskForFusionNode(FusionTaskInfo &fusion_task_info,
  344. std::map<int64_t, std::vector<NodePtr>> &fusion_nodes,
  345. std::unordered_set<Node *> &fusion_nodes_seen) {
  346. Status ret = SUCCESS;
  347. int64_t group_key;
  348. auto &run_context = fusion_task_info.run_context;
  349. auto &graph = fusion_task_info.graph;
  350. auto &node = fusion_task_info.node;
  351. auto &fusion_op_desc = fusion_task_info.fusion_op_desc;
  352. auto &node_index = fusion_task_info.node_index;
  353. const auto &ops_kernel_manager = fusion_task_info.ops_kernel_manager;
  354. auto &task_def_list = fusion_task_info.task_def_list;
  355. auto &op_name_map = fusion_task_info.op_name_map;
  356. auto &profiling_point = fusion_task_info.profiling_point;
  357. auto &all_reduce_nodes = fusion_task_info.all_reduce_nodes;
  358. auto &all_reduce_idx = fusion_task_info.all_reduce_node_idx;
  359. // If op_desc has this attr, generate tasks for all nodes with the same group key on the stream together
  360. if (ge::AttrUtils::GetInt(fusion_op_desc, ATTR_NAME_FUSION_GROUP_KEY, group_key) &&
  361. (fusion_nodes_seen.count(node.get()) == 0)) {
  362. GELOGI("Fusion: start fusion group index[%ld], nodes size[%zu].", group_key, fusion_nodes[group_key].size());
  363. for (auto &fusion_node : fusion_nodes[group_key]) {
  364. OpDescPtr op_desc = fusion_node->GetOpDesc();
  365. UpdateOpIsVarAttr(op_desc, graph->GetSessionID());
  366. std::string fusion_node_name = fusion_node->GetName();
  367. std::string fusion_node_type = fusion_node->GetType();
  368. std::string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  369. if (op_kernel_lib_name.empty()) {
  370. GELOGI("Fusion: fusion_node[name:%s(%s)] task no need to generate task.", fusion_node_name.c_str(),
  371. fusion_node_type.c_str());
  372. continue;
  373. }
  374. bool attr_notask = false;
  375. GE_IF_BOOL_EXEC(ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask) && attr_notask,
  376. GELOGI("Fusion: fusion_node[name:%s, type:%s] does not need to generate task.",
  377. fusion_node_name.c_str(), fusion_node_type.c_str());
  378. continue);
  379. size_t task_list_size_before = task_def_list.size();
  380. OpsKernelInfoStorePtr kernel_info_store = ops_kernel_manager.GetOpsKernelInfoStore(op_kernel_lib_name);
  381. if (kernel_info_store == nullptr) {
  382. GELOGE(INTERNAL_ERROR,
  383. "Fusion: No ops kernel store or ops kernel builder found. fusion_node:%s(%s), op_kernel_lib_name=%s.",
  384. fusion_node_name.c_str(), fusion_node_type.c_str(), op_kernel_lib_name.c_str());
  385. return INTERNAL_ERROR;
  386. }
  387. ret = UpdateAnchorStatus(fusion_node);
  388. if (ret != SUCCESS) {
  389. GELOGE(ret, "Fusion: Call UpdateAnchorStatus fusion_node:%s(%s) failed", fusion_node_name.c_str(),
  390. fusion_node_type.c_str());
  391. return ret;
  392. }
  393. int64_t op_id = op_desc->GetId();
  394. int64_t stream_id = op_desc->GetStreamId();
  395. if (stream_id < 0 || stream_id >= (int64_t)run_context.graphStreamList.size()) {
  396. GELOGE(INTERNAL_ERROR, "Fusion: fusion_node[name:%s(%s), id:%ld] stream id is invalid, stream list size=%zu",
  397. fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, run_context.graphStreamList.size());
  398. return INTERNAL_ERROR;
  399. }
  400. // profiling task
  401. (void)InsertProfilingTaskBefore(op_desc, profiling_point, all_reduce_nodes,
  402. node_index, task_def_list, all_reduce_idx);
  403. run_context.stream = run_context.graphStreamList[stream_id];
  404. GELOGI("Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld] task.",
  405. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
  406. ret = OpsKernelBuilderManager::Instance().GenerateTask(*fusion_node, run_context, task_def_list);
  407. if (ret != SUCCESS) {
  408. GELOGE(ret,
  409. "Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
  410. "id:%ld, stream_id:%ld] task failed.",
  411. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id);
  412. return ret;
  413. }
  414. // profiling task
  415. (void)InsertProfilingTaskAfter(op_desc, profiling_point, all_reduce_nodes,
  416. node_index, task_def_list, all_reduce_idx);
  417. size_t task_list_size_after = task_def_list.size();
  418. // if the task count decreased
  419. if (task_list_size_after < task_list_size_before) {
  420. GELOGE(FAILED,
  421. "Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), "
  422. "id:%ld, stream_id:%ld] task. but task num from %zu to %zu.",
  423. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
  424. task_list_size_before, task_list_size_after);
  425. return FAILED;
  426. }
  427. // reset stream id to ge stream id, as graph load must use ge stream to reassign stream
  428. void *ops_kernel_info_store_ptr = kernel_info_store.get();
  429. for (size_t idx = task_list_size_before; idx < task_list_size_after; ++idx) {
  430. task_def_list[idx].set_stream_id(static_cast<uint32_t>(stream_id));
  431. op_name_map[idx] = fusion_node_name;
  432. // set opsKernelInfoStorePtr and op_index; these two fields are used in DistributeTask and InitTaskInfo
  433. TaskDef *task_def_ptr = &task_def_list[idx];
  434. task_def_ptr->set_ops_kernel_store_ptr(reinterpret_cast<uintptr_t>(ops_kernel_info_store_ptr));
  435. }
  436. GELOGI(
  437. "Fusion: Call %s to generate fusion_node:[fusion_node_name:%s(%s), id:%ld, stream_id:%ld]"
  438. " task finished, generate %u task(s).",
  439. op_kernel_lib_name.c_str(), fusion_node_name.c_str(), fusion_node_type.c_str(), op_id, stream_id,
  440. task_list_size_after - task_list_size_before);
  441. // Record nodes for which GenerateTask has been called successfully
  442. fusion_nodes_seen.insert(fusion_node.get());
  443. node_index++;
  444. }
  445. }
  446. // Nodes without the tag, or already seen, are skipped directly
  447. return ret;
  448. }
  449. Status TaskGenerator::UpdateAnchorStatus(const NodePtr &node) {
  450. if (NodeUtils::SetAllAnchorStatus(node) != GRAPH_SUCCESS) {
  451. GELOGE(INTERNAL_ERROR, "NodeUtils::SetAllAnchorStatus failed.");
  452. return INTERNAL_ERROR;
  453. }
  454. for (auto &anchor : node->GetAllInDataAnchors()) {
  455. auto peer_anchor = anchor->GetPeerOutAnchor();
  456. if (peer_anchor == nullptr) {
  457. if (AnchorUtils::SetStatus(anchor, ANCHOR_SUSPEND) != GRAPH_SUCCESS) {
  458. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  459. return INTERNAL_ERROR;
  460. }
  461. continue;
  462. }
  463. std::string const_type;
  464. bool is_const = NodeUtils::GetConstOpType(peer_anchor->GetOwnerNode(), const_type);
  465. if (is_const && (const_type == CONSTANT)) {
  466. if (AnchorUtils::SetStatus(anchor, ANCHOR_CONST) != GRAPH_SUCCESS) {
  467. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  468. return INTERNAL_ERROR;
  469. }
  470. } else {
  471. if (AnchorUtils::SetStatus(anchor, ANCHOR_DATA) != GRAPH_SUCCESS) {
  472. GELOGE(INTERNAL_ERROR, "AnchorUtils::SetStatus failed.");
  473. return INTERNAL_ERROR;
  474. }
  475. }
  476. }
  477. return SUCCESS;
  478. }
  479. Status TaskGenerator::MarkNodeAndSetIndex(ComputeGraphPtr &graph) {
  480. auto ge_lib = GELib::GetInstance();
  481. if ((ge_lib == nullptr) || !ge_lib->InitFlag()) {
  482. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GE is not initialized or is finalized.");
  483. return GE_CLI_GE_NOT_INITIALIZED;
  484. }
  485. const auto all_nodes = graph->GetNodes(graph->GetGraphUnknownFlag());
  486. if (all_nodes.empty()) {
  487. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "Graph's node is empty");
  488. return GE_GRAPH_GRAPH_NODE_NULL;
  489. }
  490. int64_t node_index = 0;
  491. for (auto &node : all_nodes) {
  492. OpDescPtr op_desc = node->GetOpDesc();
  493. GE_CHECK_NOTNULL(op_desc);
  494. op_desc->SetId(node_index++);
  495. }
  496. map<int64_t, vector<OpDescPtr>> all_stream_ops;
  497. for (auto &node : all_nodes) {
  498. OpDescPtr op_desc = node->GetOpDesc();
  499. GE_CHECK_NOTNULL(op_desc);
  500. // Reset op kernel lib name
  501. if (op_desc->GetOpKernelLibName().empty()) {
  502. (void)ge_lib->DNNEngineManagerObj().GetDNNEngineName(node);
  503. }
  504. (void)op_desc->DelAttr(kIsFirstNode);
  505. (void)op_desc->DelAttr(kIsLastNode);
  506. all_stream_ops[op_desc->GetStreamId()].emplace_back(op_desc);
  507. }
  508. bool is_single_stream = all_stream_ops.size() == 1;
  509. for (const auto &stream_ops : all_stream_ops) {
  510. Status status = MarkFirstAndLastOps(stream_ops.second, is_single_stream);
  511. if (status != SUCCESS) {
  512. GELOGE(status, "Mark first and last nodes failed.");
  513. return status;
  514. }
  515. }
  516. return SUCCESS;
  517. }
  518. Status TaskGenerator::MarkFirstAndLastOps(const vector<OpDescPtr> &ops, bool is_single_stream) const {
  519. vector<vector<OpDescPtr>> continuous_op_lists(1);
  520. const set<string> separator_types(
  521. {LABELSET, LABELGOTO, LABELGOTOEX, LABELSWITCH, LABELSWITCHBYINDEX, STREAMSWITCH, STREAMSWITCHN});
  522. for (auto &op_desc : ops) {
  523. bool attr_notask = false;
  524. if (ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask) && attr_notask) {
  525. continue;
  526. }
  527. string op_type = op_desc->GetType();
  528. if ((!is_single_stream && !op_desc->GetSubgraphInstanceNames().empty()) || separator_types.count(op_type) != 0) {
  529. continuous_op_lists.emplace_back(vector<OpDescPtr>());
  530. } else {
  531. continuous_op_lists.back().emplace_back(op_desc);
  532. }
  533. }
  534. GELOGD("Number of continuous node lists is %zu.", continuous_op_lists.size());
  535. for (const auto &continuous_ops : continuous_op_lists) {
  536. map<string, std::pair<OpDescPtr, OpDescPtr>> first_and_last_ops;
  537. for (auto &op_desc : continuous_ops) {
  538. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  539. if (op_kernel_lib_name.empty()) {
  540. GELOGE(INTERNAL_ERROR, "node:%s(%s) get op kernel lib failed.", op_desc->GetName().c_str(),
  541. op_desc->GetType().c_str());
  542. return INTERNAL_ERROR;
  543. }
  544. auto it = first_and_last_ops.find(op_kernel_lib_name);
  545. if (it == first_and_last_ops.end()) {
  546. first_and_last_ops.emplace(op_kernel_lib_name, std::make_pair(op_desc, op_desc));
  547. } else {
  548. it->second.second = op_desc;
  549. }
  550. }
  551. for (auto &it : first_and_last_ops) {
  552. auto &op_pair = it.second;
  553. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.first, kIsFirstNode, true), GELOGE(FAILED, "SetBool failed.");
  554. return FAILED);
  555. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(op_pair.second, kIsLastNode, true), GELOGE(FAILED, "SetBool failed.");
  556. return FAILED);
  557. }
  558. }
  559. return SUCCESS;
  560. }
  561. Status TaskGenerator::AutoFindFpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point) const {
  562. GELOGI("Start AutoFindFpOpIndex");
  563. OpDescPtr fp_op_desc = nullptr;
  564. uint32_t current_idx = 0;
  565. uint32_t first_fp = 0;
  566. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  567. OpDescPtr op_desc = node->GetOpDesc();
  568. GE_CHECK_NOTNULL(op_desc);
  569. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  570. if (op_kernel_lib_name.empty()) {
  571. continue;
  572. }
  573. if (op_desc->GetType() == GETNEXT || op_desc->GetType() == DATA) {
  574. auto out_anchor = node->GetOutDataAnchor(0);
  575. for (auto &peer_in_anchor : out_anchor->GetPeerInDataAnchors()) {
  576. GE_CHECK_NOTNULL(peer_in_anchor);
  577. auto in_node_desc = peer_in_anchor->GetOwnerNode()->GetOpDesc();
  578. GE_CHECK_NOTNULL(in_node_desc);
  579. if (fp_op_desc == nullptr || ((in_node_desc->GetId()) < (fp_op_desc->GetId()))) {
  580. fp_op_desc = in_node_desc;
  581. }
  582. }
  583. break;
  584. }
  585. }
  586. if (fp_op_desc == nullptr) {
  587. GELOGW("not find fp_op_desc.");
  588. return SUCCESS;
  589. }
  590. GELOGI("Find fp_op_desc is %s, id is %ld", fp_op_desc->GetName().c_str(), fp_op_desc->GetId());
  591. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  592. OpDescPtr op_desc = node->GetOpDesc();
  593. GE_CHECK_NOTNULL(op_desc);
  594. current_idx++;
  595. if (op_desc->GetName() == fp_op_desc->GetName()) {
  596. first_fp = current_idx;
  597. GELOGI("First fp name is %s, idx is %u", op_desc->GetName().c_str(), first_fp);
  598. break;
  599. }
  600. }
  601. profiling_point.fp_index = first_fp;
  602. return SUCCESS;
  603. }
  604. Status TaskGenerator::AutoFindBpOpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  605. vector<uint32_t> &all_reduce_nodes) const {
  606. GELOGI("Start AutoFindBpOpIndex");
  607. NodePtr bp_node = nullptr;
  608. uint32_t current_idx = 0;
  609. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  610. OpDescPtr op_desc = node->GetOpDesc();
  611. GE_CHECK_NOTNULL(op_desc);
  612. current_idx++;
  613. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  614. if (op_kernel_lib_name.empty()) {
  615. continue;
  616. }
  617. if (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE) {
  618. bp_node = node;
  619. all_reduce_nodes.emplace_back(current_idx);
  620. GELOGI("Allreduce name %s, idx %u", op_desc->GetName().c_str(), current_idx);
  621. }
  622. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  623. if (bp_node == nullptr) {
  624. bp_node = node;
  625. }
  626. }
  627. if (graph->GetNeedIteration()) {
  628. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT + '_' + NODE_NAME_STREAM_SWITCH + "_StreamActive") {
  629. profiling_point.end_index.insert(current_idx);
  630. GELOGI("Iter end name %s, idx %u, from Node_Output_IteratorCtrl_StreamSwitch_StreamActive",
  631. op_desc->GetName().c_str(), current_idx);
  632. }
  633. if (op_desc->GetName() == NODE_NAME_FLOWCTRL_LOOP_ASSIGN) {
  634. profiling_point.end_index.insert(current_idx);
  635. GELOGI("Iter end name %s, idx %u, from FlowCtrl_LoopCond_ASSIGN",
  636. op_desc->GetName().c_str(), current_idx);
  637. }
  638. } else {
  639. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  640. profiling_point.end_index.insert(current_idx);
  641. GELOGI("Iter end name %s, idx %u, from NETOUTPUT", op_desc->GetName().c_str(), current_idx);
  642. }
  643. }
  644. }
  645. if (bp_node == nullptr) {
  646. GELOGW("not find bp_node.");
  647. return SUCCESS;
  648. }
  649. profiling_point.bp_index = FindLastBpFromBpNode(graph, bp_node);
  650. return SUCCESS;
  651. }
  652. uint32_t TaskGenerator::FindLastBpFromBpNode(const ComputeGraphPtr &graph, const NodePtr &bp_node) const {
  653. uint32_t last_bp = 0;
  654. OpDescPtr bp_op_desc = nullptr;
  655. for (auto &in_anchor : bp_node->GetAllInDataAnchors()) {
  656. auto out_anchor = in_anchor->GetPeerOutAnchor();
  657. if (out_anchor == nullptr || out_anchor->GetOwnerNode() == nullptr) {
  658. continue;
  659. }
  660. auto out_node_desc = out_anchor->GetOwnerNode()->GetOpDesc();
  661. GE_CHECK_NOTNULL(out_node_desc);
  662. if (bp_op_desc == nullptr || ((out_node_desc->GetId()) > (bp_op_desc->GetId()))) {
  663. bp_op_desc = out_node_desc;
  664. }
  665. GELOGI("bp_op_desc is %s, id is %ld", bp_op_desc->GetName().c_str(), bp_op_desc->GetId());
  666. }
  667. GE_CHECK_NOTNULL(bp_op_desc);
  668. uint32_t current_idx = 0;
  669. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  670. OpDescPtr op_desc = node->GetOpDesc();
  671. GE_CHECK_NOTNULL(op_desc);
  672. current_idx++;
  673. if (op_desc->GetName() == bp_op_desc->GetName()) {
  674. last_bp = current_idx;
  675. GELOGI("First bp name %s, idx %u", op_desc->GetName().c_str(), last_bp);
  676. break;
  677. }
  678. }
  679. return last_bp;
  680. }
  681. Status TaskGenerator::FindFpOfEnv(const ComputeGraphPtr &graph, const std::string &fp_point_str,
  682. ProfilingPoint &profiling_point) const {
  683. GELOGI("Start FindFpOfEnv");
  684. uint32_t current_idx = 0;
  685. uint32_t first_fp = 0;
  686. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  687. OpDescPtr op_desc = node->GetOpDesc();
  688. GE_CHECK_NOTNULL(node->GetOpDesc());
  689. current_idx++;
  690. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  691. if (op_kernel_lib_name.empty()) {
  692. continue;
  693. }
  694. if (first_fp == 0 && IsProfPoint(op_desc, fp_point_str)) {
  695. first_fp = current_idx;
  696. GELOGI("First fp name from env is %s, idx %u", op_desc->GetName().c_str(), first_fp);
  697. }
  698. }
  699. profiling_point.fp_index = first_fp;
  700. return SUCCESS;
  701. }
  702. Status TaskGenerator::FindBpOfEnv(const ComputeGraphPtr &graph, const std::string &bp_point_str,
  703. ProfilingPoint &profiling_point, vector<uint32_t> &all_reduce_nodes) const {
  704. GELOGI("Start FindBpOfEnv");
  705. uint32_t current_idx = 0;
  706. uint32_t last_bp = 0;
  707. for (auto &node : graph->GetNodes(graph->GetGraphUnknownFlag())) {
  708. OpDescPtr op_desc = node->GetOpDesc();
  709. GE_CHECK_NOTNULL(node->GetOpDesc());
  710. current_idx++;
  711. string op_kernel_lib_name = op_desc->GetOpKernelLibName();
  712. if (op_kernel_lib_name.empty()) {
  713. continue;
  714. }
  715. if (graph->GetNeedIteration()) {
  716. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT + '_' + NODE_NAME_STREAM_SWITCH + "_StreamActive") {
  717. profiling_point.end_index.insert(current_idx);
  718. GELOGI("Iter end name %s, idx %u, from Node_Output_IteratorCtrl_StreamSwitch_StreamActive",
  719. op_desc->GetName().c_str(), current_idx);
  720. }
  721. if (op_desc->GetName() == NODE_NAME_FLOWCTRL_LOOP_ASSIGN) {
  722. profiling_point.end_index.insert(current_idx);
  723. GELOGI("Iter end name %s, idx %u, from FlowCtrl_LoopCond_ASSIGN",
  724. op_desc->GetName().c_str(), current_idx);
  725. }
  726. } else {
  727. if (op_desc->GetName() == NODE_NAME_NET_OUTPUT) {
  728. profiling_point.end_index.insert(current_idx);
  729. GELOGI("Iter end name %s, idx %u, from NETOUTPUT", op_desc->GetName().c_str(), current_idx);
  730. }
  731. }
  732. if (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE) {
  733. all_reduce_nodes.emplace_back(current_idx);
  734. GELOGI("Allreduce name %s, idx %u", op_desc->GetName().c_str(), current_idx);
  735. }
  736. if (IsProfPoint(op_desc, bp_point_str)) {
  737. last_bp = current_idx;
  738. GELOGI("Last bp name from env is %s, idx %u", op_desc->GetName().c_str(), last_bp);
  739. }
  740. }
  741. profiling_point.bp_index = last_bp;
  742. return SUCCESS;
  743. }
  744. Status TaskGenerator::GetFpBpIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  745. vector<uint32_t> &all_reduce_nodes, std::string &fp_point_str,
  746. std::string &bp_point_str) const {
  747. ProfilingManager::Instance().GetFpBpPoint(fp_point_str, bp_point_str);
  748. Status ret = SUCCESS;
  749. if (fp_point_str.empty()) {
  750. ret = AutoFindFpOpIndex(graph, profiling_point);
  751. if (ret != SUCCESS) {
  752. GELOGW("First forward profiling op_index not set and FindFpOpIndex failed.");
  753. return FAILED;
  754. }
  755. }
  756. if (bp_point_str.empty()) {
  757. ret = AutoFindBpOpIndex(graph, profiling_point, all_reduce_nodes);
  758. if (ret != SUCCESS) {
  759. GELOGW("Last backward profiling op_index not set and FindBpOpIndex failed.");
  760. return FAILED;
  761. }
  762. }
  763. return SUCCESS;
  764. }
  765. Status TaskGenerator::FindProfilingNodeIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  766. std::vector<uint32_t> &all_reduce_nodes) {
  767. return FindProfilingTaskIndex(graph, profiling_point, all_reduce_nodes);
  768. }
  769. Status TaskGenerator::FindProfilingTaskIndex(const ComputeGraphPtr &graph, ProfilingPoint &profiling_point,
  770. vector<uint32_t> &all_reduce_nodes) const {
  771. GE_CHECK_NOTNULL(graph);
  772. const char *profiling_mode = std::getenv(kProfilingMode);
  773. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  774. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  775. if (!is_profiling) {
  776. GELOGD("Profiling is not open.");
  777. return SUCCESS;
  778. }
  779. GELOGI("Start get FP/BP index.");
  780. std::string fp_point_str;
  781. std::string bp_point_str;
  782. Status ret = GetFpBpIndex(graph, profiling_point, all_reduce_nodes, fp_point_str, bp_point_str);
  783. if (ret != SUCCESS) {
  784. GELOGW("Get FP_POINT BP_POINT failed.");
  785. return SUCCESS;
  786. }
  787. GELOGI("fp_point_str:%s, bp_point_str:%s.", fp_point_str.c_str(), bp_point_str.c_str());
  788. if (!fp_point_str.empty()) {
  789. ret = FindFpOfEnv(graph, fp_point_str, profiling_point);
  790. if (ret != SUCCESS) {
  791. GELOGW("First backward profiling op name set but FindFpOfEnv failed.");
  792. return SUCCESS;
  793. }
  794. }
  795. if (!bp_point_str.empty()) {
  796. ret = FindBpOfEnv(graph, bp_point_str, profiling_point, all_reduce_nodes);
  797. if (ret != SUCCESS) {
  798. GELOGW("Last backward profiling op name set but FindBpOfEnv failed.");
  799. return SUCCESS;
  800. }
  801. }
  802. bool train_graph = graph->GetNeedIteration();
  803. if (profiling_point.fp_index == 0 && train_graph) {
  804. GELOGW("First forward op name can't be found in graph for training trace.");
  805. }
  806. if (profiling_point.bp_index == 0 && train_graph) {
  807. GELOGW("Last backward op name can't be found in graph for training trace.");
  808. }
  809. return SUCCESS;
  810. }
  811. Status TaskGenerator::InsertProfilingTaskBefore(const OpDescPtr &op_desc, const ProfilingPoint &profiling_point,
  812. vector<uint32_t> &all_reduce_nodes, uint32_t node_index,
  813. vector<domi::TaskDef> &task_def_list, uint64_t &all_reduce_node_idx) {
  814. const char *profiling_mode = std::getenv(kProfilingMode);
  815. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  816. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  817. bool is_insert_fp_profiling_task = false;
  818. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_FP_PROFILILNG_TASK, is_insert_fp_profiling_task);
  819. bool is_insert_bp_profiling_task = false;
  820. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_BP_PROFILILNG_TASK, is_insert_bp_profiling_task);
  821. bool no_insert_profiling_task = ((profiling_point.fp_index == 0) || (profiling_point.bp_index == 0) ||
  822. (profiling_point.end_index.empty())) &&
  823. (!(is_insert_fp_profiling_task || is_insert_bp_profiling_task));
  824. if (!is_profiling || no_insert_profiling_task) {
  825. return SUCCESS;
  826. }
  827. GELOGD("Insert fp profiling task: %d, insert bp profiling task: %d, fp index: %u, bp index: %u, end index size: %zu",
  828. is_insert_fp_profiling_task, is_insert_bp_profiling_task, profiling_point.fp_index, profiling_point.bp_index,
  829. profiling_point.end_index.size());
  830. if ((profiling_point.fp_index == node_index) || is_insert_fp_profiling_task) {
  831. uint64_t jobid_log_id = ge::GetContext().TraceId();
  832. GELOGI("The first FP operator is %s, idx %u, job_id %lu", op_desc->GetName().c_str(), node_index, jobid_log_id);
  833. TaskDef job_task_def;
  834. job_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  835. job_task_def.set_stream_id(op_desc->GetStreamId());
  836. LogTimeStampDef *job_log_def = job_task_def.mutable_log_timestamp();
  837. if (job_log_def != nullptr) {
  838. job_log_def->set_logid(jobid_log_id);
  839. job_log_def->set_notify(false);
  840. }
  841. task_def_list.emplace_back(job_task_def);
  842. TaskDef fp_task_def;
  843. fp_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  844. fp_task_def.set_stream_id(op_desc->GetStreamId());
  845. LogTimeStampDef *fp_log_def = fp_task_def.mutable_log_timestamp();
  846. if (fp_log_def != nullptr) {
  847. fp_log_def->set_logid(kProfilingFpStartLogid);
  848. fp_log_def->set_notify(false);
  849. }
  850. task_def_list.emplace_back(fp_task_def);
  851. }
  852. bool is_all_reduce = (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE);
  853. uint64_t all_reduce_task_idx = 0;
  854. bool is_insert_all_reduce_task = false;
  855. if (is_all_reduce && is_insert_bp_profiling_task) {
  856. all_reduce_task_idx = all_reduce_node_idx;
  857. is_insert_all_reduce_task = true;
  858. }
  859. if (is_all_reduce) {
  860. all_reduce_node_idx++;
  861. }
  862. if (!is_insert_all_reduce_task) {
  863. for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
  864. if (all_reduce_nodes[i] == node_index) {
  865. all_reduce_task_idx = i;
  866. is_insert_all_reduce_task = true;
  867. break;
  868. }
  869. }
  870. }
  871. if (is_insert_all_reduce_task) {
  872. GELOGI("The start allreduce operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  873. TaskDef ar_task_def;
  874. ar_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  875. ar_task_def.set_stream_id(op_desc->GetStreamId());
  876. LogTimeStampDef *ar_log_def = ar_task_def.mutable_log_timestamp();
  877. if (ar_log_def != nullptr) {
  878. GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(all_reduce_task_idx, kProfilingArStep),
  879. GELOGE(FAILED, "Multiply result is out of range.");
  880. return FAILED);
  881. auto log_id = all_reduce_task_idx * kProfilingArStep + kProfilingArStartLogid;
  882. ar_log_def->set_logid(log_id);
  883. ar_log_def->set_notify(false);
  884. (void)ge::AttrUtils::SetInt(op_desc, ATTR_NAME_INSERT_PROFILILNG_TASK_LOG_ID, log_id);
  885. }
  886. task_def_list.push_back(ar_task_def);
  887. }
  888. return SUCCESS;
  889. }
  890. Status TaskGenerator::InsertProfilingTaskAfter(const OpDescPtr &op_desc, const ProfilingPoint &profiling_point,
  891. vector<uint32_t> &all_reduce_nodes, uint32_t node_index,
  892. vector<domi::TaskDef> &task_def_list, uint64_t all_reduce_node_idx) {
  893. GE_CHECK_NOTNULL(op_desc);
  894. const char *profiling_mode = std::getenv(kProfilingMode);
  895. bool is_profiling = (profiling_mode != nullptr) || ProfilingManager::Instance().ProfilingOn() ||
  896. ProfilingManager::Instance().ProfilingTrainingTraceOn();
  897. bool is_insert_bp_profiling_task = false;
  898. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_BP_PROFILILNG_TASK, is_insert_bp_profiling_task);
  899. bool is_insert_end_profiling_task = false;
  900. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NAME_INSERT_END_PROFILILNG_TASK, is_insert_end_profiling_task);
  901. bool no_insert_profiling_task = ((profiling_point.fp_index == 0) || (profiling_point.bp_index == 0) ||
  902. (profiling_point.end_index.empty())) &&
  903. (!(is_insert_bp_profiling_task || is_insert_end_profiling_task));
  904. if (!is_profiling || no_insert_profiling_task) {
  905. return SUCCESS;
  906. }
  907. GELOGD("Insert bp profiling task: %d, insert end profiling task: %d, fp index: %u, bp index: %u, end index size: %zu",
  908. is_insert_bp_profiling_task, is_insert_end_profiling_task, profiling_point.fp_index, profiling_point.bp_index,
  909. profiling_point.end_index.size() );
  910. bool is_all_reduce = (op_desc->GetType() == HCOMALLREDUCE || op_desc->GetType() == HVDCALLBACKALLREDUCE);
  911. if ((profiling_point.bp_index == node_index) || (!is_all_reduce && is_insert_bp_profiling_task)) {
  912. GELOGI("The last BP operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  913. TaskDef bp_task_def;
  914. bp_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  915. bp_task_def.set_stream_id(op_desc->GetStreamId());
  916. LogTimeStampDef *bp_log_def = bp_task_def.mutable_log_timestamp();
  917. GE_CHECK_NOTNULL(bp_log_def);
  918. bp_log_def->set_logid(kProfilingBpEndLogid);
  919. bp_log_def->set_notify(false);
  920. task_def_list.emplace_back(bp_task_def);
  921. }
  922. if (profiling_point.end_index.find(node_index) != profiling_point.end_index.end() ||
  923. is_insert_end_profiling_task) {
  924. GELOGI("The iteration end operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  925. TaskDef end_task_def;
  926. end_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  927. end_task_def.set_stream_id(op_desc->GetStreamId());
  928. LogTimeStampDef *end_log_def = end_task_def.mutable_log_timestamp();
  929. GE_CHECK_NOTNULL(end_log_def);
  930. end_log_def->set_logid(kProfilingIterEndLogid);
  931. end_log_def->set_notify(true);
  932. task_def_list.emplace_back(end_task_def);
  933. }
  934. uint32_t all_reduce_task_idx = 0;
  935. bool is_insert_all_reduce_task = false;
  936. if (is_all_reduce && is_insert_bp_profiling_task) {
  937. all_reduce_task_idx = all_reduce_node_idx;
  938. is_insert_all_reduce_task = true;
  939. }
  940. for (size_t i = 0; i < all_reduce_nodes.size(); i++) {
  941. if (all_reduce_nodes[i] == node_index) {
  942. all_reduce_task_idx = i;
  943. is_insert_all_reduce_task = true;
  944. break;
  945. }
  946. }
  947. if (is_insert_all_reduce_task) {
  948. GELOGI("The end allreduce operator is %s, idx %u", op_desc->GetName().c_str(), node_index);
  949. TaskDef ar_task_def;
  950. ar_task_def.set_type(RT_MODEL_TASK_PROFILER_TRACE);
  951. ar_task_def.set_stream_id(op_desc->GetStreamId());
  952. LogTimeStampDef *ar_log_def = ar_task_def.mutable_log_timestamp();
  953. GE_CHECK_NOTNULL(ar_log_def);
  954. GE_IF_BOOL_EXEC(TypeUtils::CheckUint64MulOverflow(all_reduce_task_idx, kProfilingArStep),
  955. GELOGE(FAILED, "Multiply result is out of range.");
  956. return FAILED);
  957. auto log_id = all_reduce_task_idx * kProfilingArStep + kProfilingArEndLogid;
  958. ar_log_def->set_logid(log_id);
  959. ar_log_def->set_notify(false);
  960. task_def_list.emplace_back(ar_task_def);
  961. }
  962. return SUCCESS;
  963. }
  964. bool TaskGenerator::IsProfPoint(const OpDescPtr &op, const std::string &name) {
  965. if (op == nullptr) {
  966. return false;
  967. }
  968. if (op->GetName() == name) {
  969. return true;
  970. }
  971. std::vector<std::string> original_op_names;
  972. bool ret = AttrUtils::GetListStr(op, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, original_op_names);
  973. if (!ret) {
  974. return false;
  975. }
  976. for (auto &origin_name : original_op_names) {
  977. if (origin_name == name) {
  978. return true;
  979. }
  980. }
  981. return false;
  982. }
  983. Status TaskGenerator::SetUnknownShapeStream(RunContext &run_context, rtStream_t &stream) {
  984. GE_CHK_RT_RET(rtStreamCreate(&stream, 0));
  985. run_context.stream = stream;
  986. rtError_t rt_ret = rtModelBindStream(run_context.model, stream, 0);
  987. if (rt_ret != RT_ERROR_NONE) {
  988. GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  989. GE_CHK_RT_RET(rtStreamDestroy(stream));
  990. return FAILED;
  991. }
  992. return SUCCESS;
  993. }
  994. Status TaskGenerator::DestroyUnknownShapeStream(RunContext &run_context, rtStream_t &stream) {
  995. GE_CHK_RT(rtModelUnbindStream(run_context.model, stream));
  996. GE_CHK_RT_RET(rtStreamDestroy(stream));
  997. return SUCCESS;
  998. }
  999. Status TaskGenerator::SetKnownShapeStream(RunContext &run_context, int64_t stream_id) {
  1000. if (stream_id < 0 || stream_id >= static_cast<int64_t>(run_context.graphStreamList.size())) {
  1001. GELOGE(INTERNAL_ERROR, "Stream id[%ld] is invalid, stream list size=%zu", stream_id,
  1002. run_context.graphStreamList.size());
  1003. return INTERNAL_ERROR;
  1004. }
  1005. run_context.stream = run_context.graphStreamList[stream_id];
  1006. return SUCCESS;
  1007. }
  1008. } // namespace ge
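
For orientation, the public entry point of this file is TaskGenerator::GetTaskInfo. Below is a minimal sketch of how a caller inside GE's graph-build phase might drive it; the wrapper function, its arguments, and the memory values are illustrative placeholders, not the real builder code.

    // Sketch only: assumes it is compiled inside the GraphEngine tree and that
    // GELib has already been initialized by the surrounding build pipeline.
    #include "graph/build/task_generator.h"

    ge::Status BuildTasks(ge::Model &model, ge::ComputeGraphPtr &graph,
                          uint64_t session_id, ge::RunContext &run_context,
                          uint8_t *var_mem_base, uint64_t var_mem_size) {
      // var_mem_base/var_mem_size describe the logical variable-memory region;
      // in the real flow they come from the variable manager.
      ge::TaskGenerator generator(var_mem_base, var_mem_size);
      // On success the generated ModelTaskDef is serialized into the model's
      // MODEL_ATTR_TASKS attribute (see AddModelTaskToModel above).
      return generator.GetTaskInfo(model, graph, session_id, run_context);
    }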

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts: GE API and GE Core.
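
As a rough illustration of the GE API side, the following is a minimal sketch assuming the public entry points declared in ge/ge_api.h (GEInitialize, Session, AddGraph, RunGraph, GEFinalize); the empty option maps, graph construction, and tensors are placeholders, since in normal MindSpore use GE is driven automatically by the framework rather than called directly.

    #include <map>
    #include <string>
    #include <vector>
    #include "ge/ge_api.h"

    int main() {
      std::map<std::string, std::string> options;  // global GE options, left empty here
      if (ge::GEInitialize(options) != ge::SUCCESS) {
        return -1;
      }
      {
        ge::Session session(options);     // session-level options
        ge::Graph graph("demo_graph");    // normally produced and handed down by the front end (ME)
        uint32_t graph_id = 0;
        std::vector<ge::Tensor> inputs;   // input tensors would be filled in by the caller
        std::vector<ge::Tensor> outputs;
        (void)session.AddGraph(graph_id, graph);            // GE Core optimizes and builds the graph
        (void)session.RunGraph(graph_id, inputs, outputs);  // execute on the Ascend device
      }
      (void)ge::GEFinalize();
      return 0;
    }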