
model_executor.cc
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/execute/model_executor.h"

#include "graph/ge_context.h"
#include "graph/debug/ge_attr_define.h"
#include "common/ge_call_wrapper.h"
#include "common/local_context.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/manager/graph_mem_manager.h"
#include "graph/manager/host_mem_manager.h"
#include "graph/utils/tensor_adapter.h"
#include "graph/load/graph_loader.h"
#include "graph/load/model_manager/model_manager.h"
#include "common/math/math_util.h"
#include "common/formats/utils/formats_trans_utils.h"

namespace {
constexpr int32_t kBase = 10;
constexpr uint8_t kNeverLoaded = 0;
}

namespace ge {
///
/// @ingroup ge
/// @brief graph executor init
/// @param [in] options user config params
/// @return Status result of function
///
Status ModelExecutor::Initialize(const map<string, string> &options, uint64_t session_id) {
  if (init_flag_) {
    GELOGW("ModelExecutor has already been initialized.");
    return SUCCESS;
  }

  session_id_ = session_id;
  graph_run_listener_ = MakeShared<GraphModelListener>(sync_run_mutex_, condition_);
  if (graph_run_listener_ == nullptr) {
    REPORT_CALL_ERROR("E19999", "New GraphModelListener fail");
    GELOGE(MEMALLOC_FAILED, "[New][GraphModelListener] failed");
    return MEMALLOC_FAILED;
  }

  const auto model_manager = ModelManager::GetInstance();
  GE_CHECK_NOTNULL(model_manager);
  Status status = model_manager->EnableExceptionDump(options);
  if (status != SUCCESS) {
    return status;
  }

  GE_CHK_STATUS_RET(HostMemManager::Instance().Initialize());
  const std::vector<rtMemType_t> mem_type({RT_MEMORY_HBM, RT_MEMORY_P2P_DDR});
  status = MemManager::Instance().Initialize(mem_type);
  if (status != SUCCESS) {
    GELOGE(status, "[Init][MemManager] MemoryAllocatorManager initialize failed.");
    REPORT_CALL_ERROR("E19999", "MemManager initialize failed.");
    return status;
  }

  size_t total_mem_size = 0;
  GE_CHK_STATUS_RET_NOLOG(GetTotalMemorySize(total_mem_size));
  status = VarManager::Instance(session_id)->SetMemoryMallocSize(options, total_mem_size);
  if (status != SUCCESS) {
    GELOGE(status, "[Set][MemoryMallocSize] failed.");
    REPORT_CALL_ERROR("E19999", "VarManager SetMemoryMallocSize failed, InnerSession:%lu.", session_id_);
    return status;
  }

  train_graph_flag_ = ParseTrainGraphFlag();
  thread_run_flag_.store(true);
  run_thread_ = std::thread(&ModelExecutor::RunThread, this);

  init_flag_ = true;
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief graph executor finalize
/// @return Status result of function
///
Status ModelExecutor::Finalize() {
  if (!init_flag_) {
    GELOGW("ModelExecutor has not been initialized.");
    return SUCCESS;
  }

  StopQueue();
  if (run_thread_.joinable()) {
    run_thread_.join();
  }

  if (graph_executor_.FreeExecuteMemory() != SUCCESS) {
    GELOGW("Graph executor FreeExecuteMemory failed, resources may not be released correctly.");
  }

  GELOGI("VarManager free var memory.");
  (void)VarManager::Instance(session_id_)->FreeVarMemory();
  MemManager::Instance().FreeSessionMemory(session_id_);
  HostMemManager::Instance().Finalize();
  ModelManager::GetInstance()->DestroyAicpuSession(session_id_);
  return SUCCESS;
}
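///
/// @ingroup ge
/// @brief Query total HBM size of the current device: set device, read memory info via runtime, reset device.
/// @param [out] total_mem_size: total device memory in bytes.
/// @return Status result of function
///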
Status ModelExecutor::GetTotalMemorySize(size_t &total_mem_size) {
  rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, ret:0x%X",
                      GetContext().DeviceId(), rt_ret);
    GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret);
    return RT_FAILED;
  }

  size_t free_mem = 0;
  rt_ret = rtMemGetInfoEx(RT_MEMORYINFO_HBM, &free_mem, &total_mem_size);
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtMemGetInfoEx failed, ret:0x%X", rt_ret);
    GELOGE(RT_FAILED, "[Call][RtMemGetInfoEx] failed, ret:0x%X", rt_ret);
    return RT_FAILED;
  }

  rt_ret = rtDeviceReset(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, ret:0x%X",
                      GetContext().DeviceId(), rt_ret);
    GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u, ret:0x%X", GetContext().DeviceId(), rt_ret);
    return RT_FAILED;
  }
  return SUCCESS;
}

// OPTION_GRAPH_RUN_MODE is supposed to be a session-level option, but it used to be set at global level.
// If it cannot be parsed from the session options, fall back to the global options via GetContext().
bool ModelExecutor::ParseTrainGraphFlag() {
  string run_mode;
  if (GetContext().GetOption(OPTION_GRAPH_RUN_MODE, run_mode) == SUCCESS && !run_mode.empty()) {
    if (GraphRunMode(std::strtol(run_mode.c_str(), nullptr, kBase)) >= TRAIN) {
      GELOGI("Graph train flag set.");
      return true;
    }
  }
  return false;
}

void ModelExecutor::AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node) {
  std::lock_guard<std::mutex> lock(mutex_);
  graph_nodes_.emplace(graph_id, graph_node);
}

void ModelExecutor::RemoveGraphNode(GraphId graph_id) {
  std::lock_guard<std::mutex> lock(mutex_);
  graph_nodes_.erase(graph_id);
}

///
/// @ingroup ge
/// @brief Load model for graph.
/// @param [in] GeRootModel: root model of compiled graph.
/// @param [in] GraphNode: node of graph.
/// @return Status result of function
///
Status ModelExecutor::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
  GE_CHECK_NOTNULL(graph_node);
  if (ge_root_model == nullptr) {
    return SUCCESS;
  }

  UpdateLocalOmeContext(graph_node);
  return graph_node->IsAsync() ? ModelLoadAsync(ge_root_model, graph_node) : ModelLoadSync(ge_root_model, graph_node);
}

///
/// @ingroup ge
/// @brief Unload model for graph.
/// @param [in] GeRootModel: root model of compiled graph.
/// @param [in] graph_id: graph identifier.
/// @return Status result of function
///
Status ModelExecutor::UnloadGraph(const GeRootModelPtr &ge_root_model, uint32_t graph_id) {
  GE_CHECK_NOTNULL(ge_root_model);
  rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    GELOGW("[GraphExecutor] rtSetDevice failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), graph_id);
    return FAILED;
  }

  RemoveGraphNode(graph_id);
  Status ret = UnloadModel(ge_root_model, graph_id);
  if (ret != SUCCESS) {
    GELOGW("[GraphExecutor] unload model failed, graph_id=%u.", graph_id);
  }
  rt_ret = rtDeviceReset(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    GELOGW("[GraphExecutor] rtDeviceReset failed, graphId=%u.", graph_id);
  }
  return ret;
}

Status ModelExecutor::UnloadModel(const GeRootModelPtr &ge_root_model, uint32_t graph_id) {
  GE_CHECK_NOTNULL(ge_root_model);
  for (size_t i = 0; i < ge_root_model->GetAllModelId().size(); ++i) {
    uint32_t model_id = ge_root_model->GetAllModelId()[i];
    GELOGI("Unload model %u.", model_id);
    Status ret = GraphLoader::UnloadModel(model_id);
    if (ret != SUCCESS) {
      GELOGE(ret, "[GraphExecutor] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
      return ret;
    }
  }
  return SUCCESS;
}

void ModelExecutor::StopQueue() {
  thread_run_flag_.store(false);
  run_args_q_.Stop();
}

void ModelExecutor::ReturnError(RunAsyncCallback callback, Status ret, const string &log) {
  StopQueue();
  GELOGE(ret, "%s", log.c_str());
  std::vector<ge::Tensor> outputs;
  if (callback != nullptr) {
    callback(ret, outputs);
  }
}

void ModelExecutor::UpdateLocalOmeContext(const GraphNodePtr &graph_node) {
  std::lock_guard<std::mutex> lock(mutex_);
  SetLocalOmeContext(graph_node->GetOmeContext());
}

///
/// @ingroup ge
/// @brief Push model execution params to queue.
/// @param [in] args: RunArgs for model execution.
/// @return Status result of function
///
Status ModelExecutor::PushGraph(const RunArgs &args) {
  return run_args_q_.Push(args) ? SUCCESS : FAILED;
}
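///
/// @ingroup ge
/// @brief Worker loop: pops RunArgs from the queue, loads the model on first use, then executes the graph async.
///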
void ModelExecutor::RunThread() {
  ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute);
  if (mmSetCurrentThreadName("GE_Run") != EN_OK) {
    GELOGW("Set thread name failed.");
  }

  RunArgs args;
  while (thread_run_flag_) {
    if (!run_args_q_.Pop(args)) {
      continue;
    }

    GELOGI("[RunThread] A new loop start, graph_id:%u.", args.graph_id);
    ErrorManager::GetInstance().SetErrorContext(args.error_context);
    GetContext().SetSessionId(args.session_id);
    GetThreadLocalContext() = args.context;
    UpdateLocalOmeContext(args.graph_node);

    // Parse inputs.dims to vector<vector<uint64_t>> dynamic_dims.
    Status ret = ParseInputsDims(args.input_tensor);
    if (ret != SUCCESS) {
      ReturnError(args.callback, ret, "ParseInputsDims failed, thread exit.");
      args.graph_node->Unlock();
      return;
    }

    args.graph_node->UpdateLoadFlag();
    if (!args.graph_node->GetLoadFlag()) {
      ErrorManager::GetInstance().SetStage(error_message::kModelLoad, error_message::kModelLoad);
      args.ge_root_model->SetTrainFlag(train_graph_flag_);
      ret = ModelLoadAsync(args.ge_root_model, args.graph_node);
      if (ret != SUCCESS || args.ge_root_model == nullptr) {
        StopQueue();
        ReturnError(args.callback, ret, "LoadGraphAsync failed, thread exit.");
        args.graph_node->Unlock();
        return;
      }
      // Control the times of graph loading in multi-thread scenario.
      args.graph_node->DecreaseLoadCount();
      args.graph_node->IncreaseLoadRecord();

      args.graph_node->SetLoadFlag(true);
      GELOGI("LoadGraph[%u], model[%u] success and set LoadFlag to true.", args.graph_node->GetGraphId(),
             args.ge_root_model->GetModelId());
    }

    ErrorManager::GetInstance().SetStage(error_message::kModelExecute, error_message::kModelExecute);
    if (train_graph_flag_) {
      graph_executor_.SetTrainFlag(train_graph_flag_);
    }

    ret = graph_executor_.ExecuteGraphAsync(args.graph_id, args.graph_node->GetGeRootModel(),
                                            args.input_tensor, args.callback);
    args.graph_node->SetRunFlag(false);
    if (ret != SUCCESS) {
      ReturnError(args.callback, ret, "ExecuteGraphAsync failed, thread exit.");
      args.graph_node->Unlock();
      return;
    }

    args.graph_node->Unlock();
    GELOGI("[GraphExecutor] Run graph async success, graph_id=%u.", args.graph_id);
  }
}

///
/// @ingroup ge
/// @brief Run graph for synchronous model.
/// @param [in] graph_node: node of graph.
/// @param [in] graph_id: graph identifier.
/// @param [in] inputs: input data for the graph running.
/// @param [out] outputs: output data of the graph running.
/// @return Status result of function
///
Status ModelExecutor::RunGraph(const GraphNodePtr &graph_node, GraphId graph_id,
                               const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
  Status ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_);
  if (ret != SUCCESS) {
    GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph_id = %u.", graph_id);
    graph_node->SetRunFlag(false);
    return GE_GRAPH_RUNGRAPH_FAILED;
  }

  if (train_graph_flag_) {
    graph_executor_.SetTrainFlag(train_graph_flag_);
  }
  ret = graph_executor_.ExecuteGraph(graph_id, graph_node->GetGeRootModel(), inputs, outputs);

  graph_node->SetRunFlag(false);
  if (ret != SUCCESS) {
    GELOGE(ret, "[Execute][Graph] failed, graph_id = %u.", graph_id);
    return ret;
  }
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Run graph for synchronous model on a user-specified stream.
/// @param [in] graph_node: node of graph.
/// @param [in] graph_id: graph identifier.
/// @param [in] stream: Stream for model running.
/// @param [in] inputs: input data for the graph running.
/// @param [out] outputs: output data of the graph running.
/// @return Status result of function
///
Status ModelExecutor::RunGraphWithStream(const GraphNodePtr &graph_node, GraphId graph_id, rtStream_t stream,
                                         const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
  auto ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_);
  if (ret != SUCCESS) {
    GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[Set][Condition] failed, graph id = %u, stream = %p.", graph_id, stream);
    graph_node->SetRunFlag(false);
    return GE_GRAPH_RUNGRAPH_FAILED;
  }

  ret = graph_executor_.ExecuteGraphWithStream(graph_id, stream, graph_node->GetGeRootModel(), inputs, outputs);
  graph_node->SetRunFlag(false);
  graph_node->SetIsSpecificStream(false);
  if (ret != SUCCESS) {
    GELOGE(ret, "[Execute][Graph] With Stream failed, graph id = %u, stream = %p.", graph_id, stream);
    return ret;
  }
  GELOGI("[Run][GraphWithStreamAsync] run graph success, graph id = %u, stream = %p.", graph_id, stream);
  return SUCCESS;
}

Status ModelExecutor::ModelLoadSync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
  ge_root_model->SetIsSpecificStream(graph_node->IsSpecificStream());
  return ModelLoad(ge_root_model, graph_node, graph_run_listener_);
}

Status ModelExecutor::ModelLoadAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
  auto listener = MakeShared<RunAsyncListener>();
  GE_CHECK_NOTNULL(listener);
  return ModelLoad(ge_root_model, graph_node, listener);
}
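///
/// @ingroup ge
/// @brief Load root model online; for known-shape models without static memory, check and release memory of
///        other loaded graphs first.
///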
Status ModelExecutor::ModelLoad(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node,
                                const std::shared_ptr<ModelListener> &listener) {
  ge_root_model->SetTrainFlag(train_graph_flag_);
  bool is_unknown_shape = false;
  GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape));
  if (!is_unknown_shape) {
    if (getenv(kEnvGeuseStaticMemory) != nullptr) {
      GELOGI("[LoadGraph] GE_USE_STATIC_MEMORY is set.");
    } else {
      auto root_graph = ge_root_model->GetRootGraph();
      GE_CHECK_NOTNULL(root_graph);
      auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel();
      GeModelPtr ge_model = name_to_model[root_graph->GetName()];
      GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node));
    }
  }

  GE_TIMESTAMP_START(LoadModelOnline);
  uint32_t model_id = INVALID_MODEL_ID;
  Status ret = GraphLoader::LoadModelOnline(model_id, ge_root_model, listener);
  GE_TIMESTAMP_EVENT_END(LoadModelOnline, "GraphLoader::LoadModelOnline");
  if (ret != SUCCESS) {
    GELOGE(ret, "[Load][ModelOnline] Failed, model_id:%u", model_id);
    graph_node->SetRunFlag(false);
    return ret;
  }

  graph_node->SetLoadFlag(true);
  ge_root_model->SetModelId(model_id);
  graph_node->SetGeRootModel(ge_root_model);
  AddGraphNode(graph_node->GetGraphId(), graph_node);
  return SUCCESS;
}
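///
/// @ingroup ge
/// @brief Unload the given models, destroy their aicpu kernels, and mark the graph node reloadable.
///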
void ModelExecutor::ReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node,
                                  const std::vector<uint32_t> &model_ids, uint32_t graph_id, uint64_t session_id) {
  rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u", GetContext().DeviceId());
    GELOGE(RT_FAILED, "[Call][RtSetDevice] failed, device_id=%u.", GetContext().DeviceId());
    return;
  }

  for (auto model_id : model_ids) {
    uint64_t max_memory_size = 0;
    Status result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size);
    if (result != SUCCESS) {
      continue;
    }
    GELOGI("try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id, max_memory_size);
    if (model_ids.size() > 1) {
      result = ge_model->GetSessionId(model_id, session_id);
      if (result != SUCCESS) {
        GELOGW("[GraphExecutor:] get session failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
               graph_id);
        continue;
      }
    }
    result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0);
    if (result != SUCCESS) {
      GELOGW("[GraphExecutor:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
             graph_id);
    }
    result = GraphLoader::UnloadModel(model_id);
    if (result != SUCCESS) {
      GELOGW("[GraphExecutor:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
    }
    GELOGI("UnloadGraph[%u], model[%u] success.", graph_id, model_id);
  }

  graph_node->SetLoadFlag(false);
  // Allow model to be loaded again without adding the graph again.
  graph_node->SetLoadCount(graph_node->GetLoadRecord());
  graph_node->SetLoadRecord(kNeverLoaded);
  GeRootModelPtr ge_root_model = graph_node->GetGeRootModel();
  if (ge_root_model == nullptr) {
    GELOGW("ge_root_model is null, graph_id:%u", graph_id);
    return;
  }
  ge_root_model->ClearAllModelId();

  rt_ret = rtDeviceReset(GetContext().DeviceId());
  if (rt_ret != RT_ERROR_NONE) {
    REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u", GetContext().DeviceId());
    GELOGE(RT_FAILED, "[Call][RtDeviceReset] failed, device_id:%u.", GetContext().DeviceId());
    return;
  }
}
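///
/// @ingroup ge
/// @brief If free device memory cannot hold the model (memory + weights), unload other loaded known-shape
///        graphs to make room.
///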
Status ModelExecutor::CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node) {
  GELOGI("graph_id[%u]", graph_node->GetGraphId());
  int64_t free_memory = 0;
  Status result = GraphLoader::GetMemoryInfo(free_memory);
  if (result != SUCCESS) {
    return result;
  }

  int64_t value = 0;
  int64_t memory_size = AttrUtils::GetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, value) ? value : 0;
  int64_t weight_size = AttrUtils::GetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, value) ? value : 0;
  int64_t session_id = AttrUtils::GetInt(ge_model, MODEL_ATTR_SESSION_ID, value) ? value : 0;

  GELOGI("Graph[%u] need memory_size[%ld], weight_size[%ld], Device[%u] free_memory_size[%ld]",
         graph_node->GetGraphId(), memory_size, weight_size, GetContext().DeviceId(), free_memory);
  if (CheckInt64AddOverflow(memory_size, weight_size) != SUCCESS) {
    REPORT_INNER_ERROR("E19999", "memory_size:%ld and weight_size:%ld will overflow after add, check invalid",
                       memory_size, weight_size);
    GELOGE(INTERNAL_ERROR, "[Check][Param] memory_size:%ld and weight_size:%ld will overflow after add",
           memory_size, weight_size);
    return INTERNAL_ERROR;
  }
  if (free_memory >= (memory_size + weight_size)) {
    return SUCCESS;
  }

  std::lock_guard<std::mutex> lock(mutex_);
  for (const auto &it : graph_nodes_) {
    auto graph_id = it.second->GetGraphId();
    auto model = it.second->GetGeRootModel();
    if (model == nullptr) {
      continue;
    }
    auto model_id = model->GetModelId();
    auto model_ids = model->GetAllModelId();
    // Unknown-shape model does not have its memory released here.
    bool is_unknown_shape = false;
    GE_CHK_STATUS_RET(model->CheckIsUnknownShape(is_unknown_shape));
    if (is_unknown_shape) {
      GELOGD("model_id[%u] graph_id[%u] is unknown model, not release memory", model_id, graph_id);
      continue;
    }
    // Not loaded, no need to unload.
    if (!it.second->GetLoadFlag()) {
      GELOGI("CheckAndReleaseMemory graph[%u] has not been loaded.", graph_id);
      continue;
    }
    ReleaseMemory(ge_model, it.second, model_ids, graph_id, static_cast<uint64_t>(session_id));
  }

  return SUCCESS;
}

void ModelExecutor::ParseInputsDimsForData(const std::vector<ge::Tensor> &input_tensor) {
  GELOGD("Start parse input dims from data.");
  for (size_t i = 0; i < input_tensor.size(); ++i) {
    const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc();
    const Shape &shape = tensor_desc.GetShape();
    const auto &shape_dims = shape.GetDims();
    GELOGD("Input tensor dims is %s.", formats::JoinToString(shape_dims).c_str());
    GetLocalOmeContext().user_real_input_dims.emplace_back(shape_dims);
  }
}

Status ModelExecutor::ParseInputsDimsForGetNextNoSinkAndData(const vector<NodePtr> &dynamic_nodes,
                                                             const std::vector<ge::Tensor> &input_tensor) {
  GELOGD("Start parse inputs dims when coexist data and getnext sink.");
  for (size_t i = 0; i < dynamic_nodes.size(); ++i) {
    auto op_desc = dynamic_nodes.at(i)->GetOpDesc();
    if (op_desc == nullptr) {
      continue;
    }
    GeAttrValue::INT index = 0;
    if (!(AttrUtils::GetInt(op_desc, ATTR_NAME_INDEX, index))) {
      REPORT_CALL_ERROR("E19999", "Get Attr:%s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(),
                        op_desc->GetName().c_str(), op_desc->GetType().c_str());
      GELOGE(PARAM_INVALID, "[Get][Attr] %s from op:%s(%s) fail", ATTR_NAME_INDEX.c_str(),
             op_desc->GetName().c_str(), op_desc->GetType().c_str());
      return PARAM_INVALID;
    }
    if (static_cast<size_t>(index) > input_tensor.size()) {
      REPORT_INNER_ERROR("E19999", "Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu, "
                         "check invalid", ATTR_NAME_INDEX.c_str(),
                         op_desc->GetName().c_str(), op_desc->GetType().c_str(),
                         index, input_tensor.size());
      GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s in op:%s(%s) value:%ld > param input_tensor.size:%zu",
             ATTR_NAME_INDEX.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str(),
             index, input_tensor.size());
      return PARAM_INVALID;
    }

    const TensorDesc &tensor_desc = input_tensor[i].GetTensorDesc();
    const Shape &shape = tensor_desc.GetShape();
    const auto &shape_dims = shape.GetDims();
    GELOGI("Shape dims of %ld data is %s.", index, formats::JoinToString(shape_dims).c_str());
    GetLocalOmeContext().user_real_input_dims.emplace_back(std::move(shape_dims));
  }
  return SUCCESS;
}
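///
/// @ingroup ge
/// @brief Record user input dims for dynamic-shape graphs, depending on how Data and GetNext nodes coexist.
///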
Status ModelExecutor::ParseInputsDims(const std::vector<ge::Tensor> &input_tensor) {
  GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size());
  GetLocalOmeContext().user_real_input_dims.clear();
  if (GetLocalOmeContext().dynamic_node_type.empty()) {
    return SUCCESS;
  }

  const vector<NodePtr> &data_nodes = GetLocalOmeContext().data_nodes;
  const vector<NodePtr> &getnext_nosink_nodes = GetLocalOmeContext().getnext_nosink_nodes;
  GELOGD("Data nodes count is %zu, getnext nosink nodes count is %zu.", data_nodes.size(),
         getnext_nosink_nodes.size());
  if (GetLocalOmeContext().dynamic_node_type == DATA) {
    if (getnext_nosink_nodes.empty()) {
      // Just data or data + getnext_sink.
      ParseInputsDimsForData(input_tensor);
    } else {
      // Data + getnext_nosink, but only need to get shape_dims of data.
      if (ParseInputsDimsForGetNextNoSinkAndData(data_nodes, input_tensor) != SUCCESS) {
        GELOGE(PARAM_INVALID, "[Parse][Dims] from data failed, when data coexist with getnext nosink.");
        return PARAM_INVALID;
      }
    }
  } else {
    if (getnext_nosink_nodes.empty()) {
      // Just getnext_sink or getnext_sink + data, need to get shape_dims from aicpu op.
      GELOGI("Need to get dims from aicpu op: GETDYNAMICDIMS.");
      return SUCCESS;
    } else {
      if (data_nodes.empty()) {
        // Just getnext_nosink.
        ParseInputsDimsForData(input_tensor);
      } else {
        // getnext_nosink + data, but only need to get shape_dims of getnext_nosink.
        if (ParseInputsDimsForGetNextNoSinkAndData(getnext_nosink_nodes, input_tensor) != SUCCESS) {
          GELOGE(PARAM_INVALID, "[Parse][Dims] from getnext nosink failed, when data coexist with getnext nosink");
          return PARAM_INVALID;
        }
      }
    }
  }

  GELOGI("Parse %zu inputs dims success.", GetLocalOmeContext().user_real_input_dims.size());
  return SUCCESS;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as a bridge: GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core (the referenced architecture diagram is not included here).
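For context on how the executor above is reached from user code, here is a minimal sketch of the classic GE API entry points (ge::GEInitialize, ge::Session, ge::GEFinalize) as declared in ge/ge_api.h of this repository's vintage. The graph construction step is elided, exact option keys and overloads vary between releases, and the comments tying session calls to ModelExecutor are this sketch's assumptions, so treat it as an illustration rather than canonical usage.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // GEInitialize / GEFinalize / Session (assumed header path)

int main() {
  std::map<std::string, std::string> options;  // global options, e.g. graph run mode
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    ge::Session session(options);   // inner session; ModelExecutor::Initialize runs behind it (assumption)
    ge::Graph graph("demo_graph");  // normally populated via the graph construction API
    const uint32_t graph_id = 1U;
    (void)session.AddGraph(graph_id, graph);

    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    // Synchronous path: compiles and loads the model, then executes it (cf. RunGraph above).
    (void)session.RunGraph(graph_id, inputs, outputs);
  }  // session destruction unloads its graphs
  (void)ge::GEFinalize();
  return 0;
}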