You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

graph_manager.h 15 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_GRAPH_MANAGER_GRAPH_MANAGER_H_
  17. #define GE_GRAPH_MANAGER_GRAPH_MANAGER_H_
#include <atomic>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include "common/blocking_queue.h"
#include "common/ge_inner_error_codes.h"
#include "common/helper/model_cache_helper.h"
#include "external/graph/types.h"
#include "ge/ge_api_types.h"
#include "graph/build/graph_builder.h"
#include "graph/execute/graph_execute.h"
#include "graph/ge_local_context.h"
#include "graph/load/graph_loader.h"
#include "graph/manager/graph_manager_utils.h"
#include "graph/manager/util/variable_accelerate_ctrl.h"
#include "graph/optimize/graph_optimize.h"
#include "graph/partition/graph_partition.h"
#include "graph/preprocess/graph_preprocess.h"
#include "graph/tuning_utils.h"
#include "model/ge_model.h"
  41. namespace ge {
  42. class GraphManager {
  43. public:
  44. GraphManager(OmgContext &omg_context);
  45. ~GraphManager() = default;
  46. ///
  47. /// @ingroup ge_graph
  48. /// @brief graph manager init
  49. /// @param [in] options user config params
  50. /// @return Status result of function
  51. ///
  52. Status Initialize(const std::map<string, string> &options);
  53. ///
  54. /// @ingroup ge_graph
  55. /// @brief graph manager finalize
  56. /// @return Status result of function
  57. ///
  58. Status Finalize();
  59. ///
  60. /// @ingroup ge_graph
  61. /// @brief add specific graph
  62. /// @param [in] graph_id graph id
  63. /// @param [out] Graph output graph
  64. /// @return Status result of function
  65. ///
  66. Status AddGraph(const GraphId &graph_id, const Graph &graph, const std::map<std::string, std::string> &options);
  67. ///
  68. /// @ingroup ge_graph
  69. /// @brief remove specific graph
  70. /// @param [in] graph_id graph id
  71. /// @return Status result of function
  72. ///
  73. Status RemoveGraph(const GraphId &graph_id);
  74. ///
  75. /// @ingroup ge_graph
  76. /// @brief run specific graph
  77. /// @param [in] graph_id graph id
  78. /// @param [in] inputs input data
  79. /// @param [out] outputs output data
  80. /// @return Status result of function
  81. ///
  82. Status RunGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs,
  83. uint64_t session_id = INVALID_SESSION_ID);
  84. ///
  85. /// @ingroup ge_graph
  86. /// @brief build specific graph
  87. /// @param [in] graph_id graph id
  88. /// @param [in] inputs input data
  89. /// @param [out] models build result
  90. /// @return Status result of function
  91. ///
  92. ge::Status BuildGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs, GeRootModelPtr &models,
  93. uint64_t session_id = 0, bool async = false);
  94. Status BuildGraphForUnregisteredOp(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  95. GeRootModelPtr &ge_root_model, uint64_t session_id);
  96. ///
  97. /// @ingroup ge_graph
  98. /// @brief Save extra attribute to Model
  99. /// @param [in] model: Model attribues will save to.
  100. /// @param [in] type: type of OpDesc.
  101. /// @param [in] attrs: attributes of OpDesc
  102. /// @param [in] inputs: input tensor
  103. /// @param [in] outputs: output tensor
  104. /// @return: Status
  105. ///
  106. Status SaveParams(ge::GeModel &model, const std::string &type, const std::map<string, GeAttrValue> &attrs,
  107. const std::vector<GeTensor> &inputs, const std::vector<GeTensor> &outputs);
  108. ///
  109. /// @ingroup ge_graph
  110. /// @brief get variable value from the session with specific session id
  111. /// @param [in] sessionId session id
  112. /// @param [in] name op name
  113. /// @param [out] val out value tensor
  114. /// @return Status result of function
  115. ///
  116. Status GetVariable(const std::string &name, Tensor &val);
  117. ///
  118. /// @ingroup ge_graph
  119. /// @brief run graph async on session with specific session id
  120. /// @param [in] graph_id graph id
  121. /// @param [in] inputs input data
  122. /// @param [out] callback: callback while run graph async finish
  123. /// @return Status result of function
  124. ///
  125. Status RunGraphAsync(const GraphId &graph_id, const std::vector<ge::InputTensorInfo> &inputs, uint64_t session_id,
  126. RunAsyncCallback callback);
  127. ///
  128. /// @ingroup ge_graph
  129. /// @brief me register the callback function to get the result of summary or checkpoin
  130. /// @param [in] key: summary or checkpoint
  131. /// @param [in] callbak: The real callback object of me
  132. /// @return Status result of function
  133. ///
  134. Status RegisterCallBackFunc(
  135. const std::string &key, const std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)> &callback);
  136. const bool GetTrainFlag() const { return options_.train_graph_flag; }
  137. bool IsGraphNeedRebuild(uint32_t graph_id);
  138. Status GenerateInfershapeGraph(GraphId &graph_id);
  139. const std::map<std::string, std::string> *GetGraphOptions(uint32_t graph_id);
  140. void SetOptionsRunGraphFlag(bool run_graph_flag);
  141. Status GenCheckPointGraph(const std::map<std::string, GeTensorDesc> &all_variables, Graph &graph);
  142. Status SaveVariables(const Graph &graph, const std::vector<std::string> &var_names,
  143. const std::vector<Tensor> &outputs, std::vector<Tensor> &var_values);
  144. Status SaveCheckPointResult(const Graph &graph, const std::vector<Tensor> &outputs, map<string, Tensor> &var_results);
  145. private:
  146. struct PreRunArgs {
  147. GraphId graph_id;
  148. std::vector<ge::InputTensorInfo> input_tensor;
  149. uint64_t session_id;
  150. GEThreadLocalContext context;
  151. RunAsyncCallback callback;
  152. };
  153. struct RunArgs {
  154. GraphNodePtr graph_node;
  155. GraphId graph_id;
  156. std::vector<ge::InputTensorInfo> input_tensor;
  157. GeRootModelPtr ge_root_model;
  158. GEThreadLocalContext context;
  159. RunAsyncCallback callback;
  160. };
  161. Status GetGraphNode(const GraphId &graph_id, GraphNodePtr &out);
  162. std::shared_ptr<GraphModelListener> GetModelListener() const { return graph_run_listener_; }
  163. static Status ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, const SubGraphInfoPtr &sub_graph_info_ptr,
  164. uint64_t session_id, const GEThreadLocalContext &ge_context);
  165. Status PreRun(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs, GeRootModelPtr &ge_root_model,
  166. uint64_t session_id = INVALID_SESSION_ID);
  167. Status OptimizeSubgraph(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph, uint64_t session_id);
  168. Status Build(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph, GeRootModelPtr &ge_root_model,
  169. uint64_t session_id);
  170. Status StartForRunGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  171. GeRootModelPtr &ge_root_model, uint64_t session_id = INVALID_SESSION_ID);
  172. Status InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  173. std::vector<GeTensor> &outputs);
  174. Status ParseOptions(const std::map<std::string, std::string> &options);
  175. static void ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  176. std::string &option);
  177. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key, bool &option);
  178. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key, int &option);
  179. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  180. std::map<std::string, int> &option);
  181. static void Trim(std::string &str);
  182. static Status CheckEngineName(const std::string &engine_name, const std::string &key,
  183. const std::map<std::string, int> &option);
  184. static Status ParseParallelNum(const std::string &parallel_num, const std::string &key, int &num);
  185. static Status ParseTrainGraphFlag(bool &options, bool &option);
  186. static bool IsPerfLevelInvalid(int32_t perf_level);
  187. Status SummaryHandle(const GraphId &graph_id, std::vector<GeTensor> &outputs);
  188. Status CheckpointHandle(const GraphId &graph_id, const ComputeGraphPtr &compute_graph,
  189. const std::vector<GeTensor> &outputs);
  190. // call the callback function of ME to push summary result data to ME
  191. Status PushSummaryData2ME(const GraphId &graph_id, const std::map<std::string, ge::Tensor> &summary_data);
  192. // call the callback function of ME to push save result data to ME
  193. Status PushSaveData2ME(const GraphId &graph_id, const std::map<std::string, ge::Tensor> &save_data);
  194. bool IsCheckpointGraph(ComputeGraphPtr &compute_graph);
  195. bool CheckNetOutputForCheckpointGraph(NodePtr &node);
  196. bool CheckVariableForCheckpointGraph(NodePtr &node);
  197. bool CheckTransOpForCheckpointGraph(NodePtr &node);
  198. Status MergeSubGraph(ComputeGraphPtr &compute_graph, const ge::ComputeGraphPtr &original_compute_graph);
  199. Status ConvertGraphToFile(ComputeGraphPtr &compute_graph, std::string file_path, bool exe_flag = false);
  200. Status SetSubgraph(uint64_t session_id, ComputeGraphPtr compute_graph);
  201. void SetAttrForHcomBroadCastOp(ge::ComputeGraphPtr &compute_graph);
  202. bool IsBroadCastOpData(const ge::NodePtr &var_node);
  203. void AdjustBroadCastOpData(const ge::NodePtr &var_node);
  204. bool IsAssignOpData(const ge::NodePtr &var_node);
  205. void AdjustAssignOpData(const ge::NodePtr &var_node);
  206. bool ConfirmUseOpAndIndexByAnchor(const ge::InDataAnchorPtr &in_anchor, const map<string, std::set<int>> &confirm_ops,
  207. ge::NodePtr &use_node);
  208. bool ConfirmUseOpAndIndexByNode(const ge::NodePtr &var_node, const map<string, std::set<int>> &confirm_ops,
  209. ge::NodePtr &use_node);
  210. // graph context
  211. std::shared_ptr<GraphContext> GetGraphContext() const { return graph_context_; }
  212. Status RemoveIsolatedConst(ge::ComputeGraphPtr &compute_graph);
  213. Status RemoveIsolatedConstInThisGraph(ge::ComputeGraphPtr &compute_graph);
  214. Status OptimizeStage1(ComputeGraphPtr &compute_graph);
  215. Status OptimizeStage2(ComputeGraphPtr &compute_graph);
  216. Status SubexpressionMigration(ComputeGraphPtr &compute_graph);
  217. Status LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node);
  218. Status CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node);
  219. bool CheckModelLoad(const GeRootModelPtr &ge_model, bool load_flag);
  220. Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node);
  221. bool IsGraphNeedBuild(const GraphNodePtr &graph_node);
  222. Status LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper, GeModelPtr &ge_model);
  223. Status SaveCacheBeforeBuild(uint32_t graph_id, const ModelCacheHelperPtr &cache_helper);
  224. Status SaveCacheAfterBuild(uint32_t graph_id, ComputeGraphPtr graph, GeModelPtr &ge_model);
  225. void AddModelCacheHelperToMap(const GraphId &graph_id, uint64_t session_id, ComputeGraphPtr &compute_graph);
  226. Status IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_model);
  227. void RemoveModelCacheHelper(const GraphId &graph_id);
  228. static void ConstructGeInput(std::vector<ge::GeTensor> &ge_inputs, PreRunArgs &args);
  229. static void PreRunThread(GraphManager *graph_manager);
  230. static void RunThread(GraphManager *graph_manager);
  231. static void StopQueue(GraphManager *graph_manager);
  232. static void ReturnError(GraphManager *graph_manager, RunAsyncCallback callback, Status ret, const string &log);
  233. static void ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback, Status ret,
  234. const string &log);
  235. void ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph);
  236. Status PreRunOptimizeOriginalGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  237. ge::ComputeGraphPtr &compute_graph, uint64_t session_id);
  238. Status PreRunOptimizeSubGraph(const GraphNodePtr &graph_node, ge::ComputeGraphPtr &compute_graph,
  239. uint64_t session_id);
  240. Status PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph,
  241. GeRootModelPtr &ge_root_model, uint64_t session_id);
  242. Status CopySubGraphAndMarkFusion(const ComputeGraphPtr &compute_graph, Graph2SubGraphInfoList &sub_graph_map,
  243. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs);
  244. Status OptimizeSubGraphWithMultiThreads(ComputeGraphPtr compute_graph, Graph2SubGraphInfoList &sub_graph_map,
  245. uint64_t session_id);
  246. bool CheckAllFusionOptimizeSuccess(const ComputeGraphPtr &compute_graph, Graph2SubGraphInfoList &sub_graph_map);
  247. Status ReplaceSubgraphWithOriGraph(const ComputeGraphPtr &compute_graph, Graph2SubGraphInfoList &sub_graph_map,
  248. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs);
  249. Status SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id);
  250. std::atomic_bool thread_run_flag_;
  251. BlockingQueue<PreRunArgs> prerun_args_q_{};
  252. BlockingQueue<RunArgs> run_args_q_{};
  253. std::thread prerun_thread_;
  254. std::thread run_thread_;
  255. std::map<GraphId, GraphNodePtr> graph_map_;
  256. std::map<GraphId, ModelCacheHelperPtr> cache_helper_map_;
  257. // for run graph synchronous return
  258. std::mutex sync_run_mutex_;
  259. std::condition_variable condition_;
  260. // run graph synchronization call back listener
  261. std::shared_ptr<GraphModelListener> graph_run_listener_;
  262. // summary and checkpoint callback function list for ME, key is summary or checkpoint
  263. std::map<std::string, std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)>> me_callback_map_;
  264. bool init_flag_;
  265. GraphManagerOptions options_;
  266. OmgContext &omg_context_;
  267. GraphPrepare graph_preparer_;
  268. GraphOptimize graph_optimize_;
  269. GraphPartitioner graph_partitioner_;
  270. GraphBuilder graph_builder_;
  271. GraphLoader graph_loader_;
  272. GraphExecutor graph_executor_;
  273. GraphContextPtr graph_context_ = nullptr;
  274. VarAccelerateCtrl var_acc_ctrl_;
  275. std::mutex run_mutex_;
  276. };
  277. } // namespace ge
  278. #endif // GE_GRAPH_MANAGER_GRAPH_MANAGER_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示