You cannot select more than 25 topics. A topic must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

inner_session.cc 13 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "session/inner_session.h"
  17. #include <map>
  18. #include <memory>
  19. #include <vector>
  20. #include "analyzer/analyzer.h"
  21. #include "adx_datadump_server.h"
  22. #include "common/dump/dump_properties.h"
  23. #include "common/util.h"
  24. #include "framework/common/debug/ge_log.h"
  25. #include "graph/ge_context.h"
  26. #include "graph/ge_global_options.h"
  27. #include "graph/ge_local_context.h"
  28. #include "graph/load/new_model_manager/model_manager.h"
  29. #include "graph/manager/graph_var_manager.h"
  30. #include "graph/utils/tensor_adapter.h"
  31. #include "runtime/mem.h"
  32. namespace ge {
  33. namespace {
  34. const int32_t kDumpStatus = 0;
  35. Status CheckReuseMemoryOption(const std::map<string, string> &options) {
  36. auto iter = options.find(OPTION_EXEC_DISABLE_REUSED_MEMORY);
  37. if (iter != options.end()) {
  38. if (iter->second == "0") {
  39. GELOGD("%s=0, reuse memory is open", OPTION_EXEC_DISABLE_REUSED_MEMORY);
  40. } else if (iter->second == "1") {
  41. GELOGD("%s=1, reuse memory is close", OPTION_EXEC_DISABLE_REUSED_MEMORY);
  42. } else {
  43. GELOGE(PARAM_INVALID, "option %s=%s is invalid", OPTION_EXEC_DISABLE_REUSED_MEMORY, iter->second.c_str());
  44. return FAILED;
  45. }
  46. }
  47. return SUCCESS;
  48. }
  49. }
// Process-wide mutex serializing BuildGraph and RunGraph across all InnerSession
// instances (only one graph may build/run at a time in this process).
static std::mutex mutex_;  // BuildGraph and RunGraph use
// Tracks whether the Adx data dump server has been started; shared by all sessions
// (static member, see AddDumpProperties/RemoveDumpProperties).
bool InnerSession::is_dump_server_inited_ = false;

// Constructs a session bound to `session_id` with the user-supplied options.
// Real setup (device, dump properties, graph manager) happens in Initialize().
InnerSession::InnerSession(uint64_t session_id, const std::map<string, string> &options)
    : init_flag_(false), session_id_(session_id), options_(options), graph_manager_(domi::GetContext()) {}
  54. Status InnerSession::Initialize() {
  55. if (init_flag_) {
  56. GELOGW("[InnerSession:%lu] session already initialize.", session_id_);
  57. return SUCCESS;
  58. }
  59. // If the global options and the session options are duplicated, the session options is preferred.
  60. auto all_options = options_;
  61. all_options.insert(GetMutableGlobalOptions().begin(), GetMutableGlobalOptions().end());
  62. Status ret = CheckReuseMemoryOption(all_options);
  63. if (ret != SUCCESS) {
  64. GELOGE(ret, "[InnerSession:%lu] check reuse memory option failed.", session_id_);
  65. return ret;
  66. }
  67. UpdateThreadContext(std::map<std::string, std::string>{});
  68. GE_CHK_RT_RET(rtSetDevice(GetContext().DeviceId()));
  69. DumpProperties dump_properties;
  70. dump_properties.InitByOptions();
  71. GE_CHK_STATUS_RET(AddDumpProperties(dump_properties), "Add dump properties failed");
  72. ret = graph_manager_.Initialize(options_);
  73. if (ret != SUCCESS) {
  74. GELOGE(ret, "[InnerSession:%lu] initialize failed.", session_id_);
  75. GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
  76. return ret;
  77. }
  78. ret = VarManager::Instance(session_id_)->SetMemoryMallocSize(all_options);
  79. if (ret != SUCCESS) {
  80. GELOGE(ret, "failed to set malloc size");
  81. (void)graph_manager_.Finalize();
  82. GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
  83. GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId())));
  84. return ret;
  85. }
  86. int32_t version = static_cast<int32_t>(SessionVersion::ClOUD_VERSION);
  87. const int DEFAULT_DEVICE_ID = 0;
  88. const int DEFAULT_JOB_ID = 0;
  89. ret = VarManager::Instance(session_id_)->Init(version, session_id_, DEFAULT_DEVICE_ID, DEFAULT_JOB_ID);
  90. if (ret != SUCCESS) {
  91. GELOGE(ret, "failed to init session instance");
  92. GE_CHK_STATUS(RemoveDumpProperties(), "Remove dump properties failed");
  93. }
  94. init_flag_ = true;
  95. return SUCCESS;
  96. }
// Tears down the session: finalizes the graph manager, releases per-session
// resources (aicpu session, variable memory, analyzer info), resets the device
// and removes dump properties. Teardown deliberately continues past a
// graph-manager failure so the remaining cleanup steps still run; that failure
// status is what ultimately gets returned. Safe to call on an uninitialized
// session (returns SUCCESS immediately).
Status InnerSession::Finalize() {
  std::lock_guard<std::mutex> lock(resource_mutex_);
  if (!init_flag_) {
    GELOGW("[InnerSession:%lu] session does not initialize.", session_id_);
    return SUCCESS;
  }
  UpdateThreadContext(std::map<std::string, std::string>{});
  Status ret = graph_manager_.Finalize();
  if (ret != SUCCESS) {
    // Subsequent code execution is required, so no return is required
    GELOGE(ret, "[InnerSession:%lu] finalize failed.", session_id_);
  }
  // Drop the aicpu-side state tied to this session id.
  ModelManager::GetInstance()->DestroyAicpuSession(session_id_);
  init_flag_ = false;
  // release var memory
  GELOGI("VarManager free var memory.");
  (void)VarManager::Instance(session_id_)->FreeVarMemory();
  // release analyzer saved info(Session Level)
  Analyzer::GetInstance()->DestroySessionJsonObject(session_id_);
  GE_CHK_RT(rtDeviceReset(static_cast<int32_t>(GetContext().DeviceId())));
  GE_CHK_STATUS_RET(RemoveDumpProperties(), "Remove dump properties failed");
  return ret;
}
  120. Status InnerSession::GetVariable(const std::string &name, Tensor &val) {
  121. UpdateThreadContext(std::map<std::string, std::string>{});
  122. return graph_manager_.GetVariable(name, val);
  123. }
  124. Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph) {
  125. std::map<std::string, std::string> options;
  126. return AddGraph(graph_id, graph, options);
  127. }
  128. Status InnerSession::AddGraph(uint32_t graph_id, const Graph &graph,
  129. const std::map<std::string, std::string> &options) {
  130. std::lock_guard<std::mutex> lock(resource_mutex_);
  131. if (!init_flag_) {
  132. GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
  133. return GE_SESS_INIT_FAILED;
  134. }
  135. UpdateThreadContext(options);
  136. Status ret = graph_manager_.AddGraph(graph_id, graph, options);
  137. if (ret != SUCCESS) {
  138. GELOGE(ret, "[InnerSession:%lu] add graph %u failed.", session_id_, graph_id);
  139. return ret;
  140. }
  141. GELOGI("[InnerSession:%lu] add graph success, graph_id=%u.", session_id_, graph_id);
  142. return SUCCESS;
  143. }
  144. Status InnerSession::RunGraph(uint32_t graph_id, const std::vector<Tensor> &inputs, std::vector<Tensor> &outputs) {
  145. GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id);
  146. if (mutex_.try_lock()) {
  147. std::lock_guard<std::mutex> lock(mutex_, std::adopt_lock);
  148. if (!init_flag_) {
  149. GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
  150. return GE_SESS_INIT_FAILED;
  151. }
  152. UpdateThreadContext(graph_id);
  153. vector<GeTensor> geInputs;
  154. for (auto &item : inputs) {
  155. geInputs.push_back(TensorAdapter::AsGeTensor(item));
  156. }
  157. vector<GeTensor> geOutputs;
  158. Status ret = graph_manager_.RunGraph(graph_id, geInputs, geOutputs, session_id_);
  159. domi::GetContext().out_nodes_map.clear();
  160. domi::GetContext().user_out_nodes.clear();
  161. if (ret != SUCCESS) {
  162. GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
  163. return ret;
  164. }
  165. outputs.clear();
  166. for (auto &item : geOutputs) {
  167. outputs.push_back(TensorAdapter::AsTensor(item));
  168. }
  169. GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id);
  170. return SUCCESS;
  171. } else {
  172. GELOGE(GE_SESS_ALREADY_RUNNING, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
  173. return GE_SESS_ALREADY_RUNNING;
  174. }
  175. }
  176. Status InnerSession::RemoveGraph(uint32_t graph_id) {
  177. std::lock_guard<std::mutex> lock(resource_mutex_);
  178. if (!init_flag_) {
  179. GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
  180. return GE_SESS_INIT_FAILED;
  181. }
  182. UpdateThreadContext(graph_id);
  183. Status ret = graph_manager_.RemoveGraph(graph_id);
  184. if (ret != SUCCESS) {
  185. GELOGE(ret, "[InnerSession:%lu] remove graph failed, graph_id=%u.", session_id_, graph_id);
  186. return ret;
  187. }
  188. GELOGI("[InnerSession:%lu] remove graph success, graph_id=%u.", session_id_, graph_id);
  189. return SUCCESS;
  190. }
  191. Status InnerSession::RegisterCallBackFunc(
  192. const std::string &key,
  193. const std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)> &callback) {
  194. std::lock_guard<std::mutex> lock(resource_mutex_);
  195. if (!init_flag_) {
  196. GELOGE(GE_SESS_INIT_FAILED, "[InnerSession:%lu] initialize failed.", session_id_);
  197. return GE_SESS_INIT_FAILED;
  198. }
  199. UpdateThreadContext(std::map<std::string, std::string>{});
  200. Status ret = graph_manager_.RegisterCallBackFunc(key, callback);
  201. if (ret != SUCCESS) {
  202. GELOGE(ret, "[InnerSession:%lu] register %s callback function failed.", session_id_, key.c_str());
  203. return ret;
  204. }
  205. GELOGI("[InnerSession:%lu] register %s callback function success.", session_id_, key.c_str());
  206. return SUCCESS;
  207. }
  208. Status InnerSession::BuildGraph(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs) {
  209. UpdateThreadContext(graph_id);
  210. GELOGI("[InnerSession:%lu] build graph on session, graph_id=%u.", session_id_, graph_id);
  211. std::vector<ge::GeTensor> ge_inputs;
  212. for (auto const &input : inputs) {
  213. std::vector<int64_t> input_dims;
  214. std::transform(input.dims.begin(), input.dims.end(), std::back_inserter(input_dims),
  215. [](int64_t x) -> int64_t { return x; });
  216. GeShape input_shape(input_dims);
  217. GeTensorDesc input_tensor_desc;
  218. input_tensor_desc.SetShape(input_shape);
  219. input_tensor_desc.SetDataType(static_cast<ge::DataType>(input.data_type));
  220. ge_inputs.emplace_back(input_tensor_desc);
  221. }
  222. GeRootModelPtr ge_root_model = nullptr;
  223. Status ret = graph_manager_.BuildGraph(graph_id, ge_inputs, ge_root_model, session_id_, true);
  224. if (ret != SUCCESS) {
  225. GELOGE(ret, "[InnerSession:%lu] build graph failed, graph_id=%u.", session_id_, graph_id);
  226. return ret;
  227. }
  228. GELOGI("[InnerSession:%lu] build graph success, graph_id=%u.", session_id_, graph_id);
  229. return ret;
  230. }
  231. Status InnerSession::RunGraphAsync(uint32_t graph_id, const std::vector<InputTensorInfo> &inputs,
  232. RunAsyncCallback callback) {
  233. UpdateThreadContext(graph_id);
  234. GELOGI("[InnerSession:%lu] run graph on session, graph_id=%u.", session_id_, graph_id);
  235. Status ret = graph_manager_.RunGraphAsync(graph_id, inputs, session_id_, callback);
  236. if (ret != SUCCESS) {
  237. GELOGE(ret, "[InnerSession:%lu] run graph failed, graph_id=%u.", session_id_, graph_id);
  238. return ret;
  239. }
  240. GELOGI("[InnerSession:%lu] run graph success, graph_id=%u.", session_id_, graph_id);
  241. return ret;
  242. }
  243. const GraphManager &InnerSession::getGraphManagerObj() const { return graph_manager_; }
// Publishes this session's option stack (global -> session -> graph-level
// `options`) into the calling thread's context and stamps the session id, so
// downstream code that reads thread-local state sees this session's
// configuration. Called before every operation that touches the graph manager.
void InnerSession::UpdateThreadContext(const std::map<std::string, std::string> &options) {
  GetThreadLocalContext().SetGlobalOption(GetMutableGlobalOptions());
  GetThreadLocalContext().SetSessionOption(options_);
  GetThreadLocalContext().SetGraphOption(options);
  GetContext().SetSessionId(session_id_);
}
  250. void InnerSession::UpdateThreadContext(uint32_t graph_id) {
  251. auto options = graph_manager_.GetGraphOptions(graph_id);
  252. if (options == nullptr) {
  253. GELOGW("graph level options is null.");
  254. UpdateThreadContext(std::map<std::string, std::string>{});
  255. } else {
  256. UpdateThreadContext(*options);
  257. }
  258. }
  259. bool InnerSession::IsGraphNeedRebuild(uint32_t graph_id) {
  260. UpdateThreadContext(graph_id);
  261. return graph_manager_.IsGraphNeedRebuild(graph_id);
  262. }
  263. Status InnerSession::GetAllVariables(std::map<std::string, GeTensorDesc> &all_variables) {
  264. return VarManager::Instance(session_id_)->GetAllVariables(all_variables);
  265. }
  266. Status InnerSession::GenCheckPointGraph(const std::map<std::string, GeTensorDesc> &all_variables, Graph &graph) {
  267. return graph_manager_.GenCheckPointGraph(all_variables, graph);
  268. }
  269. Status InnerSession::SaveVariables(const Graph &graph, const std::vector<std::string> &var_names,
  270. const std::vector<Tensor> &outputs, std::vector<Tensor> &var_values) {
  271. return graph_manager_.SaveVariables(graph, var_names, outputs, var_values);
  272. }
// Registers `dump_properties` for this session with the PropertiesManager, and
// lazily starts the Adx data dump server the first time any session enables
// data dump or op-debug (tracked by the static is_dump_server_inited_ flag).
// Returns PARAM_INVALID if the dump server fails to start.
Status InnerSession::AddDumpProperties(const DumpProperties &dump_properties) {
  if (!is_dump_server_inited_) {
    if (dump_properties.IsDumpOpen() || dump_properties.IsOpDebugOpen()) {
      // GE_IF_BOOL_EXEC: on a non-zero init status, log and return PARAM_INVALID.
      GE_IF_BOOL_EXEC(AdxDataDumpServerInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server init failed");
      return PARAM_INVALID)
      GELOGI("Init adx data dump server success");
      is_dump_server_inited_ = true;
    }
  }
  PropertiesManager::Instance().AddDumpProperties(session_id_, dump_properties);
  return SUCCESS;
}
// Unregisters this session's dump properties, and shuts down the Adx data dump
// server once no session holds dump properties anymore (the mirror operation of
// AddDumpProperties). Returns PARAM_INVALID if the server fails to shut down.
Status InnerSession::RemoveDumpProperties() {
  PropertiesManager::Instance().RemoveDumpProperties(session_id_);
  if (is_dump_server_inited_ && PropertiesManager::Instance().GetDumpPropertiesMap().empty()) {
    // GE_IF_BOOL_EXEC: on a non-zero uninit status, log and return PARAM_INVALID.
    GE_IF_BOOL_EXEC(AdxDataDumpServerUnInit() != kDumpStatus, GELOGE(PARAM_INVALID, "Data dump server uninit failed");
    return PARAM_INVALID)
    GELOGI("UnInit adx data dump server success");
    is_dump_server_inited_ = false;
  }
  return SUCCESS;
}
  295. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示