
model_utils.cc 27 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/load/model_manager/model_utils.h"

#include <string>

#include "common/debug/log.h"
#include "common/op/ge_op_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/types.h"
#include "graph/build/memory/block_mem_assigner.h"
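
// Guard used by the address getters below: OFFSET must fall strictly inside a
// block of SIZE bytes; on violation the error is reported and the enclosing
// function returns an empty value.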
#define VALIDATE_MEM_RANGE(OP, SIZE, OFFSET)                                                        \
  do {                                                                                              \
    if (SIZE <= static_cast<uint64_t>(OFFSET)) {                                                    \
      REPORT_INNER_ERROR("E19999", "Node:%s(%s) offset:%ld out of range size:%lu, check invalid",   \
                         OP->GetName().c_str(), OP->GetType().c_str(), OFFSET, SIZE);               \
      GELOGE(OUT_OF_MEMORY, "[Check][Param]Node: %s, memory out of range[%lu: %ld]",                \
             OP->GetName().c_str(), SIZE, OFFSET);                                                  \
      return {};                                                                                    \
    }                                                                                               \
  } while (0)
namespace ge {
///
/// @ingroup ge
/// @brief Get input size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetInputSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_input_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_size);
  const size_t inputs_size = op_desc->GetAllInputsSize();
  for (size_t i = 0; i < inputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }
    int64_t tensor_size = 0;
    GE_IF_BOOL_EXEC(
        TensorUtils::GetSize(*tensor_desc, tensor_size) != GRAPH_SUCCESS,
        GELOGI("Get size from TensorDesc failed, op : %s, input index : %zu", op_desc->GetName().c_str(), i);
        continue);
    GELOGI("GetInputSize op: %s, index: %zu, size:%ld", op_desc->GetName().c_str(), i, tensor_size);
    v_input_size.push_back(tensor_size);
  }
  return v_input_size;
}

///
/// @ingroup ge
/// @brief Get output size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetOutputSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_output_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_size);
  const size_t outputs_size = op_desc->GetOutputsSize();
  const vector<int64_t> v_output_offset = op_desc->GetOutputOffset();
  GE_IF_BOOL_EXEC(v_output_offset.size() != outputs_size,
                  GELOGW("Output param invalid: output_offset=%zu, outputs=%zu.", v_output_offset.size(), outputs_size);
                  return v_output_size;);
  for (size_t i = 0; i < outputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }
    int64_t tensor_size = 0;
    GE_IF_BOOL_EXEC(
        TensorUtils::GetSize(*tensor_desc, tensor_size) != GRAPH_SUCCESS,
        GELOGI("Get size from TensorDesc failed, op : %s, output index : %zu", op_desc->GetName().c_str(), i);
        continue);
    GELOGI("GetOutputSize op: %s, index: %zu, size:%ld", op_desc->GetName().c_str(), i, tensor_size);
    v_output_size.push_back(tensor_size);
  }
  return v_output_size;
}

///
/// @ingroup ge
/// @brief Get workspace size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetWorkspaceSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_workspace_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_workspace_size);
  const vector<int64_t> v_workspace_num = op_desc->GetWorkspace();
  const vector<int64_t> v_workspace_bytes = op_desc->GetWorkspaceBytes();
  if (v_workspace_num.size() != v_workspace_bytes.size()) {
    GELOGW("workspace_num[%zu] != workspace_bytes[%zu]", v_workspace_num.size(), v_workspace_bytes.size());
    return v_workspace_size;
  }
  for (auto workspace_bytes : v_workspace_bytes) {
    v_workspace_size.push_back(workspace_bytes);
  }
  return v_workspace_size;
}

///
/// @ingroup ge
/// @brief Get weight size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetWeightSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_weight_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_weight_size);
  // const op, get weight directly
  const string type_name = op_desc->GetType();
  if ((type_name == "Const") || (type_name == "Constant")) {
    ConstGeTensorPtr weight = nullptr;
    if (AttrUtils::GetTensor(*op_desc, ATTR_NAME_WEIGHTS, weight)) {
      v_weight_size.push_back(TensorUtils::GetWeightSize(weight));
    }
    return v_weight_size;
  }
  // other ops get weight from the connected const op
  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
      if (tensor_desc == nullptr) {
        GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
        continue;
      }
      int64_t tensor_size = 0;
      (void)TensorUtils::GetSize(*tensor_desc, tensor_size);
      v_weight_size.push_back(tensor_size);
    }
  }
  return v_weight_size;
}

///
/// @ingroup ge
/// @brief Get weights.
/// @return vector<ConstGeTensorPtr>
///
vector<ConstGeTensorPtr> ModelUtils::GetWeights(ConstOpDescPtr op_desc) {
  vector<ConstGeTensorPtr> v_weights;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_weights);
  // const op, get weight directly
  const string op_type = op_desc->GetType();
  if ((op_type == "Const") || (op_type == "Constant")) {
    ConstGeTensorPtr weight = nullptr;
    if (AttrUtils::GetTensor(*op_desc, ATTR_NAME_WEIGHTS, weight)) {
      v_weights.push_back(weight);
    }
    return v_weights;
  }
  // other ops get weight from the connected const op
  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
      if (tensor_desc == nullptr) {
        GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
        continue;
      }
      ConstGeTensorPtr weight = nullptr;
      if (AttrUtils::GetTensor(*tensor_desc, ATTR_NAME_WEIGHTS, weight)) {
        v_weights.push_back(weight);
      }
    }
  }
  return v_weights;
}

///
/// @ingroup ge
/// @brief Get AiCpuOp Input descriptor.
/// @return vector<::tagCcAICPUTensor>
///
vector<::tagCcAICPUTensor> ModelUtils::GetInputDescs(ConstOpDescPtr op_desc) {
  // AiCpuOp::GetInputDescs
  vector<::opTensor_t> v_input_descs;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_descs);
  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {  // skip Const input node
      continue;
    }
    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }
    uint32_t dim_cnt = 0;
    GE_CHK_BOOL_EXEC_WARN(TensorUtils::GetRealDimCnt(*tensor_desc, dim_cnt) == GRAPH_SUCCESS, continue,
                          "Get dim_cnt failed");
    opTensor_t tmp;
    uint32_t tmp_fmt = tensor_desc->GetFormat();
    tmp.format = tagOpTensorFormat(tmp_fmt);
    tmp.dim_cnt = static_cast<int32_t>(dim_cnt);
    uint32_t tmp_type = tensor_desc->GetDataType();
    tmp.data_type = tagOpDataType(tmp_type);
    for (int32_t j = 0; j < 4; j++) {  // 4 dims; unused dims are padded to 1
      tmp.dim[j] = (j < tmp.dim_cnt ? tensor_desc->GetShape().GetDim(j) : 1);
    }
    v_input_descs.push_back(tmp);
  }
  return v_input_descs;
}

///
/// @ingroup ge
/// @brief Get AiCpuOp Output descriptor.
/// @return vector<::tagCcAICPUTensor>
///
vector<::tagCcAICPUTensor> ModelUtils::GetOutputDescs(ConstOpDescPtr op_desc) {
  // AiCpuOp::GetOutputDescs
  vector<::opTensor_t> v_output_descs;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_descs);
  // init op output opTensor_t struct
  const size_t output_num = op_desc->GetOutputsSize();
  for (size_t i = 0; i < output_num; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }
    uint32_t dim_cnt = 0;
    GE_CHK_BOOL_EXEC_WARN(TensorUtils::GetRealDimCnt(*tensor_desc, dim_cnt) == GRAPH_SUCCESS, continue,
                          "Get dim_cnt failed");
    opTensor_t tmp;
    uint32_t tmp_fmt = tensor_desc->GetFormat();
    tmp.format = tagOpTensorFormat(tmp_fmt);
    tmp.dim_cnt = static_cast<int32_t>(dim_cnt);
    uint32_t tmp_type = tensor_desc->GetDataType();
    tmp.data_type = tagOpDataType(tmp_type);
    for (int32_t j = 0; j < 4; j++) {  // 4 dims; unused dims are padded to 1
      tmp.dim[j] = (j < tmp.dim_cnt ? tensor_desc->GetShape().GetDim(j) : 1);
    }
    v_output_descs.push_back(tmp);
  }
  return v_output_descs;
}

///
/// @ingroup ge
/// @brief Get input data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_input_data_addr;  // init as: buf_base + op_def_->input(i)
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_data_addr);
  uint64_t session_id = model_param.session_id;
  const size_t inputs_size = op_desc->GetInputsSize();
  const vector<int64_t> v_input_offset = op_desc->GetInputOffset();
  const string op_type = op_desc->GetType();
  size_t non_const_index = 0;
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  vector<int64_t> v_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, v_memory_type);
  if (has_mem_type_attr && (v_memory_type.size() != inputs_size)) {
    REPORT_INNER_ERROR("E19999", "Attr:%s, memory_type.size:%zu != input_desc.size:%zu, op:%s(%s), check invalid",
                       ATTR_NAME_INPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), inputs_size,
                       op_desc->GetName().c_str(), op_desc->GetType().c_str());
    GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s, memory_type.size:%zu != input_desc.size:%zu, op:%s(%s)",
           ATTR_NAME_INPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), inputs_size,
           op_desc->GetName().c_str(), op_desc->GetType().c_str());
    return v_input_data_addr;
  }
  for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(i));
    GE_IF_BOOL_EXEC(tensor_desc == nullptr,
                    GELOGD("Op: %s, Index: %zu, has no input", op_desc->GetName().c_str(), i);
                    continue;)
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      // TBE: add weights address to input
      int64_t tensor_size = 0;
      GE_CHK_STATUS(TensorUtils::GetSize(*tensor_desc, tensor_size));
      if (tensor_size) {
        int64_t data_offset = 0;
        GE_CHK_STATUS(TensorUtils::GetDataOffset(*tensor_desc, data_offset));
        VALIDATE_MEM_RANGE(op_desc, model_param.weight_size, data_offset);
        uint8_t *weight_addr = model_param.weight_base + data_offset;
        v_input_data_addr.push_back(weight_addr);
        GELOGI("[IMAS]GetInputDataAddrs graph_%u type[C] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
               op_desc->GetName().c_str(), i, weight_addr);
      }
      non_const_index++;
      continue;
    }
    GE_IF_BOOL_EXEC(non_const_index >= v_input_offset.size(), break);
    int64_t input_offset = v_input_offset[non_const_index];
    non_const_index++;
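    // Variable tensors record a block-start logical offset plus an inner offset
    // within the block: look the block up by (offset - inner_offset), then
    // re-apply the inner offset once the block is resolved to a runtime address.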
    int64_t inner_offset = 0;
    (void)ge::AttrUtils::GetInt(op_desc->MutableInputDesc(i), ATTR_NAME_INNER_OFFSET, inner_offset);
    GE_IF_BOOL_EXEC(model_param.var_size != 0 &&
                        ge::VarManager::Instance(session_id)->IsVarAddr(input_offset - inner_offset),
                    uint8_t *variable_addr = nullptr;
                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, input_offset - inner_offset, variable_addr),
                                       return {});
                    variable_addr += inner_offset;
                    v_input_data_addr.push_back(variable_addr);
                    GELOGI("[IMAS]GetInputDataAddrs graph_%u type[V] name[%s] input[%zu] memaddr[%p]",
                           model_param.graph_id, op_desc->GetName().c_str(), i, variable_addr);
                    continue);
    int64_t mem_type;
    bool tensor_has_mem_type = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_TENSOR_MEM_TYPE, mem_type);
    // feature maps
    void *mem_addr = nullptr;
    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {  // fusion
      mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(input_offset));
      v_input_data_addr.push_back(mem_addr);
    } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
      int64_t tensor_size = 0;
      GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset);
      mem_addr = model_param.ts_mem_mall->Acquire(input_offset, static_cast<uint64_t>(tensor_size));
      v_input_data_addr.push_back(mem_addr);
    } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
      uint8_t *p2p_mem_addr = model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + v_input_offset[i];
      v_input_data_addr.push_back(p2p_mem_addr);
      GELOGI("[IMAS]GetInputDataAddrs graph_%u type[P] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
             op_desc->GetName().c_str(), i, p2p_mem_addr);
      continue;
    } else {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset);
      mem_addr = model_param.mem_base + input_offset;
      v_input_data_addr.push_back(mem_addr);
    }
    GELOGI("[IMAS]GetInputDataAddrs graph_%u type[F] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
           op_desc->GetName().c_str(), i, mem_addr);
  }
  return v_input_data_addr;
}

///
/// @ingroup ge
/// @brief Get variable address.
/// @return Status
///
Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDescPtr &op_desc, int64_t offset,
                              uint8_t *&var_addr) {
  rtMemType_t mem_type = ge::VarManager::Instance(model_param.session_id)->GetVarMemType(offset);
  switch (mem_type) {
    case RT_MEMORY_RDMA_HBM:
      if (offset < 0) {
        REPORT_INNER_ERROR("E19999", "Param offset:%ld < 0, check invalid", offset);
        GELOGE(PARAM_INVALID, "[Check][Param] Param offset:%ld cannot be negative", offset);
        return PARAM_INVALID;
      }
      var_addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offset));
      break;
    case RT_MEMORY_HBM:
      VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base);
      var_addr = model_param.var_base + offset - model_param.logic_var_base;
      break;
    default:
      REPORT_INNER_ERROR("E19999", "Get mem_type:%d for offset:%ld is unsupported, check invalid", mem_type, offset);
      GELOGE(PARAM_INVALID, "[Check][Param] Get mem_type:%d for offset:%ld is unsupported, check invalid",
             mem_type, offset);
      return PARAM_INVALID;
  }
  GE_CHECK_NOTNULL(var_addr);
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Get output data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetOutputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_output_data_addr;  // init as: buf_base + op_def_->output(i)
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_data_addr);
  uint64_t session_id = model_param.session_id;
  const size_t outputs_size = op_desc->GetOutputsSize();
  const vector<int64_t> v_output_offset = op_desc->GetOutputOffset();
  GE_IF_BOOL_EXEC(v_output_offset.size() != outputs_size,
                  GELOGW("Output param invalid: output_offset=%zu, outputs=%zu.", v_output_offset.size(), outputs_size);
                  return v_output_data_addr);
  vector<int64_t> v_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, v_memory_type);
  if (has_mem_type_attr && (v_memory_type.size() != outputs_size)) {
    REPORT_INNER_ERROR("E19999", "Attr:%s, memory_type.size:%zu != output_desc.size:%zu, op:%s(%s), check invalid",
                       ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), outputs_size,
                       op_desc->GetName().c_str(), op_desc->GetType().c_str());
    GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s, memory_type.size:%zu != output_desc.size:%zu, op:%s(%s)",
           ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), outputs_size,
           op_desc->GetName().c_str(), op_desc->GetType().c_str());
    return v_output_data_addr;
  }
  for (size_t i = 0; i < outputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }
    int32_t calc_type = 0;
    bool ret = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_MEMORY_SIZE_CALC_TYPE, calc_type);
    if (ret && (calc_type == static_cast<int32_t>(ge::MemorySizeCalcType::ALWAYS_EMPTY))) {
      GELOGD("%s is an optional output, the address does not need to be saved.", tensor_desc->GetName().c_str());
      continue;
    }
    // Same block-start + inner-offset lookup as for inputs above.
    int64_t inner_offset = 0;
    (void)ge::AttrUtils::GetInt(op_desc->MutableeOutputDesc(i), ATTR_NAME_INNER_OFFSET, inner_offset);
    GE_IF_BOOL_EXEC(model_param.var_size != 0 &&
                        ge::VarManager::Instance(session_id)->IsVarAddr(v_output_offset[i] - inner_offset),
                    uint8_t *variable_addr = nullptr;
                    GE_CHK_STATUS_EXEC(
                        GetVarAddr(model_param, op_desc, v_output_offset[i] - inner_offset, variable_addr),
                        return {});
                    variable_addr += inner_offset;
                    v_output_data_addr.push_back(variable_addr);
                    GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[V] name[%s] output[%zu] memaddr[%p]",
                           model_param.graph_id, op_desc->GetName().c_str(), i, variable_addr);
                    continue);
    int64_t mem_type;
    bool tensor_has_mem_type = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_TENSOR_MEM_TYPE, mem_type);
    // feature maps
    void *mem_addr = nullptr;
    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {  // fusion
      mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(v_output_offset[i]));
      v_output_data_addr.push_back(mem_addr);
    } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
      int64_t tensor_size = 0;
      GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i]);
      mem_addr = model_param.ts_mem_mall->Acquire(v_output_offset[i], static_cast<uint64_t>(tensor_size));
      v_output_data_addr.push_back(mem_addr);
    } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
      uint8_t *p2p_mem_addr = model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + v_output_offset[i];
      v_output_data_addr.push_back(p2p_mem_addr);
      GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[P] name[%s] output[%zu] memaddr[%p]", model_param.graph_id,
             op_desc->GetName().c_str(), i, p2p_mem_addr);
      continue;
    } else {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i]);
      mem_addr = static_cast<uint8_t *>(model_param.mem_base + v_output_offset[i]);
      v_output_data_addr.push_back(mem_addr);
    }
    GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[F] name[%s] output[%zu] memaddr[%p]", model_param.graph_id,
           op_desc->GetName().c_str(), i, mem_addr);
  }
  return v_output_data_addr;
}

///
/// @ingroup ge
/// @brief Get workspace data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_workspace_data_addr;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_workspace_data_addr);
  const vector<int64_t> v_workspace_offset = op_desc->GetWorkspace();
  const vector<int64_t> v_workspace_bytes = op_desc->GetWorkspaceBytes();
  if (v_workspace_offset.size() != v_workspace_bytes.size()) {
    GELOGW("v_workspace_offset.size()[%zu] != v_workspace_bytes.size()[%zu]", v_workspace_offset.size(),
           v_workspace_bytes.size());
    return v_workspace_data_addr;
  }
  vector<bool> workspace_reuse_flag;
  bool has_workspace_reuse = ge::AttrUtils::GetListBool(op_desc, "workspace_reuse_flag", workspace_reuse_flag);
  vector<int64_t> v_memory_type;
  vector<int64_t> workspace_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, TVM_ATTR_NAME_WORKSPACE_TYPE, v_memory_type);
  bool has_mem_type_workspace =
      ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_TYPE_LIST, workspace_memory_type);
  vector<int32_t> workspace_no_reuse_scope;
  bool has_workspace_no_reuse_scope =
      ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE, workspace_no_reuse_scope);
  for (size_t i = 0; i < v_workspace_bytes.size(); ++i) {
    // Temporary solution: the aicpu workspace of multiple images cannot be shared.
    bool aicpu_work_space = (has_workspace_reuse && i < workspace_reuse_flag.size() && !workspace_reuse_flag[i] &&
                             !model_param.is_single_op);
    if (aicpu_work_space) {
      void *mem_addr = model_param.aicpu_mem_mall->Acquire(v_workspace_offset[i], v_workspace_bytes[i]);
      v_workspace_data_addr.push_back(mem_addr);
      GELOGI(
          "[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] aicpu workspace[%zu] offset[%ld] bytes[%ld] "
          "memaddr[%p]",
          model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i], mem_addr);
      continue;
    } else if (has_mem_type_workspace && workspace_memory_type[i] == RT_MEMORY_P2P_DDR) {
      int64_t p2p_workspace_offset = v_workspace_offset[i];
      int64_t p2p_workspace_bytes = v_workspace_bytes[i];
      uint8_t *p2p_mem_addr = p2p_workspace_bytes == 0
                                  ? nullptr
                                  : model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + p2p_workspace_offset;
      v_workspace_data_addr.push_back(p2p_mem_addr);
      GELOGI(
          "[IMAS]GetWorkspaceDataAddrs graph_%u type[P] name[%s] p2p workspace[%zu] offset[%ld] bytes[%ld] "
          "memaddr[%p]",
          model_param.graph_id, op_desc->GetName().c_str(), i, p2p_workspace_offset, p2p_workspace_bytes, p2p_mem_addr);
      continue;
    }
    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {
      v_workspace_data_addr.push_back(reinterpret_cast<uint8_t *>(static_cast<intptr_t>(v_workspace_offset[i])));
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[L1] name[%s], mem_addr[workspace index %zu]:0x%lx",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i]);
    } else if (v_workspace_bytes[i] == 0) {
      v_workspace_data_addr.push_back(nullptr);
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] Null addr",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i]);
    } else {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_workspace_offset[i]);
      uint8_t *mem_addr = nullptr;
      bool session_scope_memory = (has_workspace_no_reuse_scope) && (i < workspace_no_reuse_scope.size());
      if (session_scope_memory) {
        mem_addr = model_param.memory_infos.at(kSessionScopeMemory | RT_MEMORY_HBM).memory_base +
                   v_workspace_offset[i];
      } else {
        mem_addr = model_param.mem_base + v_workspace_offset[i];
      }
      v_workspace_data_addr.push_back(mem_addr);
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] memaddr[%p]",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i],
             mem_addr);
    }
  }
  return v_workspace_data_addr;
}

///
/// @ingroup ge
/// @brief Get runtime memory address.
/// @return Status
///
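// Logical addresses are laid out in three disjoint ranges (feature map, weight,
// variable); each range is rebased onto its own device block.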
Status ModelUtils::GetRtAddress(const RuntimeParam &param, uintptr_t logic_addr, uint8_t *&mem_addr) {
  uint8_t *runtime_base_addr = nullptr;
  if ((param.logic_mem_base <= logic_addr) && (logic_addr < param.logic_mem_base + param.mem_size)) {
    runtime_base_addr = param.mem_base - param.logic_mem_base;
    GELOGI("The logic addr:0x%lx is data address, base:0x%lx, size:%lu", logic_addr, param.logic_mem_base,
           param.mem_size);
  } else if ((param.logic_weight_base <= logic_addr) && (logic_addr < param.logic_weight_base + param.weight_size)) {
    runtime_base_addr = param.weight_base - param.logic_weight_base;
    GELOGI("The logic addr:0x%lx is weight address, base:0x%lx, size:%lu", logic_addr, param.logic_weight_base,
           param.weight_size);
  } else if ((param.logic_var_base <= logic_addr) && (logic_addr < param.logic_var_base + param.var_size)) {
    runtime_base_addr = param.var_base - param.logic_var_base;
    GELOGI("The logic addr:0x%lx is variable address, base:0x%lx, size:%lu", logic_addr, param.logic_var_base,
           param.var_size);
  } else if (logic_addr != 0) {
    mem_addr = nullptr;
    REPORT_INNER_ERROR("E19999", "Check param logic addr:0x%lx abnormal", logic_addr);
    GELOGE(PARAM_INVALID, "[Check][Param] The logic addr:0x%lx is abnormal", logic_addr);
    return PARAM_INVALID;
  }
  mem_addr = runtime_base_addr + logic_addr;
  return SUCCESS;
}
}  // namespace ge
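
The address arithmetic above follows one pattern throughout: a logical offset is classified into a region (feature map, weight, or variable), range-checked, and rebased onto that region's device pointer. Below is a minimal standalone sketch of that pattern; the Region struct and the sample values are illustrative stand-ins for RuntimeParam, not part of the GE API, and the special case GetRtAddress makes for logic address 0 is omitted.

// Standalone sketch of the logic-address resolution used by GetRtAddress.
// All names and values here are illustrative, not the GE implementation.
#include <cstdint>
#include <cstdio>

struct Region {
  uintptr_t logic_base;   // logical base assigned at graph-build time
  uint8_t *runtime_base;  // device pointer mapped at model-load time
  uint64_t size;          // block size in bytes, used for the range check
};

// Returns nullptr when the logical address falls in no known region.
static uint8_t *Resolve(const Region *regions, int n, uintptr_t logic_addr) {
  for (int i = 0; i < n; ++i) {
    const Region &r = regions[i];
    if (logic_addr >= r.logic_base && logic_addr < r.logic_base + r.size) {
      // Rebase: keep the offset inside the region, swap the base pointer.
      return r.runtime_base + (logic_addr - r.logic_base);
    }
  }
  return nullptr;
}

int main() {
  static uint8_t mem[1024], weights[512];
  Region regions[] = {{0x1000, mem, sizeof(mem)},          // feature-map block
                      {0x8000, weights, sizeof(weights)}};  // weight block
  uint8_t *p = Resolve(regions, 2, 0x8010);  // 0x10 into the weight block
  printf("resolved: %p (expected %p)\n", (void *)p, (void *)(weights + 0x10));
  return 0;
}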

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor to make full use of its compute power. During model training and inference, GE is invoked automatically; users do not interact with it directly. GE consists of two main parts, GE API and GE Core.
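
For orientation, here is a rough sketch of the client-side flow through GE API (initialization, session creation, graph registration, execution). The entry points follow ge/ge_api.h, but treat the exact signatures as assumptions to verify against the header; graph construction itself is left to the caller.

// Hypothetical driver: hand a previously built ge::Graph to GE Core and run it.
#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // GE API entry points: GEInitialize / Session / GEFinalize

ge::Status RunOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs,
                   std::vector<ge::Tensor> &outputs) {
  std::map<std::string, std::string> options;  // engine options, empty for defaults
  if (ge::GEInitialize(options) != ge::SUCCESS) return ge::FAILED;
  {
    ge::Session session(options);  // GE API object backed by GE Core
    const uint32_t graph_id = 1;   // caller-chosen graph handle
    if (session.AddGraph(graph_id, graph) != ge::SUCCESS) return ge::FAILED;
    // Optimization and compilation happen inside GE Core before execution.
    if (session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS) return ge::FAILED;
  }  // session destroyed before finalize
  return ge::GEFinalize();
}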