model_utils.cc 31 kB

5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "graph/load/model_manager/model_utils.h"

#include <string>

#include "common/debug/log.h"
#include "common/op/ge_op_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/types.h"
#include "graph/build/memory/block_mem_assigner.h"
#include "common/math/math_util.h"

#define VALIDATE_MEM_RANGE(OP, TOTAL_SIZE, OFFSET, SIZE) \
  do { \
    if (ge::CheckInt64AddOverflow((OFFSET), (SIZE)) != SUCCESS) { \
      GELOGE(PARAM_INVALID, "Int64 %ld and %ld addition can result in overflow!", \
             static_cast<int64_t>(OFFSET), static_cast<int64_t>(SIZE)); \
      return {}; \
    } \
    int64_t range = (OFFSET) + (SIZE); \
    if ((TOTAL_SIZE) < static_cast<uint64_t>(range)) { \
      REPORT_INNER_ERROR("E19999", \
                         "Node:%s(%s) memory out of range, offset:%ld, size:%ld, exceed total size:%lu.", \
                         OP->GetName().c_str(), OP->GetType().c_str(), (OFFSET), (SIZE), (TOTAL_SIZE)); \
      GELOGE(OUT_OF_MEMORY, \
             "[Check][Param]Node:%s(%s) memory out of range, offset:%ld, size:%ld, exceed total size:%lu.", \
             OP->GetName().c_str(), OP->GetType().c_str(), (OFFSET), (SIZE), (TOTAL_SIZE)); \
      return {}; \
    } \
  } while (0)
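
// VALIDATE_MEM_RANGE guards every raw address computation below: it first rejects
// OFFSET + SIZE combinations that overflow int64, then rejects ranges whose end
// exceeds TOTAL_SIZE; either failure logs an error and makes the enclosing function
// return an empty result.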

namespace {
const char *const kUsedStreamNum = "used_stream_num";
}  // namespace

namespace ge {
///
/// @ingroup ge
/// @brief Get input size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetInputSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_input_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_size);
  const size_t inputs_size = op_desc->GetAllInputsSize();

  for (size_t i = 0; i < inputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }

    int64_t tensor_size = 0;
    GE_IF_BOOL_EXEC(
        TensorUtils::GetSize(*tensor_desc, tensor_size) != GRAPH_SUCCESS,
        GELOGI("Get size from TensorDesc failed, op : %s, input index : %zu", op_desc->GetName().c_str(), i);
        continue);

    GELOGI("GetInputSize op: %s, index: %zu, size:%ld", op_desc->GetName().c_str(), i, tensor_size);
    v_input_size.push_back(tensor_size);
  }
  return v_input_size;
}

///
/// @ingroup ge
/// @brief Get output size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetOutputSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_output_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_size);

  const size_t outputs_size = op_desc->GetOutputsSize();
  const vector<int64_t> v_output_offset = op_desc->GetOutputOffset();
  GE_IF_BOOL_EXEC(v_output_offset.size() != outputs_size,
                  GELOGW("Output param invalid: output_offset=%zu, outputs=%zu.", v_output_offset.size(), outputs_size);
                  return v_output_size;);

  for (size_t i = 0; i < outputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }

    int64_t tensor_size = 0;
    GE_IF_BOOL_EXEC(
        TensorUtils::GetSize(*tensor_desc, tensor_size) != GRAPH_SUCCESS,
        GELOGI("Get size from TensorDesc failed, op : %s, output index : %zu", op_desc->GetName().c_str(), i);
        continue);

    GELOGI("GetOutputSize op: %s, index: %zu, size:%ld", op_desc->GetName().c_str(), i, tensor_size);
    v_output_size.push_back(tensor_size);
  }
  return v_output_size;
}

///
/// @ingroup ge
/// @brief Get workspace size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetWorkspaceSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_workspace_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_workspace_size);

  const vector<int64_t> v_workspace_num = op_desc->GetWorkspace();
  const vector<int64_t> v_workspace_bytes = op_desc->GetWorkspaceBytes();
  if (v_workspace_num.size() != v_workspace_bytes.size()) {
    GELOGW("workspace_num[%zu] != workspace_bytes[%zu]", v_workspace_num.size(), v_workspace_bytes.size());
    return v_workspace_size;
  }

  for (auto workspace_bytes : v_workspace_bytes) {
    v_workspace_size.push_back(workspace_bytes);
  }
  return v_workspace_size;
}

///
/// @ingroup ge
/// @brief Get weight size.
/// @return vector<int64_t>
///
vector<int64_t> ModelUtils::GetWeightSize(ConstOpDescPtr op_desc) {
  vector<int64_t> v_weight_size;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_weight_size);

  // Const op: get the weight directly.
  const string type_name = op_desc->GetType();
  if ((type_name == "Const") || (type_name == "Constant")) {
    ConstGeTensorPtr weight = nullptr;
    if (AttrUtils::GetTensor(*op_desc, ATTR_NAME_WEIGHTS, weight)) {
      v_weight_size.push_back(TensorUtils::GetWeightSize(weight));
    }
    return v_weight_size;
  }

  // Other ops: get the weights from the connected const inputs.
  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
      if (tensor_desc == nullptr) {
        GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
        continue;
      }

      int64_t tensor_size = 0;
      (void)TensorUtils::GetSize(*tensor_desc, tensor_size);
      v_weight_size.push_back(tensor_size);
    }
  }
  return v_weight_size;
}

///
/// @ingroup ge
/// @brief Get weights.
/// @return vector<ConstGeTensorPtr>
///
vector<ConstGeTensorPtr> ModelUtils::GetWeights(ConstOpDescPtr op_desc) {
  vector<ConstGeTensorPtr> v_weights;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_weights);

  // Const op: get the weight directly.
  const string op_type = op_desc->GetType();
  if ((op_type == "Const") || (op_type == "Constant")) {
    ConstGeTensorPtr weight = nullptr;
    if (AttrUtils::GetTensor(*op_desc, ATTR_NAME_WEIGHTS, weight)) {
      v_weights.push_back(weight);
    }
    return v_weights;
  }

  // Other ops: get the weights from the connected const inputs.
  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
      if (tensor_desc == nullptr) {
        GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
        continue;
      }

      ConstGeTensorPtr weight = nullptr;
      if (AttrUtils::GetTensor(*tensor_desc, ATTR_NAME_WEIGHTS, weight)) {
        v_weights.push_back(weight);
      }
    }
  }
  return v_weights;
}
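
// Both GetWeightSize and GetWeights above follow the same pattern: Const/Constant ops
// carry the weight tensor in their own ATTR_NAME_WEIGHTS attribute, while other ops
// pick the weights up from the input descriptors that GetIsInputConst marks as const.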

///
/// @ingroup ge
/// @brief Get AiCpuOp Input descriptor.
/// @return vector<::tagCcAICPUTensor>
///
vector<::tagCcAICPUTensor> ModelUtils::GetInputDescs(ConstOpDescPtr op_desc) {
  // AiCpuOp::GetInputDescs
  vector<::opTensor_t> v_input_descs;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_descs);

  const size_t inputs_size = op_desc->GetAllInputsSize();
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();

  for (size_t i = 0; i < inputs_size; ++i) {
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {  // skip Const input node
      continue;
    }

    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }

    uint32_t dim_cnt = 0;
    GE_CHK_BOOL_EXEC_WARN(TensorUtils::GetRealDimCnt(*tensor_desc, dim_cnt) == GRAPH_SUCCESS, continue,
                          "Get dim_cnt failed");

    opTensor_t tmp;
    uint32_t tmp_fmt = tensor_desc->GetFormat();
    tmp.format = tagOpTensorFormat(tmp_fmt);
    tmp.dim_cnt = static_cast<int32_t>(dim_cnt);
    uint32_t tmp_type = tensor_desc->GetDataType();
    tmp.data_type = tagOpDataType(tmp_type);
    for (int32_t j = 0; j < 4; j++) {  // 4 dims
      tmp.dim[j] = (j < tmp.dim_cnt ? tensor_desc->GetShape().GetDim(j) : 1);
    }
    v_input_descs.push_back(tmp);
  }
  return v_input_descs;
}

///
/// @ingroup ge
/// @brief Get AiCpuOp Output descriptor.
/// @return vector<::tagCcAICPUTensor>
///
vector<::tagCcAICPUTensor> ModelUtils::GetOutputDescs(ConstOpDescPtr op_desc) {
  // AiCpuOp::GetOutputDescs
  vector<::opTensor_t> v_output_descs;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_descs);

  // Init op output opTensor_t struct.
  const size_t output_num = op_desc->GetOutputsSize();
  for (size_t i = 0; i < output_num; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }

    uint32_t dim_cnt = 0;
    GE_CHK_BOOL_EXEC_WARN(TensorUtils::GetRealDimCnt(*tensor_desc, dim_cnt) == GRAPH_SUCCESS, continue,
                          "Get dim_cnt failed");

    opTensor_t tmp;
    uint32_t tmp_fmt = tensor_desc->GetFormat();
    tmp.format = tagOpTensorFormat(tmp_fmt);
    tmp.dim_cnt = static_cast<int32_t>(dim_cnt);
    uint32_t tmp_type = tensor_desc->GetDataType();
    tmp.data_type = tagOpDataType(tmp_type);
    for (int32_t j = 0; j < 4; j++) {  // 4 dims
      tmp.dim[j] = (j < tmp.dim_cnt ? tensor_desc->GetShape().GetDim(j) : 1);
    }
    v_output_descs.push_back(tmp);
  }
  return v_output_descs;
}
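
// The address getters below resolve each logical offset into one of several memory
// pools and tag the resolved address in their IMAS logs: type[C] constant/weight
// memory, type[V] variable memory, type[P] P2P DDR memory, type[F] feature-map
// memory; RT_MEMORY_L1 offsets are passed through unchanged for fusion ops.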

///
/// @ingroup ge
/// @brief Get input data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetInputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_input_data_addr;  // init as: buf_base + op_def_->input(i)
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_input_data_addr);
  uint64_t session_id = model_param.session_id;

  const size_t inputs_size = op_desc->GetInputsSize();
  const vector<int64_t> v_input_offset = op_desc->GetInputOffset();
  const string op_type = op_desc->GetType();

  size_t non_const_index = 0;
  const vector<bool> v_is_input_const = op_desc->GetIsInputConst();
  vector<int64_t> v_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, v_memory_type);
  if (has_mem_type_attr && (v_memory_type.size() != inputs_size)) {
    REPORT_INNER_ERROR("E19999", "Attr:%s, memory_type.size:%zu != input_desc.size:%zu, op:%s(%s), check invalid",
                       ATTR_NAME_INPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), inputs_size,
                       op_desc->GetName().c_str(), op_desc->GetType().c_str());
    GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s, memory_type.size:%zu != input_desc.size:%zu, op:%s(%s)",
           ATTR_NAME_INPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), inputs_size,
           op_desc->GetName().c_str(), op_desc->GetType().c_str());
    return v_input_data_addr;
  }

  for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableInputDesc(static_cast<uint32_t>(i));
    GE_IF_BOOL_EXEC(tensor_desc == nullptr,
                    GELOGD("Op: %s, Index: %zu, has no input", op_desc->GetName().c_str(), i);
                    continue;)
    int64_t tensor_size = 0;
    GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
    if ((i < v_is_input_const.size()) && v_is_input_const[i]) {
      // Add the weight address to the inputs.
      int64_t data_offset = 0;
      GE_CHK_STATUS(TensorUtils::GetDataOffset(*tensor_desc, data_offset));
      int64_t weight_size = 0;
      // GetTensorSizeInBytes is used here because the weight is allocated based on the size of
      // TensorData in AdjustConstWeightSize, and that size is zero when the tensor is empty.
      GE_CHK_STATUS(TensorUtils::GetTensorSizeInBytes(*tensor_desc, weight_size));
      VALIDATE_MEM_RANGE(op_desc, model_param.weight_size, data_offset, weight_size);
      uint8_t *weight_addr = model_param.weight_base + data_offset;
      v_input_data_addr.push_back(weight_addr);
      GELOGI("[IMAS]GetInputDataAddrs graph_%u type[C] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
             op_desc->GetName().c_str(), i, weight_addr);
      non_const_index++;
      continue;
    }

    GE_IF_BOOL_EXEC(non_const_index >= v_input_offset.size(), break);
    int64_t input_offset = v_input_offset[non_const_index];
    non_const_index++;
    int64_t inner_offset = 0;
    (void)ge::AttrUtils::GetInt(op_desc->MutableInputDesc(i), ATTR_NAME_INNER_OFFSET, inner_offset);
    GE_IF_BOOL_EXEC(model_param.var_size != 0 &&
                        ge::VarManager::Instance(session_id)->IsVarAddr(input_offset - inner_offset),
                    uint8_t *variable_addr = nullptr;
                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, input_offset - inner_offset,
                                                  tensor_size + inner_offset, variable_addr), return {});
                    variable_addr += inner_offset;
                    v_input_data_addr.push_back(variable_addr);
                    GELOGI("[IMAS]GetInputDataAddrs graph_%u type[V] name[%s] input[%zu] memaddr[%p]",
                           model_param.graph_id, op_desc->GetName().c_str(), i, variable_addr);
                    continue);

    int64_t mem_type;
    bool tensor_has_mem_type = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_TENSOR_MEM_TYPE, mem_type);
    // Feature maps.
    void *mem_addr = nullptr;
    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {  // fusion
      mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(input_offset));
      v_input_data_addr.push_back(mem_addr);
    } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
      // The input size and the peer output size may not be contiguous, so tensor_size is not checked here.
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset, static_cast<int64_t>(0));
      mem_addr = model_param.ts_mem_mall->Acquire(input_offset, static_cast<uint64_t>(tensor_size));
      v_input_data_addr.push_back(mem_addr);
    } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
      uint8_t *p2p_mem_addr = model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + v_input_offset[i];
      v_input_data_addr.push_back(p2p_mem_addr);
      GELOGI("[IMAS]GetInputDataAddrs graph_%u type[P] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
             op_desc->GetName().c_str(), i, p2p_mem_addr);
      continue;
    } else {
      // The input size and the peer output size may not be contiguous, so tensor_size is not checked here.
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, input_offset, static_cast<int64_t>(0));
      mem_addr = model_param.mem_base + input_offset;
      v_input_data_addr.push_back(mem_addr);
    }
    GELOGI("[IMAS]GetInputDataAddrs graph_%u type[F] name[%s] input[%zu] memaddr[%p]", model_param.graph_id,
           op_desc->GetName().c_str(), i, mem_addr);
  }
  return v_input_data_addr;
}
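
// Note on GetVarAddr below: RDMA HBM variable offsets are reinterpreted directly as
// device addresses, while HBM offsets are rebased from logic_var_base onto var_base
// after a range check.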

///
/// @ingroup ge
/// @brief Get variable address.
/// @return Status
///
Status ModelUtils::GetVarAddr(const RuntimeParam &model_param, const ConstOpDescPtr &op_desc, int64_t offset,
                              int64_t tensor_size, uint8_t *&var_addr) {
  rtMemType_t mem_type = ge::VarManager::Instance(model_param.session_id)->GetVarMemType(offset);
  switch (mem_type) {
    case RT_MEMORY_RDMA_HBM:
      if (offset < 0) {
        REPORT_INNER_ERROR("E19999", "Param offset:%ld < 0, check invalid", offset);
        GELOGE(PARAM_INVALID, "[Check][Param] Param offset:%ld cannot be negative", offset);
        return PARAM_INVALID;
      }
      var_addr = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(offset));
      break;
    case RT_MEMORY_HBM:
      VALIDATE_MEM_RANGE(op_desc, model_param.var_size, offset - model_param.logic_var_base, tensor_size);
      var_addr = model_param.var_base + offset - model_param.logic_var_base;
      break;
    default:
      REPORT_INNER_ERROR("E19999", "Get mem_type:%d for offset:%ld is unsupported, check invalid", mem_type, offset);
      GELOGE(PARAM_INVALID, "[Check][Param] Get mem_type:%d for offset:%ld is unsupported, check invalid",
             mem_type, offset);
      return PARAM_INVALID;
  }
  GE_CHECK_NOTNULL(var_addr);
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Get output data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetOutputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_output_data_addr;  // init as: buf_base + op_def_->output(i)
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_output_data_addr);
  uint64_t session_id = model_param.session_id;

  const size_t outputs_size = op_desc->GetOutputsSize();
  const vector<int64_t> v_output_offset = op_desc->GetOutputOffset();
  GE_IF_BOOL_EXEC(v_output_offset.size() != outputs_size,
                  GELOGW("Output param invalid: output_offset=%zu, outputs=%zu.", v_output_offset.size(), outputs_size);
                  return v_output_data_addr);
  vector<int64_t> v_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_OUTPUT_MEM_TYPE_LIST, v_memory_type);
  if (has_mem_type_attr && (v_memory_type.size() != outputs_size)) {
    REPORT_INNER_ERROR("E19999", "Attr:%s, memory_type.size:%zu != output_desc.size:%zu, op:%s(%s), check invalid",
                       ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), outputs_size,
                       op_desc->GetName().c_str(), op_desc->GetType().c_str());
    GELOGE(PARAM_INVALID, "[Check][Param] Attr:%s, memory_type.size:%zu != output_desc.size:%zu, op:%s(%s)",
           ATTR_NAME_OUTPUT_MEM_TYPE_LIST.c_str(), v_memory_type.size(), outputs_size,
           op_desc->GetName().c_str(), op_desc->GetType().c_str());
    return v_output_data_addr;
  }

  for (size_t i = 0; i < outputs_size; ++i) {
    const GeTensorDescPtr tensor_desc = op_desc->MutableOutputDesc(i);
    if (tensor_desc == nullptr) {
      GELOGW("Op: %s, Index: %zu, Tensor Desc is null", op_desc->GetName().c_str(), i);
      continue;
    }

    int32_t calc_type = 0;
    bool ret = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_MEMORY_SIZE_CALC_TYPE, calc_type);
    if (ret && (calc_type == static_cast<int32_t>(ge::MemorySizeCalcType::ALWAYS_EMPTY))) {
      GELOGD("%s is an optional output, its address does not need to be saved.", tensor_desc->GetName().c_str());
      continue;
    }

    int64_t inner_offset = 0;
    (void)ge::AttrUtils::GetInt(op_desc->MutableOutputDesc(i), ATTR_NAME_INNER_OFFSET, inner_offset);
    int64_t tensor_size = 0;
    GE_CHK_STATUS_EXEC(TensorUtils::GetSize(*tensor_desc, tensor_size), return {});
    GE_IF_BOOL_EXEC(model_param.var_size != 0 &&
                        ge::VarManager::Instance(session_id)->IsVarAddr(v_output_offset[i] - inner_offset),
                    uint8_t *variable_addr = nullptr;
                    GE_CHK_STATUS_EXEC(GetVarAddr(model_param, op_desc, v_output_offset[i] - inner_offset,
                                                  tensor_size + inner_offset, variable_addr), return {});
                    variable_addr += inner_offset;
                    v_output_data_addr.push_back(variable_addr);
                    GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[V] name[%s] output[%zu] memaddr[%p]",
                           model_param.graph_id, op_desc->GetName().c_str(), i, variable_addr);
                    continue);

    int64_t mem_type;
    bool tensor_has_mem_type = ge::AttrUtils::GetInt(tensor_desc, ATTR_NAME_TENSOR_MEM_TYPE, mem_type);
    // Feature maps.
    void *mem_addr = nullptr;
    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {  // fusion
      mem_addr = reinterpret_cast<uint8_t *>(static_cast<intptr_t>(v_output_offset[i]));
      v_output_data_addr.push_back(mem_addr);
    } else if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_TS_4G) {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i], tensor_size);
      mem_addr = model_param.ts_mem_mall->Acquire(v_output_offset[i], static_cast<uint64_t>(tensor_size));
      v_output_data_addr.push_back(mem_addr);
    } else if (tensor_has_mem_type && mem_type == RT_MEMORY_P2P_DDR) {
      uint8_t *p2p_mem_addr = model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + v_output_offset[i];
      v_output_data_addr.push_back(p2p_mem_addr);
      GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[P] name[%s] output[%zu] memaddr[%p]", model_param.graph_id,
             op_desc->GetName().c_str(), i, p2p_mem_addr);
      continue;
    } else {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_output_offset[i], tensor_size);
      mem_addr = static_cast<uint8_t *>(model_param.mem_base + v_output_offset[i]);
      v_output_data_addr.push_back(mem_addr);
    }
    GELOGI("[IMAS]GetOutputDataAddrs graph_%u type[F] name[%s] output[%zu] memaddr[%p]", model_param.graph_id,
           op_desc->GetName().c_str(), i, mem_addr);
  }
  return v_output_data_addr;
}
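
// Workspace addresses below are resolved in priority order: non-reusable AICPU
// workspaces from aicpu_mem_mall, P2P DDR workspaces, L1 offsets passed through
// unchanged, zero-byte workspaces as null, and everything else from the main memory
// base (or the session-scope pool when a no-reuse scope is set).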

///
/// @ingroup ge
/// @brief Get workspace data address.
/// @return vector<void*>
///
vector<void *> ModelUtils::GetWorkspaceDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc) {
  vector<void *> v_workspace_data_addr;
  GE_CHECK_NOTNULL_EXEC(op_desc, return v_workspace_data_addr);

  const vector<int64_t> v_workspace_offset = op_desc->GetWorkspace();
  const vector<int64_t> v_workspace_bytes = op_desc->GetWorkspaceBytes();
  if (v_workspace_offset.size() != v_workspace_bytes.size()) {
    GELOGW("v_workspace_offset.size()[%zu] != v_workspace_bytes.size()[%zu]", v_workspace_offset.size(),
           v_workspace_bytes.size());
    return v_workspace_data_addr;
  }

  vector<bool> workspace_reuse_flag;
  bool has_workspace_reuse = ge::AttrUtils::GetListBool(op_desc, "workspace_reuse_flag", workspace_reuse_flag);
  vector<int64_t> v_memory_type;
  vector<int64_t> workspace_memory_type;
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(op_desc, TVM_ATTR_NAME_WORKSPACE_TYPE, v_memory_type);
  bool has_mem_type_workspace =
      ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_TYPE_LIST, workspace_memory_type);

  vector<int32_t> workspace_no_reuse_scope;
  bool has_workspace_no_reuse_scope =
      ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_WORKSPACE_MEMORY_NO_REUSE_SCOPE, workspace_no_reuse_scope);

  for (size_t i = 0; i < v_workspace_bytes.size(); ++i) {
    // Temporary solution: the aicpu workspace of multiple images cannot be shared.
    bool aicpu_work_space = (has_workspace_reuse && i < workspace_reuse_flag.size() && !workspace_reuse_flag[i] &&
                             !model_param.is_single_op);
    if (aicpu_work_space) {
      void *mem_addr = model_param.aicpu_mem_mall->Acquire(v_workspace_offset[i], v_workspace_bytes[i]);
      v_workspace_data_addr.push_back(mem_addr);
      GELOGI(
          "[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] aicpu workspace[%zu] offset[%ld] bytes[%ld] "
          "memaddr[%p]",
          model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i], mem_addr);
      continue;
    } else if (has_mem_type_workspace && workspace_memory_type[i] == RT_MEMORY_P2P_DDR) {
      int64_t p2p_workspace_offset = v_workspace_offset[i];
      int64_t p2p_workspace_bytes = v_workspace_bytes[i];
      uint8_t *p2p_mem_addr = p2p_workspace_bytes == 0
                                  ? nullptr
                                  : model_param.memory_infos.at(RT_MEMORY_P2P_DDR).memory_base + p2p_workspace_offset;
      v_workspace_data_addr.push_back(p2p_mem_addr);
      GELOGI(
          "[IMAS]GetWorkspaceDataAddrs graph_%u type[P] name[%s] p2p workspace[%zu] offset[%ld] bytes[%ld] "
          "memaddr[%p]",
          model_param.graph_id, op_desc->GetName().c_str(), i, p2p_workspace_offset, p2p_workspace_bytes, p2p_mem_addr);
      continue;
    }

    if (has_mem_type_attr && v_memory_type[i] == RT_MEMORY_L1) {
      v_workspace_data_addr.push_back(reinterpret_cast<uint8_t *>(static_cast<intptr_t>(v_workspace_offset[i])));
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[L1] name[%s], mem_addr[workspace index %zu]:0x%lx",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i]);
    } else if (v_workspace_bytes[i] == 0) {
      v_workspace_data_addr.push_back(nullptr);
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] Null addr",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i]);
    } else {
      VALIDATE_MEM_RANGE(op_desc, model_param.mem_size, v_workspace_offset[i], v_workspace_bytes[i]);
      uint8_t *mem_addr = nullptr;
      bool session_scope_memory = (has_workspace_no_reuse_scope) && (i < workspace_no_reuse_scope.size());
      if (session_scope_memory) {
        mem_addr = model_param.memory_infos.at(kSessionScopeMemory | RT_MEMORY_HBM).memory_base +
                   v_workspace_offset[i];
      } else {
        mem_addr = model_param.mem_base + v_workspace_offset[i];
      }
      v_workspace_data_addr.push_back(mem_addr);
      GELOGI("[IMAS]GetWorkspaceDataAddrs graph_%u type[F] name[%s] workspace[%zu] offset[%ld] bytes[%ld] memaddr[%p]",
             model_param.graph_id, op_desc->GetName().c_str(), i, v_workspace_offset[i], v_workspace_bytes[i],
             mem_addr);
    }
  }
  return v_workspace_data_addr;
}
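
// Note on GetRtAddress below: it rebases a logical address onto the matching runtime
// base (data, weight, or variable memory); a logical address of 0 that falls outside
// every range is returned as a null pointer rather than treated as an error.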

///
/// @ingroup ge
/// @brief Get runtime memory address.
/// @return Status
///
Status ModelUtils::GetRtAddress(const RuntimeParam &param, uintptr_t logic_addr, uint8_t *&mem_addr) {
  uint8_t *runtime_base_addr = nullptr;
  if ((param.logic_mem_base <= logic_addr) && (logic_addr < param.logic_mem_base + param.mem_size)) {
    runtime_base_addr = param.mem_base - param.logic_mem_base;
    GELOGI("The logic addr:0x%lx is data address, base:0x%lx, size:%lu", logic_addr, param.logic_mem_base,
           param.mem_size);
  } else if ((param.logic_weight_base <= logic_addr) && (logic_addr < param.logic_weight_base + param.weight_size)) {
    runtime_base_addr = param.weight_base - param.logic_weight_base;
    GELOGI("The logic addr:0x%lx is weight address, base:0x%lx, size:%lu", logic_addr, param.logic_weight_base,
           param.weight_size);
  } else if ((param.logic_var_base <= logic_addr) && (logic_addr < param.logic_var_base + param.var_size)) {
    runtime_base_addr = param.var_base - param.logic_var_base;
    GELOGI("The logic addr:0x%lx is variable address, base:0x%lx, size:%lu", logic_addr, param.logic_var_base,
           param.var_size);
  } else if (logic_addr != 0) {
    mem_addr = nullptr;
    REPORT_INNER_ERROR("E19999", "Check param logic addr:0x%lx abnormal", logic_addr);
    GELOGE(PARAM_INVALID, "[Check][Param] The logic addr:0x%lx is abnormal", logic_addr);
    return PARAM_INVALID;
  }

  mem_addr = runtime_base_addr + logic_addr;
  return SUCCESS;
}

Status ModelUtils::CalculateFollowStream(const GeModelPtr &ge_model, int64_t &hccl_fellow_stream_num) {
  const auto &model_def = ge_model->GetModelTaskDefPtr();
  GE_CHECK_NOTNULL(model_def);
  Graph graph = ge_model->GetGraph();
  ComputeGraphPtr compute_graph = GraphUtils::GetComputeGraph(graph);
  GE_CHECK_NOTNULL(compute_graph);

  map<uint32_t, OpDescPtr> op_list;
  for (const auto &node : compute_graph->GetDirectNode()) {
    OpDescPtr op_desc = node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    op_list.emplace(op_desc->GetId(), op_desc);
  }

  std::multimap<int64_t, int64_t> main_follow_num;
  for (int32_t i = 0; i < model_def->task_size(); i++) {
    const domi::TaskDef &task = model_def->task(i);
    if (static_cast<rtModelTaskType_t>(task.type()) == RT_MODEL_TASK_HCCL) {
      auto hccl_def = task.kernel_hccl();
      OpDescPtr hccl_op_desc = op_list.at(hccl_def.op_index());
      int64_t main_stream_id = hccl_op_desc->GetStreamId();
      int64_t follow_stream_num = 0;
      if (!ge::AttrUtils::GetInt(hccl_op_desc, kUsedStreamNum, follow_stream_num)) {
        GELOGW("Get used stream num failed, op is %s", hccl_op_desc->GetName().c_str());
      }
      main_follow_num.emplace(main_stream_id, follow_stream_num);
    }
  }
  hccl_fellow_stream_num = CalFollowStramSum(main_follow_num);
  return SUCCESS;
}
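
// CalFollowStramSum keeps, for each HCCL main stream id, the largest follow-stream
// count recorded for that stream, then sums those per-stream maxima to get the total
// number of follow streams the model needs.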

int64_t ModelUtils::CalFollowStramSum(const std::multimap<int64_t, int64_t> &hccl_stream_map) {
  std::map<int64_t, int64_t> max_follow_stream_map;
  for (const auto &it : hccl_stream_map) {
    auto max_it = max_follow_stream_map.find(it.first);
    if (max_it == max_follow_stream_map.end()) {
      max_follow_stream_map.emplace(it.first, it.second);
    } else if (it.second > max_it->second) {
      max_it->second = it.second;
    }
  }

  int64_t need_follow_stream_num = 0;
  for (const auto &follow_it : max_follow_stream_map) {
    need_follow_stream_num += follow_it.second;
  }
  GELOGD("Need follow num is %ld", need_follow_stream_num);
  return need_follow_stream_num;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them: GE takes the graph produced by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE is specifically optimized for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is not visible to the user. GE consists of two main parts, GE API and GE Core.
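
As a rough illustration of the GE API side of that flow, here is a minimal sketch of initializing GE, handing it a graph, and running it. It assumes the public ge_api.h entry points (GEInitialize, Session, AddGraph, RunGraph, GEFinalize) with the std::string-based option maps of the GE version this file belongs to; option values and graph construction are placeholders, not real configuration:

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // public GE API header (path assumed)

// Minimal sketch, not a definitive example: options, graph construction, and error
// handling are reduced to placeholders.
ge::Status RunGraphOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs,
                        std::vector<ge::Tensor> &outputs) {
  std::map<std::string, std::string> options;      // real deployments set device/soc options here
  if (ge::GEInitialize(options) != ge::SUCCESS) {  // bring up GE Core
    return ge::FAILED;
  }
  {
    ge::Session session(options);  // one session can own several graphs
    const uint32_t graph_id = 0U;  // caller-chosen id for this graph
    if ((session.AddGraph(graph_id, graph) != ge::SUCCESS) ||            // hand the ME graph to GE
        (session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS)) {  // optimize and execute
      (void)ge::GEFinalize();
      return ge::FAILED;
    }
  }  // destroy the session before finalizing GE
  return ge::GEFinalize();
}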