
model_builder.cc 36 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/build/model_builder.h"
#include <securectype.h>
#include <iostream>
#include <set>
#include <unordered_map>
#include "common/ge/ge_util.h"
#include "common/dump/dump_manager.h"
#include "framework/common/debug/ge_log.h"
#include "graph/anchor.h"
#include "graph/attr_value.h"
#include "graph/buffer.h"
#include "graph/build/stream_allocator.h"
#include "graph/common/omg_util.h"
#include "graph/common/ge_call_wrapper.h"
#include "graph/common/local_context.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_attr_value.h"
#include "graph/ge_context.h"
#include "graph/ge_error_codes.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/optimize/common/params.h"
#include "graph/types.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/node_utils.h"
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/type_utils.h"
#include "init/gelib.h"
#include "memory/memory_assigner.h"
#include "omg/version.h"
#include "register/op_registry.h"
#include "graph/passes/set_input_output_offset_pass.h"

using std::map;
using std::set;
using std::string;
using std::vector;

namespace {
const uint32_t kWeightsStartOffset = 512;
const int32_t kWrongIndex = -2;
const int kInvalidIndexNum = -1;

const char *const kVectorCore = "VectorCore";
const char *const kCoreType = "ge.engineType";
const std::string kEnableL1Fusion = "ge.l1Fusion";

const set<string> adjust_layer_type_ = {ge::CONVOLUTION};

bool IsGeLocalOp(const ge::ConstOpDescPtr &op_desc) {
  auto type = op_desc->GetType();
  if (type == ge::CONSTANTOP) {
    // constant op just has one output
    ge::GeTensorDesc output_desc = op_desc->GetOutputDesc(0);
    return !(output_desc.GetDataType() == ge::DT_STRING);
  }
  const set<string> ge_local_set = {ge::STREAMMERGE, ge::MEMCPYASYNC, ge::STREAMACTIVE, ge::STREAMSWITCH,
                                    ge::VARIABLE, ge::NOOP, ge::CONSTANT, ge::ENTER,
                                    ge::REFENTER, ge::LOOPCOND, ge::NEXTITERATION, ge::REFNEXTITERATION,
                                    ge::EXIT, ge::REFEXIT, ge::MERGE, ge::MEMCPYADDRASYNC};
  return (ge_local_set.find(type) != ge_local_set.end());
}
}  // namespace

namespace ge {
ModelBuilder::ModelBuilder(uint64_t session_id, ge::ComputeGraphPtr compute_graph,
                           const Graph2SubGraphInfoList &subgraphs, const map<string, int> &stream_max_parallel_num,
                           bool hcom_parallel, int mode)
    : session_id_(session_id),
      weight_offset_(kWeightsStartOffset),
      compute_graph_(std::move(compute_graph)),
      subgraphs_(subgraphs),
      stream_num_(0),
      event_num_(0),
      label_num_(0),
      stream_max_parallel_num_(stream_max_parallel_num),
      hcom_parallel_(hcom_parallel),
      build_mode_(mode),
      max_mem_offset_(0),
      p2p_mem_offset_(0),
      zero_copy_mem_size_(0),
      platform_type_(0),
      is_loop_graph_(false),
      is_l1_fusion_enable_(false) {}

ModelBuilder::~ModelBuilder() {}

Status ModelBuilder::CalcOutputSize(const ge::NodePtr &n) {
  GE_CHECK_NOTNULL(n);
  auto node_op_desc = n->GetOpDesc();
  GE_CHECK_NOTNULL(node_op_desc);
  uint32_t index = 0;
  for (const auto &output_desc_ptr : node_op_desc->GetAllOutputsDescPtr()) {
    GeTensorDesc &desc_temp = *output_desc_ptr;

    uint32_t dim_num = static_cast<uint32_t>(desc_temp.GetShape().GetDimNum());
    GE_IF_BOOL_EXEC(dim_num > DIM_DEFAULT_SIZE, TensorUtils::SetRealDimCnt(desc_temp, dim_num));
    // calculate tensor size
    int64_t size_temp = 0;
    graphStatus graph_status = TensorUtils::GetTensorMemorySizeInBytes(desc_temp, size_temp);
    if (graph_status != GRAPH_SUCCESS) {
      GELOGE(graph_status, "GetTensorMemorySizeInBytes failed!");
      return FAILED;
    }
    TensorUtils::SetSize(desc_temp, size_temp);
    if (node_op_desc->UpdateOutputDesc(index, desc_temp) != SUCCESS) {
      GELOGE(FAILED, "UpdateOutputDesc failed.");
      return FAILED;
    }
    GELOGD("update output desc, dim_size: %u, mem_size: %ld, format: %s, type: %s, node name:%s", dim_num, size_temp,
           TypeUtils::FormatToSerialString(desc_temp.GetFormat()).c_str(),
           TypeUtils::DataTypeToSerialString(desc_temp.GetDataType()).c_str(), node_op_desc->GetName().c_str());
    index++;
  }

  return SUCCESS;
}

bool ModelBuilder::SetInputConst(const OpDescPtr &op_desc, const NodePtr &src_node, size_t index,
                                 vector<bool> &is_input_const) {
  GELOGI("SetIsInputConst const: %s, source node: %s", op_desc->GetName().c_str(), src_node->GetName().c_str());
  for (size_t i = is_input_const.size(); i <= index; ++i) {
    is_input_const.push_back(false);
  }
  is_input_const[index] = true;

  vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(src_node);
  if (weights.empty()) {
    GELOGW("SetInputIsConst weights is empty, node: %s", src_node->GetName().c_str());
    return false;
  }
  GeTensorPtr weight = weights[0];
  GE_IF_BOOL_EXEC(weight == nullptr, return true);
  GeTensorDesc &tensor_desc = weight->MutableTensorDesc();
  int64_t data_offset = 0;
  if (TensorUtils::GetDataOffset(tensor_desc, data_offset) != GRAPH_SUCCESS) {
    GELOGW("Get Offset from weight failed");
    return false;
  }
  auto input_tensor = op_desc->MutableInputDesc(static_cast<uint32_t>(index));
  if (input_tensor == nullptr) {
    GELOGW("Get input_tensor failed");
    return false;
  }
  TensorUtils::SetDataOffset(*input_tensor, data_offset);
  return true;
}

void ModelBuilder::SetInputIsConst(const ge::NodePtr &n) {
  auto node_op_desc = n->GetOpDesc();
  GE_CHECK_NOTNULL_JUST_RETURN(node_op_desc);
  auto is_input_const = node_op_desc->GetIsInputConst();

  // must set all true input_const to false
  for (size_t i = 0; i < is_input_const.size(); i++) {
    is_input_const[i] = false;
  }

  std::string const_type;
  auto in_data_anchors = n->GetAllInDataAnchors();
  for (size_t index = 0; index < in_data_anchors.size(); index++) {
    auto in_data_anchor = in_data_anchors.at(index);
    const auto &peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
    GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
    const auto &src_node = peer_out_anchor->GetOwnerNode();
    if (!NodeUtils::GetConstOpType(src_node, const_type)) {
      continue;
    }

    if (const_type == CONSTANT) {
      if (!SetInputConst(node_op_desc, src_node, index, is_input_const)) {
        return;
      }
    } else {
      if ((index < is_input_const.size()) && is_input_const[index]) {
        is_input_const[index] = false;
      }
    }
  }

  std::string input_const_info = ToString(is_input_const);
  GELOGD("update opdesc:%s InputConst:%s", node_op_desc->GetName().c_str(), input_const_info.c_str());
  node_op_desc->SetIsInputConst(is_input_const);
}

Status ModelBuilder::AdjustConstWeightSize(const ge::NodePtr &node, size_t &mem_offset) {
  GE_CHECK_NOTNULL(node);
  if (node->GetType() == CONSTANT) {
    vector<GeTensorPtr> weights = OpDescUtils::MutableWeights(node);
    if (weights.empty()) {
      GELOGE(FAILED, "weights size of node %s is empty", node->GetName().c_str());
      return FAILED;
    }
    GeTensorPtr weight = weights[0];
    if (weight == nullptr) {
      GELOGE(FAILED, "weights[0] is null.");
      return FAILED;
    }
    GeTensorDesc &tensor_desc = weight->MutableTensorDesc();
    size_t output_size = weight->GetData().size();
    TensorUtils::SetDataOffset(tensor_desc, mem_offset);
    GELOGD("Node: %s, weight size: %zu.", node->GetName().c_str(), output_size);
    mem_offset += output_size;
  }
  return SUCCESS;
}

Status ModelBuilder::SetInputOutputDesc() {
  Status ret;

  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);

    if (!is_loop_graph_ && node_op_desc->GetType() == LOOPCOND) {
      is_loop_graph_ = true;
    }
    // if user set input node format ND, the expected node for data and netoutput format is ND in
    // final graph.
    if ((GetLocalOmgContext().format == domi::DOMI_TENSOR_ND) && (!node_op_desc->HasAttr("_is_single_op")) &&
        ((node_op_desc->GetType() == DATA_TYPE) || (node_op_desc->GetType() == NETOUTPUT))) {
      auto inputDescsPtr = node_op_desc->GetAllInputsDescPtr();
      auto outputDescsPtr = node_op_desc->GetAllOutputsDescPtr();
      ge::Format format = ge::FORMAT_ND;
      for (auto &inputDescPtr : inputDescsPtr) {
        GE_CHECK_NOTNULL(inputDescPtr);
        inputDescPtr->SetFormat(format);
        inputDescPtr->SetOriginFormat(format);
      }
      for (auto &outputDescPtr : outputDescsPtr) {
        GE_CHECK_NOTNULL(outputDescPtr);
        outputDescPtr->SetFormat(format);
        outputDescPtr->SetOriginFormat(format);
      }
    }

    if (node_op_desc->GetType() == DATA_TYPE || node_op_desc->GetType() == AIPP_DATA_TYPE) {
      GELOGD("Data node: %s.", n->GetName().c_str());
      continue;
    }

    GE_IF_BOOL_EXEC(n->GetInAllNodes().empty() && n->GetOutAllNodes().empty(), continue;);

    SetInputIsConst(n);
    if (IsGeLocalOp(n->GetOpDesc())) {
      GE_CHK_STATUS_RET(CalcOutputSize(n), "Calculate output size failed");
    }
    ret = AdjustConstWeightSize(n, weight_offset_);
    GE_CHK_STATUS_RET(ret, "AdjustConstWeightSize failed");

    GE_IF_BOOL_EXEC(((weight_offset_ > 0) && (weight_offset_ % MEM_ALIGN_SIZE != 0)),
                    weight_offset_ = (weight_offset_ + MEM_ALIGN_SIZE - 1) / MEM_ALIGN_SIZE * MEM_ALIGN_SIZE);
  }
  GE_CHK_STATUS_RET(compute_graph_->TopologicalSorting(), "TopologicalSorting failed");
  return SUCCESS;
}

void ModelBuilder::AddNodeInputProperty() {
  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return);
    vector<string> src_name_list;
    vector<int64_t> src_index_list;
    for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
      auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
      GE_IF_BOOL_EXEC(node_op_desc->HasAttr(MERGE_PRENODE_FLAG), continue);

      ge::NodePtr src_node = peer_out_anchor->GetOwnerNode();
      src_name_list.emplace_back(src_node->GetName());
      src_index_list.emplace_back(peer_out_anchor->GetIdx());
    }
    auto in_control_anchor = node->GetInControlAnchor();
    if (in_control_anchor != nullptr) {
      string src_name_temp;
      for (const auto &out_control_anchor : in_control_anchor->GetPeerOutControlAnchors()) {
        ge::NodePtr src_node = out_control_anchor->GetOwnerNode();
        src_name_temp = src_name_temp.empty() ? src_node->GetName() : src_name_temp + ":" + src_node->GetName();
      }
      GE_IF_BOOL_EXEC(!src_name_temp.empty(), src_name_list.emplace_back(src_name_temp);)
    }
    node_op_desc->SetSrcName(src_name_list);
    node_op_desc->SetSrcIndex(src_index_list);
  }

  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return);
    GE_IF_BOOL_EXEC(node_op_desc->GetType() == NETOUTPUT, continue);

    auto out_control_anchor = node->GetOutControlAnchor();
    GE_IF_BOOL_EXEC(out_control_anchor == nullptr, GELOGW("out_control_anchor is nullptr"); return);
    vector<string> dst_name_list;
    vector<int64_t> dst_index_list;
    string dst_name_temp;
    for (const auto &in_control_anchor : out_control_anchor->GetPeerInControlAnchors()) {
      ge::NodePtr dst_node = in_control_anchor->GetOwnerNode();
      dst_name_temp = dst_name_temp.empty() ? dst_node->GetName() : dst_name_temp + ":" + dst_node->GetName();
    }
    GE_IF_BOOL_EXEC(!dst_name_temp.empty(), dst_name_list.emplace_back(dst_name_temp));
    GE_IF_BOOL_EXEC(!out_control_anchor->GetPeerInControlAnchors().empty(),
                    dst_index_list.emplace_back(kInvalidIndexNum));

    for (const auto &out_data_anchor : node->GetAllOutDataAnchors()) {
      GE_IF_BOOL_EXEC(node_op_desc->HasAttr(MERGE_PRENODE_FLAG), break);
      dst_name_temp = "";
      int64_t dst_index = kWrongIndex;  // assign an impossible value to dst_index.
      for (const auto &in_data_anchor : out_data_anchor->GetPeerInDataAnchors()) {
        GE_IF_BOOL_EXEC(in_data_anchor == nullptr, GELOGW("in_data_anchor is nullptr"); return);
        ge::NodePtr dst_node = in_data_anchor->GetOwnerNode();
        dst_name_temp = dst_name_temp.empty() ? dst_node->GetName() : dst_name_temp + ":" + dst_node->GetName();
        dst_index = in_data_anchor->GetIdx();
      }
      GE_IF_BOOL_EXEC(dst_index != kWrongIndex, dst_index_list.emplace_back(dst_index));  // not found
      GE_IF_BOOL_EXEC(!dst_name_temp.empty(), dst_name_list.emplace_back(dst_name_temp));
    }
    node_op_desc->SetDstName(dst_name_list);
    node_op_desc->SetDstIndex(dst_index_list);
  }
}

Status ModelBuilder::AdjustInputTensorFlag() {
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    if ((n->GetType() == DATA_TYPE) || (n->GetType() == AIPP_DATA_TYPE)) {
      GELOGD("Data node: %s.", n->GetName().c_str());
      for (const auto &anchor : n->GetAllOutDataAnchors()) {
        for (const auto &in_anchors : anchor->GetPeerInDataAnchors()) {
          GE_IF_BOOL_EXEC(in_anchors == nullptr, continue);
          auto owner_node = in_anchors->GetOwnerNode();
          auto owner_node_op_desc = owner_node->GetOpDesc();
          GE_IF_BOOL_EXEC(owner_node_op_desc == nullptr, continue);
          auto input_desc = owner_node_op_desc->GetInputDesc(in_anchors->GetIdx());
          ge::TensorUtils::SetInputTensor(input_desc, true);
          if (owner_node_op_desc->UpdateInputDesc(in_anchors->GetIdx(), input_desc) != SUCCESS) {
            GELOGE(FAILED, "UpdateInputDesc failed.");
            return FAILED;
          }
        }
      }
    }
  }
  return SUCCESS;
}

void ModelBuilder::InitL1FusionOption() {
  string buffer_optimize = "off_optimize";
  graphStatus ret = ge::GetContext().GetOption(BUFFER_OPTIMIZE, buffer_optimize);
  if (ret == GRAPH_SUCCESS) {
    is_l1_fusion_enable_ = (buffer_optimize == "l1_optimize");
    GELOGD("The value of %s is %s.", BUFFER_OPTIMIZE.c_str(), buffer_optimize.c_str());
  } else {
    GELOGW("The value of %s is empty.", kEnableL1Fusion.c_str());
  }
}

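// BuildModelDef records the build results as attributes on the model: memory size per memory type
// (HBM and, if present, P2P DDR), weight size, stream/event/label counts, the huge-stream list,
// the zero-copy memory size, the output node names, and the optional fp-ceiling-mode, core-type,
// L1-fusion and op-debug settings, before attaching the graph itself.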
Status ModelBuilder::BuildModelDef(ge::Model &model) {
  ClearOriginalFormat();

  max_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_HBM];
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_MEMORY_SIZE, max_mem_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_MEMORY_SIZE failed.");
                   return FAILED);
  if (mem_type_to_mem_offset_.find(RT_MEMORY_P2P_DDR) != mem_type_to_mem_offset_.end()) {
    p2p_mem_offset_ = mem_type_to_mem_offset_[RT_MEMORY_P2P_DDR];
  }
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_P2P_MEMORY_SIZE, p2p_mem_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_P2P_MEMORY_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_WEIGHT_SIZE, weight_offset_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_WEIGHT_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_STREAM_NUM, stream_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_STREAM_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_EVENT_NUM, event_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_EVENT_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(&model, ATTR_MODEL_HUGE_STREAM_LIST, huge_streams_),
                   GELOGE(FAILED, "SetListInt of ATTR_MODEL_HUGE_STREAM_LIST failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_LABEL_NUM, label_num_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_LABEL_NUM failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetInt(&model, ATTR_MODEL_ZERO_COPY_MEMORY_SIZE, zero_copy_mem_size_),
                   GELOGE(FAILED, "SetInt of ATTR_MODEL_ZERO_COPY_MEMORY_SIZE failed.");
                   return FAILED);
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, ATTR_MODEL_OUT_NODES_NAME, GetLocalOmgContext().net_out_nodes),
                   GELOGE(FAILED, "SetListStr of ATTR_MODEL_OUT_NODES_NAME failed.");
                   return FAILED);
  GELOGI("For model, max_mem_offset_: %zu, p2p_mem_size: %zu, zero_copy_mem_size_: %zu", max_mem_offset_,
         p2p_mem_offset_, zero_copy_mem_size_);

  string fp_ceiling_mode;
  if (ge::GetContext().GetOption("ge.fpCeilingMode", fp_ceiling_mode) == SUCCESS) {
    if (!ge::AttrUtils::SetStr(&model, ATTR_FP_CEILING_MODE, fp_ceiling_mode)) {
      GELOGE(FAILED, "Failed to set attr ATTR_FP_CEILING_MODE");
      return FAILED;
    }
    GELOGI("Set attr ATTR_FP_CEILING_MODE to model, value is %s.", fp_ceiling_mode.c_str());
  }

  string ge_core_type;
  Status ret = ge::GetContext().GetOption(kCoreType, ge_core_type);
  if (ret != SUCCESS) {
    GELOGW("get the option CORE_TYPE fail, set it to default value VECTOR_ENGINE");
  }
  int64_t core_type = (ge_core_type == kVectorCore) ? 1 : 0;
  GELOGI("core_type: %ld", core_type);
  if (!ge::AttrUtils::SetInt(&model, ATTR_MODEL_CORE_TYPE, core_type)) {
    GELOGE(FAILED, "SetInt of ATTR_CORE_TYPE failed.");
  }
  InitL1FusionOption();
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetBool(&model, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_),
                   GELOGE(FAILED, "SetBool of ATTR_NAME_SWITCH_FOR_L1_FUSION failed.");
                   return FAILED);
  const DumpProperties &dump_properties = DumpManager::GetInstance().GetDumpProperties(session_id_);
  bool is_op_debug = dump_properties.IsOpDebugOpen();
  if (is_op_debug) {
    if (!ge::AttrUtils::SetBool(&model, ATTR_OP_DEBUG_FLAG, is_op_debug)) {
      GELOGE(FAILED, "SetBool of ATTR_OP_DEBUG_FLAG failed.");
      return FAILED;
    }
    uint32_t op_debug_mode = dump_properties.GetOpDebugMode();
    GELOGI("Get op debug mode:%d", op_debug_mode);
    if (!ge::AttrUtils::SetInt(&model, ATTR_OP_DEBUG_MODE, op_debug_mode)) {
      GELOGE(FAILED, "SetInt of ATTR_OP_DEBUG_MODE failed.");
      return FAILED;
    }
  }

  model.SetName(compute_graph_->GetName());
  model.SetGraph(ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph_));

  GELOGI("weight_offset_: %zu", weight_offset_);
  GELOGI("Set event num: %ld.", event_num_);

  if (Params::Instance() == nullptr) {
    return FAILED;
  }

  platform_type_ = Params::Instance()->GetTarget_8bit();
  return SUCCESS;
}

void ModelBuilder::ClearOriginalFormat() {
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    if (node_op_desc != nullptr) {
      if (node_op_desc->HasAttr(ATTR_NAME_FORMAT)) {
        if (node_op_desc->DelAttr(ATTR_NAME_FORMAT) != SUCCESS) {
          GELOGW("DelAttr ATTR_NAME_FORMAT failed.");
        }
      }

      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_INFERRED_FORMAT),
          if (node_op_desc->DelAttr(ATTR_NAME_INFERRED_FORMAT) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_INFERRED_FORMAT failed.");
          });

      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_PRED_PERMUTE_DELETED),
          if (node_op_desc->DelAttr(ATTR_NAME_PRED_PERMUTE_DELETED) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_PRED_PERMUTE_DELETED failed.");
          });

      GE_IF_BOOL_EXEC(
          node_op_desc->HasAttr(ATTR_NAME_IGNORE_PRED_FORMAT),
          if (node_op_desc->DelAttr(ATTR_NAME_IGNORE_PRED_FORMAT) != SUCCESS) {
            GELOGW("DelAttr ATTR_NAME_IGNORE_PRED_FORMAT failed.");
          });
    }
  }
}

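// MergeWeights copies the raw data of every CONSTANT node into one contiguous weight buffer of
// weight_offset_ bytes, at the per-tensor offsets assigned earlier by AdjustConstWeightSize.
// Because memcpy_s caps a single copy at SECUREC_MEM_MAX_LEN, larger weights are copied in chunks.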
Status ModelBuilder::MergeWeights() {
  if (weight_offset_ == 0) {
    return SUCCESS;
  }

  ge::Buffer buffer(weight_offset_);
  weight_buffer_ = buffer;
  auto base_addr = weight_buffer_.GetData();

  for (const ge::NodePtr &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(op_desc == nullptr, continue);
    if (node->GetType() != CONSTANT) {
      continue;
    }

    // Get const op weight pointer
    ge::GeTensorPtr weight = nullptr;
    // If MutableTensor failed, weight is nullptr.
    (void)ge::AttrUtils::MutableTensor(op_desc, ATTR_NAME_WEIGHTS, weight);
    if (weight == nullptr) {
      GELOGE(FAILED, "Can't get const op weight, name: %s", node->GetName().c_str());
      return FAILED;
    }

    // Get const op weight offset
    int64_t offset = 0;
    if (ge::TensorUtils::GetDataOffset(weight->GetTensorDesc(), offset) != SUCCESS) {
      GELOGW("Can't get const op offset, name: %s", node->GetName().c_str());
      continue;  // continue to merge if can not get offset
    }

    // Get const op weight data
    auto weight_data = weight->MutableData();

    // copy const op weight data to buffer
    GELOGI("Move to buffer, name: %s offset: %ld size: %zu", node->GetName().c_str(), offset, weight_data.size());
    ge::TensorUtils::SetWeightSize(weight->MutableTensorDesc(), static_cast<uint32_t>(weight_data.size()));
    if ((offset == 0) || (weight_data.size() == 0)) {
      GELOGI("Size or offset is 0. size: %lu offset: %ld", weight_data.size(), offset);
      continue;
    }
    if (weight_data.data() != nullptr) {
      GE_IF_BOOL_EXEC(base_addr == nullptr, GELOGE(FAILED, "Base addr is nullptr."); return FAILED);
      if (weight_offset_ - offset < weight_data.size()) {
        GELOGE(FAILED, "left weight size not enough. left_size:%lu, weight_size:%lu",
               weight_offset_ - offset, weight_data.size());
        return FAILED;
      }
      uintptr_t dst_ptr = reinterpret_cast<uintptr_t>(base_addr) + offset;
      uintptr_t src_ptr = reinterpret_cast<uintptr_t>(weight_data.data());
      size_t left_size = weight_data.size();
      while (left_size > SECUREC_MEM_MAX_LEN) {
        auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), SECUREC_MEM_MAX_LEN, reinterpret_cast<void *>(src_ptr),
                            SECUREC_MEM_MAX_LEN);
        if (err != EOK) {
          GELOGE(FAILED, "mem copy failed. errret:%u, "
                 "dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
                 err, dst_ptr, SECUREC_MEM_MAX_LEN, src_ptr, SECUREC_MEM_MAX_LEN);
          return FAILED;
        }
        left_size -= SECUREC_MEM_MAX_LEN;
        dst_ptr = dst_ptr + SECUREC_MEM_MAX_LEN;
        src_ptr = src_ptr + SECUREC_MEM_MAX_LEN;
      }
      auto err = memcpy_s(reinterpret_cast<void *>(dst_ptr), left_size, reinterpret_cast<void *>(src_ptr), left_size);
      if (err != EOK) {
        GELOGE(FAILED, "mem copy failed. errret:%u, "
               "dst_ptr:%lx, dst_size:%lu, src_ptr:%lx, src_size:%lu",
               err, dst_ptr, left_size, src_ptr, left_size);
        return FAILED;
      }
    }
    weight->ClearData();
  }

  return SUCCESS;
}

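// SaveDataToModel packs the build artifacts into the GeModel: the merged weight buffer, the TBE
// and custom AICPU kernel stores collected from the graph nodes, the task definition deserialized
// from MODEL_ATTR_TASKS, and the graph together with its name, version and attribute map.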
Status ModelBuilder::SaveDataToModel(ge::Model &model, ge::GeModel &ge_model) {
  // Add weight
  ge_model.SetWeight(weight_buffer_);

  // Add TBE Kernels and custom aicpu op bin
  std::set<std::string> tbe_name_set;
  std::set<std::string> aicpu_name_set;
  std::set<std::string> aicpu_op_types;
  std::set<std::string> aicpu_tf_op_types;
  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
    // check aicpu op type
    CollectCheckAicpuAttr(node_op_desc, aicpu_op_types, aicpu_tf_op_types);
    TBEKernelPtr tbe_kernel = node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
    if (tbe_kernel == nullptr) {
      std::string kernel_name;
      GeAttrValue::BYTES kernel_buffer;
      (void) AttrUtils::GetStr(node_op_desc, ATTR_NAME_TBE_KERNEL_NAME, kernel_name);
      (void) AttrUtils::GetBytes(node_op_desc, ATTR_NAME_TBE_KERNEL_BUFFER, kernel_buffer);
      if (!kernel_name.empty() && (kernel_buffer.GetSize() > 0)) {
        GE_CHECK_NOTNULL(kernel_buffer.GetData());
        std::vector<char> data(kernel_buffer.GetData(), kernel_buffer.GetData() + kernel_buffer.GetSize());
        tbe_kernel = std::make_shared<OpKernelBin>(kernel_name, std::move(data));
      }
    }
    GE_IF_BOOL_EXEC(tbe_kernel == nullptr, continue);
    if (tbe_name_set.count(tbe_kernel->GetName()) > 0) {
      GELOGE(FAILED, "tbe_kernel name %s can't be the same", tbe_kernel->GetName().c_str());
      return FAILED;
    }
    tbe_name_set.insert(tbe_kernel->GetName());
    tbe_kernel_store_.AddTBEKernel(tbe_kernel);
  }

  SetModelCheckAicpuAttr(model, aicpu_op_types, aicpu_tf_op_types);

  for (const ge::NodePtr &n : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto node_op_desc = n->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);
    CustAICPUKernelPtr cust_aicpu_kernel =
        node_op_desc->TryGetExtAttr(ge::OP_EXTATTR_CUSTAICPU_KERNEL, CustAICPUKernelPtr());
    GE_IF_BOOL_EXEC(cust_aicpu_kernel == nullptr, continue);
    if (aicpu_name_set.count(cust_aicpu_kernel->GetName()) > 0) {
      GELOGE(FAILED, "aicpu_kernel name %s can't be the same", cust_aicpu_kernel->GetName().c_str());
      return FAILED;
    }
    aicpu_name_set.insert(cust_aicpu_kernel->GetName());
    cust_aicpu_kernel_store_.AddCustAICPUKernel(cust_aicpu_kernel);
    GELOGI("Add cust aicpu kernel bin %s", cust_aicpu_kernel->GetName().c_str());
  }

  if (!tbe_kernel_store_.Build()) {
    GELOGE(FAILED, "TBE Kernels store build failed!");
    return FAILED;
  }
  if (!cust_aicpu_kernel_store_.Build()) {
    GELOGE(FAILED, "custom AICPU kernels store build failed!");
    return FAILED;
  }
  ge_model.SetTBEKernelStore(tbe_kernel_store_);
  ge_model.SetCustAICPUKernelStore(cust_aicpu_kernel_store_);

  // Add task
  GeAttrValue::BYTES task_def_bytes;
  if (!AttrUtils::GetZeroCopyBytes(model, MODEL_ATTR_TASKS, task_def_bytes)) {
    GELOGE(INTERNAL_ERROR, "Get zero copy bytes fail.");
    return INTERNAL_ERROR;
  }
  int byte_size = static_cast<int>(task_def_bytes.GetSize());
  std::shared_ptr<domi::ModelTaskDef> task = ge::MakeShared<domi::ModelTaskDef>();
  GE_CHECK_NOTNULL(task);
  GE_CHK_BOOL_EXEC(ReadProtoFromArray(task_def_bytes.GetData(), byte_size, task.get()), return INTERNAL_ERROR,
                   "ReadProtoFromArray failed.");
  ge_model.SetModelTaskDef(task);

  // Add graph
  ge_model.SetName(model.GetName());
  ge_model.SetGraph(model.GetGraph());
  ge_model.SetVersion(model.GetVersion());
  ge_model.SetPlatformVersion(model.GetPlatformVersion());
  ge_model.SetPlatformType(platform_type_);
  ge_model.SetAttr(model.MutableAttrMap());
  return SUCCESS;
}

void ModelBuilder::SetModelVersion(ge::Model &model) {
  // set framework_version TO model
  string framework_version;
  uint32_t counter = 0;
  Status frame_rt = PlatformVersionManager::GetPlatformVersion(framework_version);
  GE_IF_BOOL_EXEC((frame_rt == SUCCESS),
                  string model_framework_version = framework_version + "." + std::to_string(counter);
                  model.SetPlatformVersion(model_framework_version););

  // set IR Version TO model
  model.SetVersion(static_cast<uint32_t>(OM_PROTO_VERSION));
}

Status ModelBuilder::PreBuildModel() {
  if ((compute_graph_ == nullptr) || !(compute_graph_->IsValid())) {
    GELOGE(FAILED, "Graph_ is not valid.");
    return FAILED;
  }
  GE_CHK_STATUS_RET(SetInputOutputDesc(), "SetInputOutputDesc Failed!");
  AddNodeInputProperty();
  return SUCCESS;
}

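// BuildModelForGetTask runs the main build pipeline: assign logical streams, read the label count,
// assign memory offsets, set input/output offsets, compile single ops, refresh real streams and
// events, merge constant weights, and finally fill in the model definition and version.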
Status ModelBuilder::BuildModelForGetTask(ge::Model &model) {
  GE_CHK_STATUS_RET(AdjustInputTensorFlag(), "AdjustInputTensorFlag failed!");

  // Assign logical streams.
  StreamAllocator stream_allocator(compute_graph_, subgraphs_);
  GE_TIMESTAMP_START(AssignLogicalStreams);
  GE_CHK_STATUS_RET(stream_allocator.AssignLogicalStreams(stream_max_parallel_num_, hcom_parallel_),
                    "Assign logical streams failed.");
  GE_TIMESTAMP_END(AssignLogicalStreams, "GraphBuilder::AssignLogicalStreams");

  // Assign functional op labels.
  auto root_graph = GraphUtils::FindRootGraph(compute_graph_);
  (void)AttrUtils::GetInt(*root_graph, ATTR_MODEL_LABEL_NUM, label_num_);

  GE_TIMESTAMP_START(AssignMemory);
  MemoryAssigner mem_assigner(compute_graph_);
  GE_CHK_STATUS_RET(mem_assigner.AssignMemory(is_loop_graph_, mem_type_to_mem_offset_, zero_copy_mem_size_),
                    "Assign Memory Failed!");
  GE_TIMESTAMP_END(AssignMemory, "GraphBuilder::AssignMemory");

  GE_TIMESTAMP_START(SetInputOutputOffset);
  SetInputOutputOffsetPass input_output_offset;
  GE_CHK_STATUS_RET(input_output_offset.Run(compute_graph_), "Set input output offset failed.");
  GE_TIMESTAMP_END(SetInputOutputOffset, "SetInputOutputOffsetPass::Run.");

  // Compile single op in graph build stage
  GE_TIMESTAMP_START(CompileSingleOp);
  GE_CHK_STATUS_RET(CompileSingleOp(), "ATC builder CompileSingleOp() return fail.");
  GE_TIMESTAMP_EVENT_END(CompileSingleOp, "GraphBuilder::CompileSingleOp");

  // Refresh real streams and insert event nodes.
  GE_TIMESTAMP_START(RefreshRealStream);
  GE_CHK_STATUS_RET(stream_allocator.RefreshRealStream(stream_num_, event_num_), "RefreshRealStream failed.");
  huge_streams_ = stream_allocator.GetHugeStreams();
  GE_TIMESTAMP_END(RefreshRealStream, "GraphBuilder::RefreshRealStream");

  GE_TIMESTAMP_START(MergeWeights);
  GE_CHK_STATUS_RET(MergeWeights(), "MergeWeights Failed!");
  GE_TIMESTAMP_END(MergeWeights, "GraphBuilder::MergeWeights");

  GE_TIMESTAMP_START(BuildModelDef);
  GE_CHK_STATUS_RET(BuildModelDef(model), "BuildModelDef failed!");
  GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");

  SetModelVersion(model);
  return SUCCESS;
}

Status ModelBuilder::BuildModelForGetDynShapeTask(ge::Model &model_def) {
  GE_TIMESTAMP_START(BuildModelDef);
  GE_CHK_STATUS_RET(BuildModelDef(model_def), "BuildModelDef failed!");
  GE_TIMESTAMP_END(BuildModelDef, "GraphBuilder::BuildModelDef");
  SetModelVersion(model_def);
  return SUCCESS;
}

ge::Buffer ModelBuilder::GetWeightBuffer() const { return weight_buffer_; }

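// CompileSingleOp batch-compiles the operators that are compiled at graph-build stage (currently
// only ATOMICADDRCLEAN), grouping the nodes by their op kernel library and invoking CompileOp once
// per library.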
Status ModelBuilder::CompileSingleOp() {
  GELOGD("Begin to compile single op.");
  // Create ge instance
  std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  if ((instance == nullptr) || !instance->InitFlag()) {
    GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "CompileSingleOp failed.");
    return ge::GE_CLI_GE_NOT_INITIALIZED;
  }

  GE_TIMESTAMP_CALLNUM_START(BatchCompileOp);
  std::unordered_map<string, vector<ge::NodePtr>> node_vector_map;
  for (auto &node : compute_graph_->GetNodes(compute_graph_->GetGraphUnknownFlag())) {
    auto op_desc = node->GetOpDesc();
    if (op_desc == nullptr) {
      continue;
    }

    // Graph build stage only supports the individual compilation of atomic clean operator
    if (op_desc->GetType() == ATOMICADDRCLEAN) {
      GELOGD("Begin to compile single op, op name is %s.", op_desc->GetName().c_str());
      string kernel_lib_name = op_desc->GetOpKernelLibName();
      if (kernel_lib_name.empty()) {
        // Reset op kernel lib
        (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
        kernel_lib_name = op_desc->GetOpKernelLibName();
        if (kernel_lib_name.empty()) {
          GELOGE(ge::INTERNAL_ERROR, "Get node:%s(%s) kernel lib failed.", node->GetName().c_str(),
                 node->GetType().c_str());
          return ge::INTERNAL_ERROR;
        }
      }

      OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
      if (kernel_info != nullptr) {
        node_vector_map[kernel_lib_name].emplace_back(node);
      } else {
        GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
        return ge::GE_GRAPH_PARAM_NULLPTR;
      }
    }
  }

  for (auto &it : node_vector_map) {
    auto &kernel_lib_name = it.first;
    auto &node_vector = it.second;
    OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
    GE_CHECK_NOTNULL(kernel_info);
    GE_TIMESTAMP_RESTART(BatchCompileOp);
    auto ret = kernel_info->CompileOp(node_vector);
    GELOGI("[GEPERFTRACE] The node size of compile op of %s is %zu", kernel_lib_name.c_str(), node_vector.size());
    GE_TIMESTAMP_ADD(BatchCompileOp);
    if (ret != ge::SUCCESS) {
      GELOGE(ret, "Compile op failed, kernel lib name is %s", kernel_lib_name.c_str());
      return ret;
    }
  }
  GE_TIMESTAMP_CALLNUM_END(BatchCompileOp, "GraphBuild::CompileOp");
  return ge::SUCCESS;
}

void ModelBuilder::CollectCheckAicpuAttr(const OpDescPtr &op_desc, std::set<std::string> &aicpu_op_types,
                                         std::set<std::string> &aicpu_tf_op_types) {
  std::string aicpu_optype;
  bool has_attr_check_cpu = ge::AttrUtils::GetStr(op_desc, "needCheckCpu", aicpu_optype);
  std::vector<std::string> tf_optypes;
  bool has_attr_check_tf = ge::AttrUtils::GetListStr(op_desc, "needCheckTf", tf_optypes);
  if (has_attr_check_cpu && !aicpu_optype.empty()) {
    aicpu_op_types.insert(aicpu_optype);
  }

  if (has_attr_check_tf && !tf_optypes.empty()) {
    aicpu_tf_op_types.insert(tf_optypes.begin(), tf_optypes.end());
  }

  return;
}

void ModelBuilder::SetModelCheckAicpuAttr(ge::Model &model, std::set<std::string> &aicpu_op_types,
                                          std::set<std::string> &aicpu_tf_op_types) {
  std::vector<std::string> aicpu_optype_list;
  std::vector<std::string> aicpu_tf_optype_list;
  if (ge::AttrUtils::GetListStr(&model, "needCheckCpu", aicpu_optype_list)) {
    GELOGI("Already have aicpu optype size: %zu", aicpu_optype_list.size());
    aicpu_op_types.insert(aicpu_optype_list.begin(), aicpu_optype_list.end());
  }

  if (ge::AttrUtils::GetListStr(&model, "needCheckTf", aicpu_tf_optype_list)) {
    GELOGI("Already have aicpu tf optype size: %zu", aicpu_tf_optype_list.size());
    aicpu_tf_op_types.insert(aicpu_tf_optype_list.begin(), aicpu_tf_optype_list.end());
  }

  // reset list with set
  aicpu_optype_list.assign(aicpu_op_types.begin(), aicpu_op_types.end());
  aicpu_tf_optype_list.assign(aicpu_tf_op_types.begin(), aicpu_tf_op_types.end());
  GELOGI(
      "Check Aicpu op types ComputeGraph: %s aicpu_op_types: %zu, aicpu_optype_list: %zu, aicpu_tf_op_types: %zu, "
      "aicpu_tf_optype_list:%zu.",
      compute_graph_->GetName().c_str(), aicpu_op_types.size(), aicpu_optype_list.size(), aicpu_tf_op_types.size(),
      aicpu_tf_optype_list.size());
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckCpu", aicpu_optype_list), return,
                   "Set attr needCheckCpu fail.");
  GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListStr(&model, "needCheckTf", aicpu_tf_optype_list), return,
                   "Set attr needCheckTf fail.");

  return;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and bridges the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core.
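As a rough illustration of how the ModelBuilder defined in model_builder.cc fits into this build flow, the following minimal sketch strings its public entry points together. It is hypothetical driver code, not part of the repository: the function name BuildGeModel and the assumption that the compute graph, subgraph list, session id, and option values come from earlier GE build stages (including the task-generation step that stores MODEL_ATTR_TASKS on the model) are illustrative only.

#include <map>
#include <string>
#include "graph/build/model_builder.h"

// Hypothetical usage sketch of ge::ModelBuilder; the inputs are assumed to be produced by
// earlier GE build stages.
ge::Status BuildGeModel(uint64_t session_id, ge::ComputeGraphPtr graph,
                        const ge::Graph2SubGraphInfoList &subgraphs,
                        const std::map<std::string, int> &stream_max_parallel_num,
                        ge::GeModel &ge_model) {
  // hcom_parallel = false and mode = 0 are example values, not recommended defaults.
  ge::ModelBuilder builder(session_id, graph, subgraphs, stream_max_parallel_num,
                           false /* hcom_parallel */, 0 /* mode */);
  // Normalize input/output descriptors and source/destination node properties.
  GE_CHK_STATUS_RET(builder.PreBuildModel(), "PreBuildModel failed.");
  ge::Model model;
  // Assign streams and memory, compile single ops, merge weights, fill model attributes.
  GE_CHK_STATUS_RET(builder.BuildModelForGetTask(model), "BuildModelForGetTask failed.");
  // ... task generation would normally run here and set MODEL_ATTR_TASKS on `model` ...
  // Pack weights, kernel stores, tasks and the graph into the serializable GeModel.
  GE_CHK_STATUS_RET(builder.SaveDataToModel(model, ge_model), "SaveDataToModel failed.");
  return ge::SUCCESS;
}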