
davinci_model_unittest.cc (34 kB)

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

// Expose private/protected members of DavinciModel for white-box testing.
#define private public
#define protected public
#include "graph/utils/graph_utils.h"
#include "common/profiling/profiling_manager.h"
#include "graph/load/model_manager/davinci_model.h"

using namespace std;

namespace ge {
extern OpDescPtr CreateOpDesc(string name, string type);

class UtestDavinciModel : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};
/*
TEST_F(UtestDavinciModel, init_success) {
  DavinciModel model(0, nullptr);
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");
  ProfilingManager::Instance().is_load_profiling_ = true;

  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
  AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 5120000);
  AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1);

  shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
  ge_model->SetModelTaskDef(model_task_def);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_input = CreateOpDesc("data", DATA);
  op_input->AddInputDesc(tensor);
  op_input->AddOutputDesc(tensor);
  op_input->SetInputOffset({1024});
  op_input->SetOutputOffset({1024});
  NodePtr node_input = graph->AddNode(op_input);  // op_index = 0

  OpDescPtr op_kernel = CreateOpDesc("square", "Square");
  op_kernel->AddInputDesc(tensor);
  op_kernel->AddOutputDesc(tensor);
  op_kernel->SetInputOffset({1024});
  op_kernel->SetOutputOffset({1024});
  NodePtr node_kernel = graph->AddNode(op_kernel);  // op_index = 1

  OpDescPtr op_memcpy = CreateOpDesc("memcpy", MEMCPYASYNC);
  op_memcpy->AddInputDesc(tensor);
  op_memcpy->AddOutputDesc(tensor);
  op_memcpy->SetInputOffset({1024});
  op_memcpy->SetOutputOffset({5120});
  NodePtr node_memcpy = graph->AddNode(op_memcpy);  // op_index = 2

  OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
  op_output->AddInputDesc(tensor);
  op_output->SetInputOffset({5120});
  op_output->SetSrcName({"memcpy"});
  op_output->SetSrcIndex({0});
  NodePtr node_output = graph->AddNode(op_output);  // op_index = 3

  domi::TaskDef *task_def1 = model_task_def->add_task();
  task_def1->set_stream_id(0);
  task_def1->set_type(RT_MODEL_TASK_KERNEL);
  domi::KernelDef *kernel_def = task_def1->mutable_kernel();
  kernel_def->set_stub_func("stub_func");
  kernel_def->set_args_size(64);
  string args(64, '1');
  kernel_def->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_def->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));

  domi::TaskDef *task_def2 = model_task_def->add_task();
  task_def2->set_stream_id(0);
  task_def2->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
  domi::MemcpyAsyncDef *memcpy_async = task_def2->mutable_memcpy_async();
  memcpy_async->set_src(1024);
  memcpy_async->set_dst(5120);
  memcpy_async->set_dst_max(512);
  memcpy_async->set_count(1);
  memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
  memcpy_async->set_op_index(2);

  EXPECT_EQ(model.Assign(ge_model), SUCCESS);
  EXPECT_EQ(model.Init(), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 1);
  EXPECT_EQ(model.task_list_.size(), 2);

  OutputData output_data;
  vector<OutputTensorInfo> outputs;
  EXPECT_EQ(model.GenOutputTensorInfo(&output_data, outputs), SUCCESS);
  EXPECT_EQ(output_data.blobs.size(), 1);
  EXPECT_EQ(outputs.size(), 1);
  ProfilingManager::Instance().is_load_profiling_ = false;
}
*/
TEST_F(UtestDavinciModel, init_data_op) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_input = CreateOpDesc("data", DATA);
  op_input->AddInputDesc(tensor);
  op_input->AddOutputDesc(tensor);
  op_input->SetInputOffset({1024});
  op_input->SetOutputOffset({1024});
  NodePtr node_input = graph->AddNode(op_input);

  OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
  op_output->AddInputDesc(tensor);
  op_output->SetInputOffset({1024});
  op_output->SetSrcName({"data"});
  op_output->SetSrcIndex({0});
  NodePtr node_output = graph->AddNode(op_output);

  EXPECT_EQ(model.InitNodes(graph), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 1);
  EXPECT_EQ(model.op_list_.size(), 2);
}
TEST_F(UtestDavinciModel, init_data_op_subgraph) {
  DavinciModel model(0, nullptr);
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_input = CreateOpDesc("data", DATA);
  op_input->AddInputDesc(tensor);
  op_input->AddOutputDesc(tensor);
  op_input->SetInputOffset({1024});
  op_input->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_input);

  uint32_t data_op_index = 0;
  map<uint32_t, OpDescPtr> data_by_index;
  set<const void *> input_outside_addrs;
  EXPECT_EQ(model.InitDataOp(nullptr, node, data_op_index, data_by_index, input_outside_addrs), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 0);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(data_op_index, 0);
  EXPECT_TRUE(data_by_index.empty());
}
TEST_F(UtestDavinciModel, init_netoutput_op_subgraph) {
  DavinciModel model(0, nullptr);
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
  op_output->AddInputDesc(tensor);
  op_output->SetInputOffset({1024});
  op_output->SetSrcName({"data"});
  op_output->SetSrcIndex({0});
  NodePtr node = graph->AddNode(op_output);

  std::vector<OpDescPtr> output_op_list;
  set<const void *> output_outside_addrs;
  EXPECT_EQ(model.InitNetOutput(nullptr, node, output_op_list, output_outside_addrs), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 0);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_TRUE(output_op_list.empty());
}
TEST_F(UtestDavinciModel, init_unknown) {
  DavinciModel model(0, nullptr);
  model.SetKnownNode(true);
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
  AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 5120000);
  AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1);

  shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
  ge_model->SetModelTaskDef(model_task_def);

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_input = CreateOpDesc("data", DATA);
  op_input->AddInputDesc(tensor);
  op_input->AddOutputDesc(tensor);
  op_input->SetInputOffset({1024});
  op_input->SetOutputOffset({1024});
  NodePtr node_input = graph->AddNode(op_input);  // op_index = 0

  OpDescPtr op_kernel = CreateOpDesc("square", "Square");
  op_kernel->AddInputDesc(tensor);
  op_kernel->AddOutputDesc(tensor);
  op_kernel->SetInputOffset({1024});
  op_kernel->SetOutputOffset({1024});
  NodePtr node_kernel = graph->AddNode(op_kernel);  // op_index = 1

  OpDescPtr op_memcpy = CreateOpDesc("memcpy", MEMCPYASYNC);
  op_memcpy->AddInputDesc(tensor);
  op_memcpy->AddOutputDesc(tensor);
  op_memcpy->SetInputOffset({1024});
  op_memcpy->SetOutputOffset({5120});
  NodePtr node_memcpy = graph->AddNode(op_memcpy);  // op_index = 2

  OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
  op_output->AddInputDesc(tensor);
  op_output->SetInputOffset({5120});
  op_output->SetSrcName({"memcpy"});
  op_output->SetSrcIndex({0});
  NodePtr node_output = graph->AddNode(op_output);  // op_index = 3

  domi::TaskDef *task_def1 = model_task_def->add_task();
  task_def1->set_stream_id(0);
  task_def1->set_type(RT_MODEL_TASK_KERNEL);
  domi::KernelDef *kernel_def = task_def1->mutable_kernel();
  kernel_def->set_stub_func("stub_func");
  kernel_def->set_args_size(64);
  string args(64, '1');
  kernel_def->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_def->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));

  domi::TaskDef *task_def2 = model_task_def->add_task();
  task_def2->set_stream_id(0);
  task_def2->set_type(RT_MODEL_TASK_MEMCPY_ASYNC);
  domi::MemcpyAsyncDef *memcpy_async = task_def2->mutable_memcpy_async();
  memcpy_async->set_src(1024);
  memcpy_async->set_dst(5120);
  memcpy_async->set_dst_max(512);
  memcpy_async->set_count(1);
  memcpy_async->set_kind(RT_MEMCPY_DEVICE_TO_DEVICE);
  memcpy_async->set_op_index(2);

  EXPECT_EQ(model.Assign(ge_model), SUCCESS);
  EXPECT_EQ(model.Init(), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 1);
  EXPECT_EQ(model.task_list_.size(), 2);

  EXPECT_EQ(model.task_list_[0]->UpdateArgs(), SUCCESS);
  EXPECT_EQ(model.task_list_[1]->UpdateArgs(), SUCCESS);

  vector<string> out_shape_info;
  model.GetModelAttr(out_shape_info);

  vector<InputOutputDescInfo> input_descs;
  vector<InputOutputDescInfo> output_descs;
  EXPECT_EQ(model.GetInputOutputDescInfo(input_descs, output_descs), SUCCESS);

  int32_t virtual_addr = 0;
  const vector<void *> inputs = {&virtual_addr};
  const vector<void *> outputs = {&virtual_addr};
  EXPECT_EQ(model.UpdateKnownNodeArgs(inputs, outputs), SUCCESS);
}
TEST_F(UtestDavinciModel, Init_variable_op) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr var1 = CreateOpDesc("var1", VARIABLE);
  var1->AddInputDesc(tensor);
  var1->AddOutputDesc(tensor);
  var1->SetInputOffset({1024});
  var1->SetOutputOffset({1024});
  AttrUtils::SetBool(var1, VAR_ATTR_VAR_IS_BROADCAST, true);
  graph->AddNode(var1);

  OpDescPtr var2 = CreateOpDesc(NODE_NAME_GLOBAL_STEP, VARIABLE);
  var2->AddInputDesc(tensor);
  var2->AddOutputDesc(tensor);
  var2->SetInputOffset({1024});
  var2->SetOutputOffset({1024});
  graph->AddNode(var2);

  EXPECT_EQ(model.InitNodes(graph), SUCCESS);

  EXPECT_EQ(model.ReturnNoOutput(1), PARAM_INVALID);
  EXPECT_EQ(model.SyncVarData(), SUCCESS);
}
TEST_F(UtestDavinciModel, InitRealSizeAndShapeInfo_succ1) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  OpDescPtr op_output = CreateOpDesc("output_ascend_mbatch_batch_1", NETOUTPUT);
  op_output->AddInputDesc(tensor);
  op_output->SetInputOffset({1024});
  NodePtr node_output = graph->AddNode(op_output);

  EXPECT_EQ(model.InitRealSizeAndShapeInfo(graph, node_output), SUCCESS);
}
TEST_F(UtestDavinciModel, InitRealSizeAndShapeInfo_succ2) {
  DavinciModel model(0, nullptr);
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test_graph");

  OpDescPtr data1 = CreateOpDesc("data1", DATA);
  GeTensorDesc shape_desc(GeShape({4,3,224,224}), FORMAT_NCHW, DT_FLOAT);
  data1->AddInputDesc(shape_desc);
  data1->AddOutputDesc(shape_desc);
  NodePtr data1_node = graph->AddNode(data1);

  OpDescPtr case_node = CreateOpDesc("case1", CASE);
  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  case_node->AddInputDesc(tensor);
  case_node->AddOutputDesc(tensor);
  NodePtr case1_node = graph->AddNode(case_node);

  OpDescPtr output = CreateOpDesc("output1", NETOUTPUT);
  output->AddInputDesc(tensor);
  output->SetSrcName({"case1"});
  output->SetSrcIndex({0});
  NodePtr output_node = graph->AddNode(output);

  GraphUtils::AddEdge(data1_node->GetOutDataAnchor(0), case1_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(case1_node->GetOutDataAnchor(0), output_node->GetInDataAnchor(0));

  (void)AttrUtils::SetStr(output_node->GetOpDesc(), ATTR_ALL_GEARS_INFO, "1;2;4;8");
  (void)AttrUtils::SetBool(case_node, ATTR_INSERT_BY_MBATCH, true);

  model.is_getnext_sink_dynamic_ = false;
  model.is_online_infer_dynamic_ = true;
  auto ret = model.InitRealSizeAndShapeInfo(graph, output_node);
  // GetGearAndRealOutShapeInfo without ATTR_NAME_DYNAMIC_OUTPUT_DIMS
  EXPECT_EQ(ret, SUCCESS);

  vector<string> dynamic_output_dims = {"0,0,1,1,0,2,2,0,4,3,0,8"};
  (void)AttrUtils::SetListStr(output_node->GetOpDesc(), ATTR_NAME_DYNAMIC_OUTPUT_DIMS, dynamic_output_dims);
  ret = model.InitRealSizeAndShapeInfo(graph, output_node);
  EXPECT_EQ(ret, SUCCESS);
}
TEST_F(UtestDavinciModel, InitRealSizeAndShapeInfo_succ3) {
  DavinciModel model(0, nullptr);
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test_graph");

  OpDescPtr data1 = CreateOpDesc("data1", DATA);
  GeTensorDesc shape_desc(GeShape({4,3,224,224}), FORMAT_NCHW, DT_FLOAT);
  data1->AddInputDesc(shape_desc);
  data1->AddOutputDesc(shape_desc);
  NodePtr data1_node = graph->AddNode(data1);

  OpDescPtr shape_node = CreateOpDesc("ascend_mbatch_get_dynamic_dims_node", GETDYNAMICDIMS);
  GeTensorDesc in_tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  GeTensorDesc out_tensor(GeShape({4,3}), FORMAT_NCHW, DT_FLOAT);
  shape_node->AddInputDesc(in_tensor);
  shape_node->AddOutputDesc(out_tensor);
  NodePtr get_dynamic_dims_node = graph->AddNode(shape_node);

  OpDescPtr output = CreateOpDesc("output1", NETOUTPUT);
  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  output->AddInputDesc(tensor);
  output->SetSrcName({"data1", "ascend_mbatch_get_dynamic_dims_node"});
  output->SetSrcIndex({0, 1});
  NodePtr output_node = graph->AddNode(output);

  GraphUtils::AddEdge(data1_node->GetOutDataAnchor(0), output_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(get_dynamic_dims_node->GetOutDataAnchor(0), output_node->GetInDataAnchor(1));

  (void)AttrUtils::SetStr(output_node->GetOpDesc(), ATTR_ALL_GEARS_INFO, "1,3;;4,3;,3");
  model.is_getnext_sink_dynamic_ = true;
  model.is_online_infer_dynamic_ = false;
  auto ret = model.InitRealSizeAndShapeInfo(graph, output_node);
  EXPECT_EQ(ret, SUCCESS);

  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 4;
  ret = model.InitRealSizeAndShapeInfo(graph, output_node);
  EXPECT_EQ(ret, SUCCESS);
}
TEST_F(UtestDavinciModel, init_data_aipp_info) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);

  GeAttrValue::NAMED_ATTRS aipp_attr;
  aipp_attr.SetAttr("aipp_mode", GeAttrValue::CreateFrom<GeAttrValue::INT>(domi::AippOpParams::dynamic));
  aipp_attr.SetAttr("related_input_rank", GeAttrValue::CreateFrom<GeAttrValue::INT>(0));
  aipp_attr.SetAttr("max_src_image_size", GeAttrValue::CreateFrom<GeAttrValue::INT>(2048));
  aipp_attr.SetAttr("support_rotation", GeAttrValue::CreateFrom<GeAttrValue::INT>(1));
  EXPECT_TRUE(AttrUtils::SetNamedAttrs(op_desc, ATTR_NAME_AIPP, aipp_attr));

  AippConfigInfo aipp_info;
  EXPECT_EQ(model.GetAippInfo(0, aipp_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAippInfo(0, aipp_info), SUCCESS);
  EXPECT_EQ(aipp_info.aipp_mode, domi::AippOpParams::dynamic);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_static) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);
  AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "static_aipp");

  InputAippType aipp_type;
  size_t aipp_index = 0;
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), PARAM_INVALID);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
  EXPECT_EQ(aipp_type, DATA_WITH_STATIC_AIPP);
  EXPECT_EQ(aipp_index, 0xFFFFFFFFu);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_dynamic) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0
  AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp");
  AttrUtils::SetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, "releated_aipp");

  InputAippType aipp_type;
  size_t aipp_index = 0;
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), PARAM_INVALID);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_releated) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  {
    OpDescPtr op_desc = CreateOpDesc("data", DATA);
    op_desc->AddInputDesc(tensor);
    op_desc->AddOutputDesc(tensor);
    op_desc->SetInputOffset({1024});
    op_desc->SetOutputOffset({1024});
    NodePtr node = graph->AddNode(op_desc);  // op_index 0
    AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp");
    AttrUtils::SetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, "releated_aipp");
  }
  {
    OpDescPtr op_desc = CreateOpDesc("releated_aipp", DATA);
    op_desc->AddInputDesc(tensor);
    op_desc->AddOutputDesc(tensor);
    op_desc->SetInputOffset({1024});
    op_desc->SetOutputOffset({1024});
    NodePtr node = graph->AddNode(op_desc);  // op_index 1
  }

  InputAippType aipp_type;
  size_t aipp_index = 0;
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), PARAM_INVALID);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
  EXPECT_EQ(aipp_type, DATA_WITH_DYNAMIC_AIPP);
  EXPECT_EQ(aipp_index, 1);

  EXPECT_EQ(model.input_addrs_list_.size(), 2);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 2);
}
TEST_F(UtestDavinciModel, init_data_aipp_dynamic_conf) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0
  AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp_conf");

  InputAippType aipp_type;
  size_t aipp_index = 0;
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), PARAM_INVALID);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
  EXPECT_EQ(aipp_type, DYNAMIC_AIPP_NODE);
  EXPECT_EQ(aipp_index, 0xFFFFFFFFU);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_dynamic_invalid) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0
  AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp_invalid");

  InputAippType aipp_type;
  size_t aipp_index = 0;
  EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), PARAM_INVALID);
  EXPECT_EQ(model.InitNodes(graph), ACL_ERROR_GE_AIPP_MODE_INVALID);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_input_info_empty) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0

  vector<string> inputs = {};
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
  vector<string> outputs = {};
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

  OriginInputInfo orig_input_info;
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_input_info_normal) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0

  vector<string> inputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
  vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

  OriginInputInfo orig_input_info;
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_input_info_invalid) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0

  vector<string> inputs = { "NCHW:DT_FLOAT:TensorName" };  // Invalid
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
  vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

  OriginInputInfo orig_input_info;
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
  EXPECT_EQ(model.InitNodes(graph), ACL_ERROR_GE_AIPP_MODE_INVALID);
  EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
TEST_F(UtestDavinciModel, init_data_aipp_input_dims_normal) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();  // for CustAICPUKernelStore::GetCustAICPUKernelStore()
  model.runtime_param_.mem_base = (uint8_t *)0x08000000;
  model.runtime_param_.mem_size = 5120000;
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);

  OpDescPtr op_desc = CreateOpDesc("data", DATA);
  op_desc->AddInputDesc(tensor);
  op_desc->AddOutputDesc(tensor);
  op_desc->SetInputOffset({1024});
  op_desc->SetOutputOffset({1024});
  NodePtr node = graph->AddNode(op_desc);  // op_index 0

  vector<string> inputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
  vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
  AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

  vector<InputOutputDims> input_dims;
  vector<InputOutputDims> output_dims;
  EXPECT_EQ(model.GetAllAippInputOutputDims(0, input_dims, output_dims), ACL_ERROR_GE_AIPP_NOT_EXIST);
  EXPECT_EQ(model.InitNodes(graph), SUCCESS);
  EXPECT_EQ(model.GetAllAippInputOutputDims(0, input_dims, output_dims), SUCCESS);
  EXPECT_EQ(input_dims.size(), 1);
  EXPECT_EQ(output_dims.size(), 1);

  EXPECT_EQ(model.input_addrs_list_.size(), 1);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.op_list_.size(), 1);
}
/*
// test label_set_task Init
TEST_F(UtestDavinciModel, label_task_success) {
  DavinciModel model(0, nullptr);
  ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
  AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 5120000);
  AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1);

  shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
  ge_model->SetModelTaskDef(model_task_def);

  GeTensorDesc tensor(GeShape(), FORMAT_ND, DT_INT32);
  TensorUtils::SetSize(tensor, 64);

  uint32_t op_index = 0;
  {
    OpDescPtr op_desc = CreateOpDesc("label_switch", LABELSWITCHBYINDEX);
    op_desc->AddInputDesc(tensor);
    op_desc->SetInputOffset({1024});
    NodePtr node = graph->AddNode(op_desc);  // op_index = 0
    EXPECT_TRUE(AttrUtils::SetListInt(op_desc, ATTR_NAME_LABEL_SWITCH_LIST, {0, 1}));

    domi::TaskDef *task_def1 = model_task_def->add_task();
    task_def1->set_stream_id(0);
    task_def1->set_type(RT_MODEL_TASK_STREAM_LABEL_SWITCH_BY_INDEX);
    domi::LabelSwitchByIndexDef *label_task_def = task_def1->mutable_label_switch_by_index();
    label_task_def->set_op_index(op_index++);
    label_task_def->set_label_max(2);
  }
  {
    OpDescPtr op_desc = CreateOpDesc("label_then", LABELSET);
    NodePtr node = graph->AddNode(op_desc);  // op_index = 1
    EXPECT_TRUE(AttrUtils::SetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, 1));

    domi::TaskDef *task_def1 = model_task_def->add_task();
    task_def1->set_stream_id(0);
    task_def1->set_type(RT_MODEL_TASK_LABEL_SET);
    domi::LabelSetDef *label_task_def = task_def1->mutable_label_set();
    label_task_def->set_op_index(op_index++);
  }
  {
    OpDescPtr op_desc = CreateOpDesc("label_goto", LABELGOTOEX);
    NodePtr node = graph->AddNode(op_desc);  // op_index = 2
    EXPECT_TRUE(AttrUtils::SetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, 2));

    domi::TaskDef *task_def2 = model_task_def->add_task();
    task_def2->set_stream_id(0);
    task_def2->set_type(RT_MODEL_TASK_STREAM_LABEL_GOTO);
    domi::LabelGotoExDef *label_task_def = task_def2->mutable_label_goto_ex();
    label_task_def->set_op_index(op_index++);
  }
  {
    OpDescPtr op_desc = CreateOpDesc("label_else", LABELSET);
    NodePtr node = graph->AddNode(op_desc);  // op_index = 3
    EXPECT_TRUE(AttrUtils::SetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, 0));

    domi::TaskDef *task_def1 = model_task_def->add_task();
    task_def1->set_stream_id(0);
    task_def1->set_type(RT_MODEL_TASK_LABEL_SET);
    domi::LabelSetDef *label_task_def = task_def1->mutable_label_set();
    label_task_def->set_op_index(op_index++);
  }
  {
    OpDescPtr op_desc = CreateOpDesc("label_leave", LABELSET);
    NodePtr node = graph->AddNode(op_desc);  // op_index = 4
    EXPECT_TRUE(AttrUtils::SetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, 2));

    domi::TaskDef *task_def1 = model_task_def->add_task();
    task_def1->set_stream_id(0);
    task_def1->set_type(RT_MODEL_TASK_LABEL_SET);
    domi::LabelSetDef *label_task_def = task_def1->mutable_label_set();
    label_task_def->set_op_index(op_index++);
  }

  EXPECT_TRUE(AttrUtils::SetInt(ge_model, ATTR_MODEL_LABEL_NUM, 3));
  EXPECT_EQ(model.Assign(ge_model), SUCCESS);
  EXPECT_EQ(model.Init(), SUCCESS);

  EXPECT_EQ(model.input_addrs_list_.size(), 0);
  EXPECT_EQ(model.output_addrs_list_.size(), 0);
  EXPECT_EQ(model.task_list_.size(), 5);
}
*/
TEST_F(UtestDavinciModel, LoadWithQueue_fail_with_diff_args) {
  DavinciModel model(0, nullptr);
  model.ge_model_ = make_shared<GeModel>();
  model.input_queue_ids_.emplace_back(0);
  EXPECT_EQ(model.LoadWithQueue(), ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID);
  EXPECT_EQ(model.input_data_info_.size(), 0);

  ZeroCopyOffset zero_copy_offset;
  model.input_data_info_[0] = zero_copy_offset;
  model.output_queue_ids_.emplace_back(0);
  EXPECT_EQ(model.LoadWithQueue(), ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID);
  EXPECT_EQ(model.output_data_info_.size(), 0);

  model.output_data_info_[0] = zero_copy_offset;
  EXPECT_EQ(model.LoadWithQueue(), INTERNAL_ERROR);
  EXPECT_EQ(model.active_stream_list_.size(), 0);
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
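To tie that description to the test file above: the unit tests exercise the load path in which an optimized compute graph, wrapped in a GeModel, is handed to the runtime DavinciModel. The fragment below is a minimal sketch of that flow assembled only from calls that already appear in the tests above (CreateOpDesc, GraphUtils::CreateGraphFromComputeGraph, AttrUtils::SetInt, DavinciModel::Assign); the test name, graph contents, and attribute values are illustrative assumptions, not a definitive GE usage guide.

// Hypothetical sketch of the graph-to-model flow exercised by this file.
// All calls are taken from the tests above; values are illustrative only.
TEST_F(UtestDavinciModel, graph_to_model_flow_sketch) {
  // Build a tiny compute graph with a single Data (network input) node.
  ComputeGraphPtr graph = make_shared<ComputeGraph>("demo");
  OpDescPtr data_op = CreateOpDesc("data", DATA);
  GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  TensorUtils::SetSize(tensor, 512);
  data_op->AddInputDesc(tensor);
  data_op->AddOutputDesc(tensor);
  data_op->SetInputOffset({1024});
  data_op->SetOutputOffset({1024});
  graph->AddNode(data_op);

  // Wrap the graph in a GeModel, the container GE produces after optimization.
  GeModelPtr ge_model = make_shared<GeModel>();
  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
  AttrUtils::SetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, 5120000);  // model memory, bytes
  AttrUtils::SetInt(ge_model, ATTR_MODEL_STREAM_NUM, 1);
  ge_model->SetModelTaskDef(make_shared<domi::ModelTaskDef>());

  // Hand the GeModel to the runtime model; Assign binds it to the DavinciModel.
  DavinciModel model(0, nullptr);
  model.SetKnownNode(true);  // as in the init_unknown test above
  EXPECT_EQ(model.Assign(ge_model), SUCCESS);
  // model.Init() would then allocate memory and build op_list_/task_list_; the
  // init_unknown test above shows the extra ops and task defs it needs to succeed.
}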