You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

single_op_task_unittest.cc 16 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <gtest/gtest.h>
  17. #include <vector>
  18. #include "graph/load/model_manager/model_utils.h"
  19. #include "graph/utils/graph_utils.h"
  20. #include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
  21. #include "runtime/rt.h"
  22. #define protected public
  23. #define private public
  24. #include "single_op/single_op_model.h"
  25. #include "single_op/task/tbe_task_builder.h"
  26. #include "single_op/task/op_task.h"
  27. #include "single_op/task/tbe_task_builder.h"
  28. #include "external/register/op_tiling_registry.h"
  29. #undef private
  30. #undef protected
  31. #include "tests/depends/runtime/src/runtime_stub.h"
  32. using namespace std;
  33. using namespace testing;
  34. using namespace ge;
  35. using namespace optiling;
  36. class UtestSingleOpTask : public testing::Test {
  37. protected:
  38. void SetUp() {
  39. RTS_STUB_SETUP();
  40. }
  41. void TearDown() {
  42. RTS_STUB_TEARDOWN();
  43. }
  44. };
  45. TEST_F(UtestSingleOpTask, test_build_kernel_task) {
  46. string model_data_str = "123456789";
  47. SingleOpModel model("model", model_data_str.c_str(), model_data_str.size());
  48. model.input_offset_list_.push_back(0);
  49. model.input_sizes_.push_back(16);
  50. model.output_offset_list_.push_back(0);
  51. model.output_sizes_.push_back(16);
  52. auto graph = make_shared<ComputeGraph>("graph");
  53. auto op_desc = make_shared<OpDesc>("Add", "Add");
  54. AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");
  55. std::vector<char> kernelBin;
  56. TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  57. op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  58. std::string kernel_name("kernel/Add");
  59. AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);
  60. vector<int64_t> shape{16, 16};
  61. GeShape ge_shape(shape);
  62. GeTensorDesc desc(ge_shape);
  63. op_desc->AddInputDesc(desc);
  64. op_desc->AddOutputDesc(desc);
  65. auto node = graph->AddNode(op_desc);
  66. std::mutex stream_mu_;
  67. rtStream_t stream_ = nullptr;
  68. StreamResource stream_resource(0);
  69. SingleOp single_op(&stream_resource, &stream_mu_, stream_);
  70. domi::TaskDef task_def;
  71. task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
  72. domi::KernelDefWithHandle *kernel_with_handle = task_def.mutable_kernel_with_handle();
  73. kernel_with_handle->set_original_kernel_key("");
  74. kernel_with_handle->set_node_info("");
  75. kernel_with_handle->set_block_dim(32);
  76. kernel_with_handle->set_args_size(64);
  77. string args(64, '1');
  78. kernel_with_handle->set_args(args.data(), 64);
  79. domi::KernelContext *context = kernel_with_handle->mutable_context();
  80. context->set_op_index(1);
  81. context->set_kernel_type(2); // ccKernelType::TE
  82. uint16_t args_offset[9] = {0};
  83. context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  84. model.op_list_[1] = node;
  85. TbeOpTask task_tmp;
  86. TbeOpTask *task = &task_tmp;
  87. ASSERT_EQ(model.BuildKernelTask(task_def, &task), SUCCESS);
  88. ge::DataBuffer data_buffer;
  89. vector<GeTensorDesc> input_desc;
  90. vector<DataBuffer> input_buffers = { data_buffer };
  91. vector<GeTensorDesc> output_desc;
  92. vector<DataBuffer> output_buffers = { data_buffer };
  93. task->node_ = node;
  94. OpTilingFunc op_tiling_func = [](const TeOpParas &, const OpCompileInfo &, OpRunInfo &) -> bool {return true;};
  95. OpTilingRegistryInterf("Add", op_tiling_func);
  96. ge::AttrUtils::SetStr(op_desc, "compile_info_key", "op_compile_info_key");
  97. ge::AttrUtils::SetStr(op_desc, "compile_info_json", "op_compile_info_json");
  98. char c = '0';
  99. char* buffer = &c;
  100. task->tiling_buffer_ = buffer;
  101. task->max_tiling_size_ = 64;
  102. task->tiling_data_ = "tiling_data";
  103. task->arg_size_ = 64;
  104. task->args_.reset(new (std::nothrow) uint8_t[sizeof(void *) * 3]);
  105. ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);
  106. char *handle = "00";
  107. task->SetHandle(handle);
  108. ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);
  109. }
  110. TEST_F(UtestSingleOpTask, test_update_ioaddr) {
  111. auto graph = make_shared<ComputeGraph>("graph");
  112. auto op_desc = make_shared<OpDesc>("Add", "Add");
  113. GeTensorDesc desc;
  114. op_desc->AddInputDesc(desc);
  115. op_desc->AddInputDesc(desc);
  116. op_desc->AddOutputDesc(desc);
  117. vector<bool> is_input_const = { true, false };
  118. op_desc->SetIsInputConst(is_input_const);
  119. auto node = graph->AddNode(op_desc);
  120. TbeOpTask task;
  121. task.op_desc_ = op_desc;
  122. task.node_ = node;
  123. ASSERT_EQ(task.SetArgIndex(), SUCCESS);
  124. task.arg_size_ = sizeof(void *) * 4;
  125. task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]);
  126. task.arg_index_ = {0};
  127. task.input_num_ = 2;
  128. task.output_num_ = 1;
  129. vector<void *> args;
  130. vector<DataBuffer> inputs;
  131. vector<DataBuffer> outputs;
  132. ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
  133. ge::DataBuffer data_buffer;
  134. inputs = { data_buffer };
  135. outputs = { data_buffer };
  136. ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS);
  137. task.tiling_buffer_ = (void *)0x0001;
  138. task.workspaces_ = { (void *)0x0002 };
  139. ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS);
  140. task.tiling_buffer_ = nullptr;
  141. }
  142. TEST_F(UtestSingleOpTask, test_atomic_exec) {
  143. auto graph = make_shared<ComputeGraph>("graph");
  144. auto op_desc = make_shared<OpDesc>("Add", "Add");
  145. GeTensorDesc desc;
  146. op_desc->AddInputDesc(desc);
  147. op_desc->AddOutputDesc(desc);
  148. auto node = graph->AddNode(op_desc);
  149. AtomicAddrCleanOpTask task;
  150. task.op_desc_ = op_desc;
  151. task.node_ = node;
  152. vector<DataBuffer> inputs;
  153. vector<DataBuffer> outputs;
  154. std::vector<int64_t> atomic_output_indices;
  155. ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
  156. ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
  157. atomic_output_indices = { 0 };
  158. ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
  159. ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
  160. task.arg_size_ = sizeof(void *) * 2;
  161. task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]);
  162. ASSERT_EQ(task.InitAtomicAddrCleanIndices(), SUCCESS);
  163. ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
  164. ge::DataBuffer data_buffer;
  165. outputs = { data_buffer };
  166. ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS);
  167. task.tiling_buffer_ = (void *)0x0001;
  168. ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS);
  169. task.tiling_buffer_ = nullptr;
  170. optiling::utils::OpRunInfo run_info(0, true, 0);
  171. task.CalcTilingInfo(run_info);
  172. }
  173. TEST_F(UtestSingleOpTask, test_aicpu_task_launch_kernel) {
  174. AiCpuCCTask task;
  175. rtStream_t stream;
  176. ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  177. auto op_desc = make_shared<OpDesc>("deque", "Deque");
  178. ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, false);
  179. AttrUtils::SetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, ge::DEPEND_COMPUTE);
  180. GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
  181. op_desc->AddInputDesc(tensor);
  182. op_desc->AddInputDesc(tensor);
  183. op_desc->AddOutputDesc(tensor);
  184. task.SetOpDesc(op_desc);
  185. task.num_inputs_ = 2;
  186. task.num_outputs_ = 1;
  187. task.input_is_const_ = {true, false};
  188. int total_addr = 3;
  189. uint32_t* addrs[total_addr] = {nullptr, nullptr, nullptr};
  190. task.io_addr_ = reinterpret_cast<uintptr_t*>(addrs);
  191. task.io_addr_num_ = total_addr;
  192. ge::hybrid::AicpuExtInfo aicpu_ext_info;
  193. aicpu_ext_info.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
  194. aicpu_ext_info.infoLen = sizeof(int32_t);
  195. int32_t type = ge::DEPEND_COMPUTE;
  196. memcpy_s(aicpu_ext_info.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
  197. char *ext_mem = (char*)malloc(sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
  198. memcpy_s(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info,
  199. sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
  200. std::string ext_info_str(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
  201. vector<DataBuffer> inputs(2, DataBuffer());
  202. vector<DataBuffer> outputs(1, DataBuffer());
  203. vector<GeTensorDesc> inputs_desc(2, tensor);
  204. vector<GeTensorDesc> outputs_desc(1, tensor);
  205. ASSERT_EQ(task.SetExtInfoAndType(ext_info_str, 0), SUCCESS);
  206. task.unknown_type_ = ge::DEPEND_COMPUTE;
  207. ASSERT_EQ(task.InitForSummaryAndCopy(), SUCCESS);
  208. ASSERT_EQ(task.LaunchKernel(inputs_desc, inputs, outputs_desc, outputs, stream), SUCCESS);
  209. }
  210. TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
  211. AiCpuCCTask task;
  212. task.num_inputs_ = 2;
  213. task.num_outputs_ = 1;
  214. task.input_is_const_ = {true, false};
  215. int total_addr = 3;
  216. uint32_t* addrs[total_addr] = {nullptr, nullptr, nullptr};
  217. task.io_addr_ = reinterpret_cast<uintptr_t*>(addrs);
  218. task.io_addr_num_ = total_addr;
  219. {
  220. vector<DataBuffer> inputs(1, DataBuffer());
  221. vector<DataBuffer> outputs(1, DataBuffer());
  222. auto ret = task.UpdateIoAddr(inputs, outputs);
  223. ASSERT_EQ(ret, SUCCESS);
  224. ASSERT_EQ(addrs[0], nullptr);
  225. ASSERT_EQ(addrs[1], nullptr);
  226. ASSERT_EQ(addrs[2], nullptr);
  227. }
  228. {
  229. uint32_t data_buf[2];
  230. vector<DataBuffer> inputs{DataBuffer(&data_buf[0], 4, false)};
  231. vector<DataBuffer> outputs{DataBuffer(&data_buf[1], 4, false)};
  232. auto ret = task.UpdateIoAddr(inputs, outputs);
  233. ASSERT_EQ(ret, SUCCESS);
  234. ASSERT_EQ(addrs[0], nullptr);
  235. ASSERT_EQ(addrs[1], &data_buf[0]);
  236. ASSERT_EQ(addrs[2], &data_buf[1]);
  237. }
  238. {
  239. uint32_t data_buf[2];
  240. vector<DataBuffer> inputs{DataBuffer(nullptr, 4, false)};
  241. vector<DataBuffer> outputs{DataBuffer(&data_buf[1], 4, false)};
  242. auto ret = task.UpdateIoAddr(inputs, outputs);
  243. ASSERT_EQ(ret, PARAM_INVALID);
  244. }
  245. {
  246. uint32_t data_buf[2];
  247. vector<DataBuffer> inputs{DataBuffer(&data_buf[0], 4, false)};
  248. vector<DataBuffer> outputs{DataBuffer(nullptr, 4, false)};
  249. auto ret = task.UpdateIoAddr(inputs, outputs);
  250. ASSERT_EQ(ret, PARAM_INVALID);
  251. }
  252. }
  253. TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_01) {
  254. int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  255. vector<char> aicpu_ext_info(len, 0);
  256. char *buf = aicpu_ext_info.data();
  257. int offset = 0;
  258. hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
  259. ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  260. ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  261. offset += sizeof(hybrid::AicpuExtInfo);
  262. hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
  263. async_wait_info->waitType = 0;
  264. async_wait_info->waitId = 0;
  265. async_wait_info->timeOut = 0;
  266. async_wait_info->reserved = 0;
  267. domi::KernelDef kernel_def;
  268. kernel_def.set_kernel_ext_info(buf, len);
  269. kernel_def.set_kernel_ext_info_size(len);
  270. auto op_desc = make_shared<OpDesc>("deque", "Deque");
  271. ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  272. AiCpuCCTask aicpu_task;
  273. aicpu_task.SetOpDesc(op_desc);
  274. rtStream_t stream;
  275. ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  276. ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  277. ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
  278. }
  279. TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_02) {
  280. int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  281. vector<char> aicpu_ext_info(len, 0);
  282. char *buf = aicpu_ext_info.data();
  283. int offset = 0;
  284. hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
  285. ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  286. ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  287. offset += sizeof(hybrid::AicpuExtInfo);
  288. hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
  289. async_wait_info->waitType = 0;
  290. async_wait_info->waitId = 0;
  291. async_wait_info->timeOut = 0;
  292. async_wait_info->reserved = 0;
  293. domi::KernelDef kernel_def;
  294. kernel_def.set_kernel_ext_info(buf, len);
  295. kernel_def.set_kernel_ext_info_size(len);
  296. auto op_desc = make_shared<OpDesc>("deque", "Deque");
  297. ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  298. AiCpuTask aicpu_task;
  299. aicpu_task.SetOpDesc(op_desc);
  300. rtStream_t stream;
  301. ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  302. ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  303. ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
  304. }
// Failure-injection test for a blocking AICPU op (AiCpuTask): each RTS_STUB
// call queues a forced result for the NEXT invocation of that runtime API,
// so the order of stub setups and assertions below is significant — do not
// reorder these statements.
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_fail) {
  // Build the ASYNCWAIT ext-info blob (header + AsyncWaitInfo payload).
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo*>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo*>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;
  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);
  // Blocking op under test.
  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  // Baseline: with no injected failures both calls succeed.
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
  // rtGetDevice error -> ext-info setup fails.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);
  // rtGetDeviceCapability error -> ext-info setup fails (exercised twice).
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);
  // Capability query succeeds but reports an out-of-range value -> fails.
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);
  // rtGetDevice error during launch -> LaunchKernel fails.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);
  // Re-arm ext info, then inject a stream-wait failure -> launch fails.
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);
  // Re-arm again, then inject an event-reset failure -> launch fails.
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);
  // Device reports blocking-op NOT supported -> both calls fall back to the
  // non-blocking path and succeed.
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示