
op_task.cc 37 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "single_op/task/op_task.h"

#include <google/protobuf/extension_set.h>

#include <chrono>
#include <thread>

#include "aicpu/common/aicpu_task_struct.h"
#include "common/dump/dump_manager.h"
#include "common/dump/dump_op.h"
#include "common/profiling/profiling_manager.h"
#include "common/formats/formats.h"
#include "common/math/math_util.h"
#include "framework/common/debug/log.h"
#include "register/op_tiling.h"
#include "runtime/rt.h"
#include "build_task_utils.h"

namespace ge {
namespace {
constexpr int kLaunchRetryTimes = 1000;
constexpr int kSleepTime = 10;
constexpr uint64_t kReleaseFlag = 1;
constexpr int kCopyNum = 2;
constexpr uint64_t kInferSessionId = 0;

void FreeHbm(void *var) {
  if (var) {
    (void)rtFree(var);
  }
}
}  // namespace
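
// Reads the I/O addresses back from the task's argument table and, when
// single-op dump is enabled, configures dump_op_ and launches it on the
// given stream.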
Status OpTask::OpenDump(rtStream_t stream) {
  if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
    GELOGI("Dump is open in single op, start to set dump info");
    std::vector<uint64_t> input_addrs;
    std::vector<uint64_t> output_addrs;
    auto input_size = op_desc_->GetInputsSize();
    auto output_size = op_desc_->GetOutputsSize();
    uintptr_t *arg_base = nullptr;
    size_t arg_num = 0;
    GetIoAddr(arg_base, arg_num);
    if (arg_num < input_size + output_size) {
      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Arg number %zu is less than total input and output size %zu",
             arg_num, input_size + output_size);
      return ACL_ERROR_GE_INTERNAL_ERROR;
    }
    for (size_t i = 0; i < input_size; i++) {
      uint64_t input_addr = arg_base[i];
      input_addrs.emplace_back(input_addr);
    }
    for (size_t j = 0; j < output_size; j++) {
      uint64_t output_addr = arg_base[input_size + j];
      output_addrs.emplace_back(output_addr);
    }
    dump_op_.SetDumpInfo(DumpManager::GetInstance().GetDumpProperties(kInferSessionId), op_desc_, input_addrs,
                         output_addrs, stream);
    auto status = dump_op_.LaunchDumpOp();
    if (status != SUCCESS) {
      GELOGE(status, "Launch dump op failed in single op");
      return status;
    }
    return SUCCESS;
  }
  GELOGI("Dump is not open in single op");
  return SUCCESS;
}
void TbeOpTask::SetStubFunc(const std::string &name, const void *stub_func) {
  this->stub_name_ = name;
  this->stub_func_ = stub_func;
}

void TbeOpTask::SetKernelArgs(std::unique_ptr<uint8_t[]> &&args, size_t arg_size, uint32_t block_dim,
                              const OpDescPtr &op_desc) {
  args_ = std::move(args);
  arg_size_ = arg_size;
  block_dim_ = block_dim;
  op_desc_ = op_desc;
}

void TbeOpTask::SetKernelWithHandleArgs(std::unique_ptr<uint8_t[]> &&args, size_t arg_size, uint32_t block_dim,
                                        const OpDescPtr &op_desc,
                                        const domi::KernelDefWithHandle &kernel_def_with_handle) {
  SetKernelArgs(std::move(args), arg_size, block_dim, op_desc);
  original_kernel_key_ = kernel_def_with_handle.original_kernel_key();
  node_info_ = kernel_def_with_handle.node_info();
}

void TbeOpTask::SetSmDesc(void *sm_desc) { sm_desc_ = sm_desc; }

void OpTask::SetModelArgs(std::string model_name, uint32_t model_id) {
  model_name_ = model_name;
  model_id_ = model_id;
}

Status OpTask::GetProfilingArgs(TaskDescInfo &task_desc_info, uint32_t &model_id) {
  uint32_t task_id = 0;
  uint32_t stream_id = 0;
  auto rt_ret = rtGetTaskIdAndStreamID(&task_id, &stream_id);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Get task_id and stream_id failed, ret: 0x%X.", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  GE_CHECK_NOTNULL(op_desc_);
  string op_name = op_desc_->GetName();
  GELOGD("Get profiling args of op [%s] end, task_id[%u], stream_id[%u]", op_name.c_str(), task_id, stream_id);
  model_id = model_id_;
  task_desc_info.model_name = model_name_;
  task_desc_info.block_dim = block_dim_;
  task_desc_info.task_id = task_id;
  task_desc_info.stream_id = stream_id;
  task_desc_info.op_name = op_name;
  task_desc_info.op_type = op_desc_->GetType();
  auto &prof_mgr = ProfilingManager::Instance();
  prof_mgr.GetOpInputOutputInfo(op_desc_, task_desc_info);
  return SUCCESS;
}

Status OpTask::UpdateRunInfo(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) {
  return UNSUPPORTED;
}

Status OpTask::DoUpdateArgTable(const SingleOpModelParam &param, bool keep_workspace) {
  auto addresses = BuildTaskUtils::GetAddresses(op_desc_, param, keep_workspace);
  auto all_addresses = BuildTaskUtils::JoinAddresses(addresses);
  uintptr_t *arg_base = nullptr;
  size_t arg_num = 0;
  GetIoAddr(arg_base, arg_num);
  if (arg_num < all_addresses.size()) {
    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[%s] arg number mismatches, expect at least = %zu, but got = %zu",
           op_desc_->GetName().c_str(), all_addresses.size(), arg_num);
    return ACL_ERROR_GE_INTERNAL_ERROR;
  }
  for (void *addr : all_addresses) {
    *arg_base++ = reinterpret_cast<uintptr_t>(addr);
  }
  return SUCCESS;
}

Status OpTask::UpdateArgTable(const SingleOpModelParam &param) {
  return DoUpdateArgTable(param, true);
}

Status OpTask::LaunchKernel(const vector<GeTensorDesc> &input_desc,
                            const vector<DataBuffer> &input_buffers,
                            vector<GeTensorDesc> &output_desc,
                            vector<DataBuffer> &output_buffers,
                            rtStream_t stream) {
  return UNSUPPORTED;
}

const std::string &OpTask::GetTaskType() const { return kTaskTypeInvalid; }

TbeOpTask::~TbeOpTask() {
  if (sm_desc_ != nullptr) {
    (void)rtMemFreeManaged(sm_desc_);
  }
  if (tiling_buffer_ != nullptr) {
    (void)rtFree(tiling_buffer_);
  }
}

const void *TbeOpTask::GetArgs() const { return args_.get(); }

size_t TbeOpTask::GetArgSize() const { return arg_size_; }

const std::string &TbeOpTask::GetStubName() const { return stub_name_; }

const std::string &TbeOpTask::GetTaskType() const { return kTaskTypeAicore; }

void TbeOpTask::SetHandle(void *handle) {
  this->handle_ = handle;
}
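
// Static-shape launch path: retries rtKernelLaunch up to kLaunchRetryTimes,
// sleeping kSleepTime ms between attempts, before giving up.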
Status TbeOpTask::LaunchKernel(rtStream_t stream) {
  GELOGD("To invoke rtKernelLaunch. task = %s, block_dim = %u", this->stub_name_.c_str(), block_dim_);
  auto *sm_desc = reinterpret_cast<rtSmDesc_t *>(sm_desc_);
  auto ret = rtKernelLaunch(stub_func_, block_dim_, args_.get(), static_cast<uint32_t>(arg_size_), sm_desc, stream);
  int retry_times = 0;
  while (ret != RT_ERROR_NONE && retry_times < kLaunchRetryTimes) {
    retry_times++;
    GELOGW("Retry after %d ms, retry_times: %d", kSleepTime, retry_times);
    std::this_thread::sleep_for(std::chrono::milliseconds(kSleepTime));
    ret = rtKernelLaunch(stub_func_, block_dim_, args_.get(), static_cast<uint32_t>(arg_size_), sm_desc, stream);
  }
  if (ret != RT_ERROR_NONE) {
    GELOGE(ret, "Invoke rtKernelLaunch failed. ret = %d, task = %s", ret, this->stub_name_.c_str());
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  GELOGI("[TASK_INFO] %s", this->stub_name_.c_str());
  return SUCCESS;
}
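
// Dynamic-shape path: pushes the runtime shapes into the node, then calls
// op tiling (OpParaCalculate) to refresh block_dim_, the tiling data and
// tiling key, and finally allocates the workspaces the tiling result requires.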
Status TbeOpTask::UpdateRunInfo(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) {
  GE_CHK_STATUS_RET_NOLOG(UpdateNodeByShape(input_desc, output_desc));
  // invoke OpParaCalculate
  GELOGD("Start to invoke OpParaCalculate.");
  optiling::OpRunInfo run_info;
  run_info.block_dim = 0;
  auto ret = optiling::OpParaCalculate(*node_, run_info);
  if (ret != GRAPH_SUCCESS) {
    GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Failed to invoke OpParaCalculate. ret = %u", ret);
    return ACL_ERROR_GE_INTERNAL_ERROR;
  }
  block_dim_ = run_info.block_dim;
  tiling_data_ = run_info.tiling_data.str();
  tiling_key_ = run_info.tiling_key;
  GELOGD("Done invoking OpParaCalculate successfully. block_dim = %u, tiling size = %zu, tiling_key = %u", block_dim_,
         tiling_data_.size(), tiling_key_);
  GE_CHK_STATUS_RET(AllocateWorkspaces(run_info.workspaces), "Failed to allocate workspaces");
  return SUCCESS;
}

Status TbeOpTask::UpdateTensorDesc(const GeTensorDesc &src_tensor, GeTensorDesc &dst_tensor) {
  int64_t storage_format_val = static_cast<Format>(FORMAT_RESERVED);
  (void)AttrUtils::GetInt(src_tensor, ge::ATTR_NAME_STORAGE_FORMAT, storage_format_val);
  auto storage_format = static_cast<Format>(storage_format_val);
  if (storage_format == FORMAT_RESERVED) {
    GELOGD("Storage format not set. Update shape to [%s], and original shape to [%s]",
           src_tensor.GetShape().ToString().c_str(), src_tensor.GetOriginShape().ToString().c_str());
    dst_tensor.SetShape(src_tensor.GetShape());
    dst_tensor.SetOriginShape(src_tensor.GetOriginShape());
  } else {
    std::vector<int64_t> storage_shape;
    if (!AttrUtils::GetListInt(src_tensor, ge::ATTR_NAME_STORAGE_SHAPE, storage_shape)) {
      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "Failed to get storage_shape while storage_format was set");
      return ACL_ERROR_GE_INTERNAL_ERROR;
    }
    GELOGD("Storage format set. Update shape to [%s], and original shape to [%s]",
           GeShape(storage_shape).ToString().c_str(), src_tensor.GetShape().ToString().c_str());
    dst_tensor.SetShape(GeShape(std::move(storage_shape)));
    dst_tensor.SetOriginShape(src_tensor.GetShape());
  }
  return SUCCESS;
}

Status TbeOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, const vector<GeTensorDesc> &output_desc) {
  auto op_desc = node_->GetOpDesc();
  GE_CHECK_NOTNULL(op_desc);
  // Set runtime shape to node
  for (size_t i = 0; i < input_desc.size(); ++i) {
    auto tensor_desc = op_desc->MutableInputDesc(i);
    auto &runtime_tensor_desc = input_desc[i];
    GE_CHECK_NOTNULL(tensor_desc);
    GE_CHK_STATUS_RET(UpdateTensorDesc(runtime_tensor_desc, *tensor_desc));
  }
  for (size_t i = 0; i < output_desc.size(); ++i) {
    auto tensor_desc = op_desc->MutableOutputDesc(i);
    auto &runtime_tensor_desc = output_desc[i];
    GE_CHECK_NOTNULL(tensor_desc);
    GE_CHK_STATUS_RET(UpdateTensorDesc(runtime_tensor_desc, *tensor_desc));
  }
  return SUCCESS;
}

void TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, size_t max_tiling_size) {
  node_ = node;
  tiling_buffer_ = tiling_buffer;
  max_tiling_size_ = max_tiling_size;
}

Status TbeOpTask::AllocateWorkspaces(const vector<int64_t> &workspace_sizes) {
  static const std::string kPurpose("malloc workspace memory for dynamic op.");
  if (workspace_sizes.empty()) {
    GELOGD("No need to allocate workspace.");
    return SUCCESS;
  }
  int64_t total_size = 0;
  std::vector<int64_t> ws_offsets;
  for (auto ws_size : workspace_sizes) {
    // alignment and padding should be done in OpParaCalculate
    if (CheckInt64AddOverflow(total_size, ws_size) != SUCCESS) {
      return ACL_ERROR_GE_INTERNAL_ERROR;
    }
    ws_offsets.emplace_back(total_size);
    total_size += ws_size;
  }
  GELOGD("Total workspace size is %ld", total_size);
  GE_CHECK_NOTNULL(stream_resource_);
  auto ws_base = stream_resource_->MallocMemory(kPurpose, static_cast<size_t>(total_size));
  if (ws_base == nullptr) {
    GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Failed to allocate memory of size: %ld", total_size);
    return ACL_ERROR_GE_MEMORY_ALLOCATION;
  }
  GELOGD("Done allocating workspace memory successfully.");
  for (auto ws_offset : ws_offsets) {
    workspaces_.emplace_back(ws_base + ws_offset);
  }
  return SUCCESS;
}

Status TbeOpTask::LaunchKernel(const vector<GeTensorDesc> &input_desc,
                               const vector<DataBuffer> &input_buffers,
                               vector<GeTensorDesc> &output_desc,
                               vector<DataBuffer> &output_buffers,
                               rtStream_t stream) {
  GE_CHK_STATUS_RET_NOLOG(UpdateRunInfo(input_desc, output_desc));
  GELOGD("[%s] Start to launch kernel", node_->GetName().c_str());
  std::vector<void *> args;
  for (auto &buffer : input_buffers) {
    args.emplace_back(buffer.data);
  }
  for (auto &buffer : output_buffers) {
    args.emplace_back(buffer.data);
  }
  for (auto &buffer : workspaces_) {
    args.emplace_back(buffer);
  }
  if (tiling_buffer_ != nullptr) {
    GELOGD("[%s] Start to copy tiling info. size = %zu", node_->GetName().c_str(), tiling_data_.size());
    GE_CHK_RT_RET(rtMemcpyAsync(tiling_buffer_, max_tiling_size_, tiling_data_.data(), tiling_data_.size(),
                                RT_MEMCPY_HOST_TO_DEVICE_EX, stream));
    args.emplace_back(tiling_buffer_);
  }
  if (memcpy_s(args_.get(), arg_size_, args.data(), args.size() * sizeof(void *)) != EOK) {
    GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "[%s] Failed to update kernel args.", node_->GetName().c_str());
    return ACL_ERROR_GE_MEMORY_OPERATE_FAILED;
  }
  GELOGD("[%s] Start to invoke rtKernelLaunch", node_->GetName().c_str());
  if (handle_ == nullptr) {
    GE_CHK_RT_RET(rtKernelLaunch(stub_func_, block_dim_, args_.get(), arg_size_, nullptr, stream));
    GELOGD("[%s] Done invoking rtKernelLaunch successfully", node_->GetName().c_str());
  } else {
    std::string dev_func = original_kernel_key_ + "_" + std::to_string(tiling_key_);
    std::string kernel_info = node_info_ + "/" + std::to_string(tiling_key_);
    GE_CHK_RT_RET(rtKernelLaunchWithHandle(handle_, dev_func.c_str(), block_dim_, args_.get(), arg_size_, nullptr,
                                           stream, kernel_info.c_str()));
    GELOGD("[%s] Done invoking rtKernelLaunchWithHandle successfully", node_->GetName().c_str());
  }
  return SUCCESS;
}
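
// The TBE argument table is a flat array of pointers: inputs, outputs,
// workspaces, then the optional tiling buffer. The tiling slot is excluded
// from the reported I/O count.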
void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
  arg_base = reinterpret_cast<uintptr_t *>(args_.get());
  arg_count = arg_size_ / sizeof(void *);
  if (tiling_buffer_ != nullptr) {
    --arg_count;
  }
}

AiCpuBaseTask::~AiCpuBaseTask() {
  if (ext_info_addr_dev_ != nullptr) {
    (void)rtFree(ext_info_addr_dev_);
  }
}
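
// Parses the kernel's ext-info blob into aicpu_ext_handle_, stamps the
// session and execute-mode fields, then copies the result into device
// memory (ext_info_addr_dev_).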
Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint64_t kernel_id) {
  if (kernel_ext_info.empty()) {
    GELOGI("Kernel_ext_info is empty, no need to copy to device.");
    return SUCCESS;
  }
  int32_t unknown_shape_type_val = 0;
  (void)AttrUtils::GetInt(op_desc_, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val);
  GELOGD("Get unknown_type is %d.", unknown_shape_type_val);
  unknown_type_ = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
  aicpu_ext_handle_.reset(new (std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
                                                                               num_inputs_,
                                                                               num_outputs_,
                                                                               unknown_type_));
  GE_CHK_BOOL_RET_STATUS(aicpu_ext_handle_ != nullptr, ACL_ERROR_GE_MEMORY_ALLOCATION,
                         "Malloc aicpu_ext_handle mem failed!");
  Status ret = aicpu_ext_handle_->Parse(kernel_ext_info);
  if (ret != SUCCESS) {
    GELOGE(ret, "Parse kernel ext info failed, kernel_ext_info_size=%zu.", kernel_ext_info.size());
    return ret;
  }
  GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateSessionInfo(ULLONG_MAX, kernel_id, false),
                    "UpdateSessionInfo failed.");
  GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(true), "UpdateExecuteMode failed.");
  GE_CHK_RT_RET(rtMalloc(&ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(), RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMemcpy(ext_info_addr_dev_, aicpu_ext_handle_->GetExtInfoLen(),
                         aicpu_ext_handle_->GetExtInfo(), aicpu_ext_handle_->GetExtInfoLen(),
                         RT_MEMCPY_HOST_TO_DEVICE));
  return SUCCESS;
}

Status AiCpuBaseTask::SetInputConst() {
  input_is_const_.clear();
  const vector<bool> v_is_input_const = op_desc_->GetIsInputConst();
  for (size_t i = 0; i < op_desc_->GetAllInputsSize(); ++i) {
    const GeTensorDescPtr tensor_desc = op_desc_->MutableInputDesc(static_cast<uint32_t>(i));
    if (tensor_desc == nullptr) {
      GELOGD("SingleOp: %s, Index: %zu, has no input", op_desc_->GetName().c_str(), i);
      continue;
    }
    if (i < v_is_input_const.size() && v_is_input_const[i]) {
      GELOGD("SingleOp: %s, Index: %zu, input is const", op_desc_->GetName().c_str(), i);
      input_is_const_.push_back(true);
      continue;
    }
    input_is_const_.push_back(false);
  }
  return SUCCESS;
}

Status AiCpuBaseTask::UpdateExtInfo(const std::vector<GeTensorDesc> &input_desc,
                                    std::vector<GeTensorDesc> &output_desc,
                                    rtStream_t stream) {
  GELOGI("Update ext info begin, unknown_type=%d.", unknown_type_);
  GE_CHECK_NOTNULL(aicpu_ext_handle_);
  GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateExecuteMode(false), "UpdateExecuteMode failed.");
  if (num_inputs_ == 0 && num_outputs_ == 0) {
    GELOGI("No inputs and no outputs, no need to update ext info.");
    return SUCCESS;
  }
  size_t non_const_index = 0;
  for (size_t input_index = 0; input_index < num_inputs_; input_index++) {
    if (input_index < input_is_const_.size() && input_is_const_[input_index]) {
      // For const inputs, take the tensor desc from op_desc_; num_inputs_ equals the op_desc_ input size.
      auto const_input_desc = op_desc_->MutableInputDesc(static_cast<uint32_t>(input_index));
      GE_CHECK_NOTNULL(const_input_desc);
      GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateInputShapeAndType(input_index, *const_input_desc),
                        "Input[%zu] update input shape failed.", input_index);
      continue;
    }
    GE_CHK_BOOL_RET_STATUS(non_const_index < input_desc.size(), ACL_ERROR_GE_PARAM_INVALID,
                           "Input_desc size is %zu, but non_const_index is %zu",
                           input_desc.size(), non_const_index);
    GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateInputShapeAndType(input_index, input_desc[non_const_index]),
                      "Input[%zu] update input shape failed.", input_index);
    non_const_index++;
  }
  if (unknown_type_ != DEPEND_COMPUTE) {
    for (size_t j = 0; j < num_outputs_; ++j) {
      GE_CHK_STATUS_RET(aicpu_ext_handle_->UpdateOutputShapeAndType(j, output_desc[j]),
                        "Output[%zu] UpdateOutputShapeAndType failed.", j);
    }
  }
  GE_CHK_RT_RET(rtMemcpyAsync(ext_info_addr_dev_,
                              aicpu_ext_handle_->GetExtInfoLen(),  // destination was sized to the ext info length
                              aicpu_ext_handle_->GetExtInfo(),
                              aicpu_ext_handle_->GetExtInfoLen(),
                              RT_MEMCPY_HOST_TO_DEVICE_EX,
                              stream));
  GELOGI("Update ext info end.");
  return SUCCESS;
}
Status AiCpuBaseTask::UpdateOutputShape(vector<GeTensorDesc> &output_desc) {
  if (num_outputs_ == 0) {
    GELOGD("AiCpuBaseTask output_num is 0, no need to update output shape.");
    return SUCCESS;
  }
  GELOGD("Start to update DEPEND_SHAPE_RANGE AiCpuBaseTask output shape.");
  GE_CHK_RT_RET(rtMemcpy(aicpu_ext_handle_->GetExtInfo(),
                         aicpu_ext_handle_->GetExtInfoLen(),
                         ext_info_addr_dev_,
                         aicpu_ext_handle_->GetExtInfoLen(),
                         RT_MEMCPY_DEVICE_TO_HOST));
  for (size_t i = 0; i < num_outputs_; ++i) {
    GeShape shape;
    DataType data_type;
    aicpu_ext_handle_->GetOutputShapeAndType(i, shape, data_type);
    GE_CHK_STATUS_RET(UpdateShapeToOutputDesc(shape, output_desc[i]),
                      "AiCpuCCTask update [%zu]th output shape failed.", i);
    if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
      GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(i, output_desc[i]),
                        "AiCpuCCTask update [%zu]th output desc failed.", i);
    }
  }
  GELOGD("Update DEPEND_SHAPE_RANGE AiCpuBaseTask output shape finished.");
  return SUCCESS;
}

Status AiCpuBaseTask::UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc) {
  auto shape_old = output_desc.GetShape();
  output_desc.SetShape(shape_new);
  GELOGD("Update AiCpuBaseTask shape from %s to %s", shape_old.ToString().c_str(), shape_new.ToString().c_str());
  auto origin_shape_old = output_desc.GetOriginShape();
  auto origin_format = output_desc.GetOriginFormat();
  auto format = output_desc.GetFormat();
  if (origin_format == format) {
    output_desc.SetOriginShape(shape_new);
    return SUCCESS;
  }
  std::vector<int64_t> origin_dims_new;
  auto trans_ret = formats::TransShape(format, shape_new.GetDims(),
                                       output_desc.GetDataType(), origin_format, origin_dims_new);
  GE_CHK_STATUS_RET(trans_ret,
                    "AiCpuTask originFormat[%d] is not same as format[%d], but TransShape failed, shape=%s.",
                    origin_format, format, shape_new.ToString().c_str());
  auto origin_shape_new = GeShape(origin_dims_new);
  output_desc.SetOriginShape(origin_shape_new);
  GELOGD("AiCpuTask originFormat[%d] is not same as format[%d], need to update from %s to %s.",
         origin_format, format, origin_shape_old.ToString().c_str(), origin_shape_new.ToString().c_str());
  return SUCCESS;
}
Status AiCpuBaseTask::UpdateIoAddr(const vector<DataBuffer> &inputs, const vector<DataBuffer> &outputs) {
  uintptr_t *arg_base = nullptr;
  size_t arg_num = 0;
  GetIoAddr(arg_base, arg_num);
  // Input and output counts were checked in ValidateParams.
  size_t non_const_index = 0;
  for (size_t input_index = 0; input_index < num_inputs_; input_index++) {
    if (input_index < input_is_const_.size() && input_is_const_[input_index]) {
      // Const inputs keep the address already in the arg table.
      GE_CHECK_NOTNULL(arg_base);
      GELOGD("AICpuTask input[%zu] addr = %lu", input_index, *arg_base);
      arg_base++;
      continue;
    }
    GE_CHK_BOOL_RET_STATUS(non_const_index < inputs.size(), ACL_ERROR_GE_PARAM_INVALID,
                           "Input size is %zu, but non_const_index is %zu",
                           inputs.size(), non_const_index);
    auto addr = inputs[non_const_index].data;
    GE_CHECK_NOTNULL(addr);
    GELOGD("AICpuTask input[%zu] addr = %p", input_index, addr);
    *arg_base++ = reinterpret_cast<uintptr_t>(addr);
    non_const_index++;
  }
  for (size_t i = 0; i < outputs.size(); ++i) {
    auto addr = outputs[i].data;
    GE_CHECK_NOTNULL(addr);
    GELOGD("AICpuTask output[%zu] addr = %p", i, addr);
    *arg_base++ = reinterpret_cast<uintptr_t>(addr);
  }
  return SUCCESS;
}

AiCpuTask::~AiCpuTask() {
  FreeHbm(args_);
  FreeHbm(io_addr_);
  if (dynamic_flag_) {
    FreeHbm(workspace_addr_);
  }
  FreeHbm(copy_workspace_buf_);
  FreeHbm(copy_ioaddr_dev_);
  FreeHbm(copy_input_release_flag_dev_);
  FreeHbm(copy_input_data_size_dev_);
  FreeHbm(copy_input_src_dev_);
  FreeHbm(copy_input_dst_dev_);
  FreeHbm(copy_task_args_buf_);
  for (auto summary : output_summary_) {
    FreeHbm(summary);
  }
  for (auto out_shape : out_shape_hbm_) {
    FreeHbm(out_shape);
  }
}

Status AiCpuTask::LaunchKernel(rtStream_t stream) {
  GELOGD("Start to launch kernel. task = %s", this->op_type_.c_str());
  auto ret = rtMemcpyAsync(io_addr_,
                           io_addr_size_,
                           io_addr_host_.data(),
                           io_addr_host_.size() * sizeof(void *),
                           RT_MEMCPY_HOST_TO_DEVICE_EX,
                           stream);
  if (ret != RT_ERROR_NONE) {
    GELOGE(ret, "rtMemcpyAsync workspace data failed. ret = %d, task = %s", ret, this->op_type_.c_str());
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  GELOGI("To invoke rtKernelLaunchEx. task = %s", this->op_type_.c_str());
  ret = rtKernelLaunchEx(args_, arg_size_, 0, stream);
  if (ret != RT_ERROR_NONE) {
    GELOGE(ret, "Invoke rtKernelLaunchEx failed. ret = %d, task = %s", ret, this->op_type_.c_str());
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str());
  GELOGD("Done launching kernel successfully. task = %s", this->op_type_.c_str());
  return SUCCESS;
}
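
// DEPEND_COMPUTE flow: for each output, stage two copy records (raw data to
// the user buffer and shape data to an HBM buffer), then upload the four
// copy argument arrays to device memory.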
Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
  std::vector<uint64_t> copy_input_release_flag;
  std::vector<uint64_t> copy_input_data_size;
  std::vector<uint64_t> copy_input_src;
  std::vector<uint64_t> copy_input_dst;
  for (size_t i = 0; i < num_outputs_; ++i) {
    const auto &summary = output_summary_host_[i];
    GELOGI("Node out[%zu] summary, shape data=0x%lx, shape data size=%lu, raw data=0x%lx, raw data size=%lu.",
           i, summary.shape_data_ptr, summary.shape_data_size,
           summary.raw_data_ptr, summary.raw_data_size);
    const auto &output = outputs[i];
    copy_input_release_flag.emplace_back(kReleaseFlag);
    if (summary.raw_data_size > 0) {
      copy_input_data_size.emplace_back(output.length);
    } else {
      copy_input_data_size.emplace_back(summary.raw_data_size);
    }
    copy_input_src.emplace_back(summary.raw_data_ptr);
    copy_input_dst.emplace_back(reinterpret_cast<uintptr_t>(output.data));
    const auto &shape_buffer = out_shape_hbm_[i];
    copy_input_release_flag.emplace_back(kReleaseFlag);
    copy_input_data_size.emplace_back(summary.shape_data_size);
    copy_input_src.emplace_back(summary.shape_data_ptr);
    copy_input_dst.emplace_back(reinterpret_cast<uintptr_t>(shape_buffer));
  }
  const size_t copy_input_buf_len = num_outputs_ * kCopyNum * sizeof(uint64_t);
  GE_CHK_RT_RET(rtMemcpy(copy_input_release_flag_dev_, copy_input_buf_len,
                         copy_input_release_flag.data(), copy_input_buf_len, RT_MEMCPY_HOST_TO_DEVICE));
  GE_CHK_RT_RET(rtMemcpy(copy_input_data_size_dev_, copy_input_buf_len,
                         copy_input_data_size.data(), copy_input_buf_len, RT_MEMCPY_HOST_TO_DEVICE));
  GE_CHK_RT_RET(rtMemcpy(copy_input_src_dev_, copy_input_buf_len,
                         copy_input_src.data(), copy_input_buf_len, RT_MEMCPY_HOST_TO_DEVICE));
  GE_CHK_RT_RET(rtMemcpy(copy_input_dst_dev_, copy_input_buf_len,
                         copy_input_dst.data(), copy_input_buf_len, RT_MEMCPY_HOST_TO_DEVICE));
  return SUCCESS;
}

Status AiCpuTask::ReadResultSummaryAndPrepareMemory() {
  for (size_t i = 0; i < num_outputs_; ++i) {
    auto &result_summary = output_summary_host_[i];
    GE_CHK_RT_RET(rtMemcpy(&result_summary, sizeof(aicpu::FWKAdapter::ResultSummary),
                           output_summary_[i], sizeof(aicpu::FWKAdapter::ResultSummary),
                           RT_MEMCPY_DEVICE_TO_HOST));
    auto shape_data_size = result_summary.shape_data_size;
    void *shape_buffer = nullptr;
    if (shape_data_size > 0) {
      GE_CHK_RT_RET(rtMalloc(&shape_buffer, shape_data_size, RT_MEMORY_HBM));
    }
    out_shape_hbm_.emplace_back(shape_buffer);
  }
  return SUCCESS;
}

Status AiCpuTask::CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream) {
  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(outputs));
  GE_CHK_RT_RET(rtKernelLaunchEx(copy_task_args_buf_, sizeof(STR_FWK_OP_KERNEL),
                                 RT_KERNEL_DEFAULT, stream));
  GE_CHK_RT_RET(rtStreamSynchronize(stream));
  return SUCCESS;
}

Status AiCpuTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
  for (size_t i = 0; i < num_outputs_; ++i) {
    const auto &result_summary = output_summary_host_[i];
    std::vector<int64_t> shape_dims;
    if (result_summary.shape_data_size > 0) {
      const auto &shape_hbm = out_shape_hbm_[i];
      uint32_t dim_num = result_summary.shape_data_size / sizeof(int64_t);
      std::unique_ptr<int64_t[]> shape_addr(new (std::nothrow) int64_t[dim_num]());
      GE_CHECK_NOTNULL(shape_addr);
      GE_CHK_RT_RET(rtMemcpy(shape_addr.get(), result_summary.shape_data_size,
                             shape_hbm, result_summary.shape_data_size, RT_MEMCPY_DEVICE_TO_HOST));
      for (uint32_t dim_idx = 0; dim_idx < dim_num; ++dim_idx) {
        shape_dims.emplace_back(shape_addr[dim_idx]);
        GELOGD("Node [%zu]th output dim[%u]=%ld.", i, dim_idx, shape_addr[dim_idx]);
      }
    }
    GE_CHK_STATUS_RET(UpdateShapeToOutputDesc(GeShape(shape_dims), output_desc[i]),
                      "AiCpuTask update [%zu]th output shape failed.", i);
    if (DumpManager::GetInstance().GetDumpProperties(kInferSessionId).IsSingleOpNeedDump()) {
      GE_CHK_STATUS_RET(op_desc_->UpdateOutputDesc(i, output_desc[i]),
                        "AiCpuTask update [%zu]th output desc failed.", i);
    }
  }
  return SUCCESS;
}

Status AiCpuTask::UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
                                                    vector<DataBuffer> &outputs,
                                                    rtStream_t stream) {
  if (num_outputs_ == 0) {
    GELOGI("Output num is 0, there is no need to update the output and size.");
    return SUCCESS;
  }
  GELOGI("Update shape and data by result summary begin.");
  for (auto out_shape : out_shape_hbm_) {
    FreeHbm(out_shape);
  }
  out_shape_hbm_.clear();
  GE_CHK_STATUS_RET(ReadResultSummaryAndPrepareMemory(),
                    "Read ResultSummary and update output shape failed.");
  GE_CHK_STATUS_RET(CopyDataToHbm(outputs, stream),
                    "Copy data to output failed.");
  GE_CHK_STATUS_RET(UpdateShapeByHbmBuffer(output_desc),
                    "Update shape by hbm buffer failed.");
  for (auto out_shape : out_shape_hbm_) {
    FreeHbm(out_shape);
  }
  out_shape_hbm_.clear();
  GELOGI("Update shape and data by result summary end.");
  return SUCCESS;
}
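
// Pre-allocates the device buffers used by the summary/copy flow: one
// ResultSummary per output plus the argument arrays consumed by the
// memcpy task.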
Status AiCpuTask::InitForSummaryAndCopy() {
  if (unknown_type_ != DEPEND_COMPUTE || num_outputs_ == 0) {
    GELOGI("Unknown_type is %d, output num is %zu.", unknown_type_, num_outputs_);
    return SUCCESS;
  }
  output_summary_.resize(num_outputs_);
  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
  for (size_t i = 0; i < num_outputs_; ++i) {
    GE_CHK_RT_RET(rtMalloc(&output_summary_[i], result_summary_size, RT_MEMORY_HBM));
  }
  output_summary_host_.resize(num_outputs_);
  const size_t copy_input_buf_len = num_outputs_ * kCopyNum * sizeof(uint64_t);
  GE_CHK_RT_RET(rtMalloc(&copy_input_release_flag_dev_, copy_input_buf_len, RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMalloc(&copy_input_data_size_dev_, copy_input_buf_len, RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMalloc(&copy_input_src_dev_, copy_input_buf_len, RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMalloc(&copy_input_dst_dev_, copy_input_buf_len, RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMalloc(&copy_task_args_buf_, sizeof(STR_FWK_OP_KERNEL), RT_MEMORY_HBM));
  std::vector<uint64_t> copy_io_addr;
  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_));
  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_));
  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_));
  copy_io_addr.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_));
  const auto copy_io_addr_size = sizeof(uint64_t) * copy_io_addr.size();
  GE_CHK_RT_RET(rtMalloc(&copy_ioaddr_dev_, copy_io_addr_size, RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMemcpy(copy_ioaddr_dev_, copy_io_addr_size,
                         copy_io_addr.data(), copy_io_addr_size, RT_MEMCPY_HOST_TO_DEVICE));
  return SUCCESS;
}

Status AiCpuTask::SetMemCopyTask(const domi::KernelExDef &kernel_def) {
  if (kernel_def.args_size() > sizeof(STR_FWK_OP_KERNEL)) {
    GELOGE(ACL_ERROR_GE_PARAM_INVALID, "sizeof(STR_FWK_OP_KERNEL) is %lu, but args_size is %d",
           sizeof(STR_FWK_OP_KERNEL), kernel_def.args_size());
    return ACL_ERROR_GE_PARAM_INVALID;
  }
  GE_CHK_RT_RET(rtMalloc(&copy_workspace_buf_, kernel_def.task_info_size(), RT_MEMORY_HBM));
  GE_CHK_RT_RET(rtMemcpy(copy_workspace_buf_, kernel_def.task_info_size(),
                         kernel_def.task_info().data(), kernel_def.task_info_size(), RT_MEMCPY_HOST_TO_DEVICE));
  STR_FWK_OP_KERNEL aicpu_task = {0};
  auto sec_ret = memcpy_s(&aicpu_task, sizeof(STR_FWK_OP_KERNEL),
                          kernel_def.args().data(), kernel_def.args().size());
  if (sec_ret != EOK) {
    GELOGE(ACL_ERROR_GE_MEMORY_OPERATE_FAILED, "memcpy_s failed, ret: %d", sec_ret);
    return ACL_ERROR_GE_MEMORY_OPERATE_FAILED;
  }
  aicpu_task.fwkKernelBase.fwk_kernel.inputOutputAddr = reinterpret_cast<uintptr_t>(copy_ioaddr_dev_);
  aicpu_task.fwkKernelBase.fwk_kernel.workspaceBaseAddr = reinterpret_cast<uintptr_t>(copy_workspace_buf_);
  aicpu_task.fwkKernelBase.fwk_kernel.extInfoAddr = 0;
  aicpu_task.fwkKernelBase.fwk_kernel.extInfoLen = 0;
  GE_CHK_RT_RET(rtMemcpy(copy_task_args_buf_, sizeof(STR_FWK_OP_KERNEL),
                         &aicpu_task, sizeof(STR_FWK_OP_KERNEL), RT_MEMCPY_HOST_TO_DEVICE));
  return SUCCESS;
}
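
// Full dynamic-shape launch: update the ext info, point the arg table at
// either the user buffers or the ResultSummary buffers (DEPEND_COMPUTE),
// launch, then synchronize and recover output shapes/data where required.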
Status AiCpuTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                               const std::vector<DataBuffer> &input_buffers,
                               std::vector<GeTensorDesc> &output_desc,
                               std::vector<DataBuffer> &output_buffers,
                               rtStream_t stream) {
  GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream));
  if (unknown_type_ == DEPEND_COMPUTE) {
    std::vector<DataBuffer> summary_buffers;
    for (size_t i = 0; i < num_outputs_; ++i) {
      summary_buffers.emplace_back(output_summary_[i], sizeof(aicpu::FWKAdapter::ResultSummary), false);
    }
    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, summary_buffers));
  } else {
    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
  }
  GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream));
  if (unknown_type_ == DEPEND_SHAPE_RANGE) {
    GE_CHK_RT_RET(rtStreamSynchronize(stream));
    GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc));
  } else if (unknown_type_ == DEPEND_COMPUTE) {
    GE_CHK_RT_RET(rtStreamSynchronize(stream));
    GE_CHK_STATUS_RET_NOLOG(UpdateShapeAndDataByResultSummary(output_desc, output_buffers, stream));
  }
  return SUCCESS;
}

Status AiCpuBaseTask::UpdateArgTable(const SingleOpModelParam &param) {
  // AICPU tasks have no workspace, for now.
  return DoUpdateArgTable(param, false);
}

const std::string &AiCpuBaseTask::GetTaskType() const { return kTaskTypeAicpu; }

void AiCpuTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
  arg_base = reinterpret_cast<uintptr_t *>(io_addr_host_.data());
  arg_count = io_addr_host_.size();
}

void AiCpuCCTask::SetKernelArgs(std::unique_ptr<uint8_t[]> args, size_t arg_size) {
  args_ = std::move(args);
  arg_size_ = arg_size;
  // The block dim defaults to 1 for rtCpuKernelLaunch.
  block_dim_ = 1;
}

void AiCpuCCTask::SetSoName(const std::string &so_name) { so_name_ = so_name; }

void AiCpuCCTask::SetkernelName(const std::string &kernel_name) { kernel_name_ = kernel_name; }

void AiCpuCCTask::SetIoAddr(uintptr_t *io_addr) { io_addr_ = io_addr; }

const void *AiCpuCCTask::GetArgs() const { return args_.get(); }

size_t AiCpuCCTask::GetArgSize() const { return arg_size_; }

AiCpuCCTask::~AiCpuCCTask() {}

Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
  GELOGI("To invoke rtCpuKernelLaunch. block_dim = %u, so_name is %s, kernel_name is %s", block_dim_, so_name_.data(),
         kernel_name_.data());
  // sm_desc is nullptr because the L2 buffer is not supported here.
  auto *sm_desc = reinterpret_cast<rtSmDesc_t *>(sm_desc_);
  auto ret = rtCpuKernelLaunchWithFlag(static_cast<const void *>(so_name_.data()),
                                       static_cast<const void *>(kernel_name_.data()),
                                       block_dim_, args_.get(), static_cast<uint32_t>(arg_size_),
                                       sm_desc, stream, dump_flag_);
  if (ret != RT_ERROR_NONE) {
    GELOGE(ret, "Invoke rtCpuKernelLaunchWithFlag failed. ret = %d", ret);
    return RT_ERROR_TO_GE_STATUS(ret);
  }
  GELOGI("[TASK_INFO] %lu/%s", kernel_id_, op_type_.c_str());
  GELOGD("Invoke rtCpuKernelLaunchWithFlag succeeded");
  return SUCCESS;
}

Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                                 const std::vector<DataBuffer> &input_buffers,
                                 std::vector<GeTensorDesc> &output_desc,
                                 std::vector<DataBuffer> &output_buffers,
                                 rtStream_t stream) {
  GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream));
  GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
  GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream));
  if (unknown_type_ == DEPEND_SHAPE_RANGE) {
    GE_CHK_RT_RET(rtStreamSynchronize(stream));
    GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc));
  }
  return SUCCESS;
}

void AiCpuCCTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
  arg_base = io_addr_;
  arg_count = io_addr_num_;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the frontend module (ME) and the underlying hardware, bridging the two. GE takes the graph produced by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the target hardware. GE includes optimizations tailored to the hardware architecture of the Ascend AI processor to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
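
For orientation, here is a minimal sketch of driving GE through the GE API layer. It assumes the public ge_api.h entry points (GEInitialize, Session, AddGraph, RunGraph, GEFinalize); the option key "ge.exec.deviceId" and the way the input Graph is built are illustrative placeholders and vary by version, so treat this as a sketch rather than a definitive recipe.

#include <map>
#include <string>
#include <vector>

#include "ge/ge_api.h"  // GE API layer: initialization, Session, finalization

int main() {
  // Process-level initialization of GE; option keys are assumptions here.
  std::map<std::string, std::string> options = {{"ge.exec.deviceId", "0"}};
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    // GE Core is driven through a Session.
    ge::Session session(options);
    // In practice the Graph is produced by the frontend (ME); an empty
    // named graph stands in for it in this sketch.
    ge::Graph graph("demo_graph");
    uint32_t graph_id = 1;
    // AddGraph registers the graph with GE; optimization/compilation is
    // applied before execution.
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      std::vector<ge::Tensor> inputs;
      std::vector<ge::Tensor> outputs;
      // RunGraph executes the compiled graph on the device.
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }  // Session is destroyed before finalization.
  (void)ge::GEFinalize();
  return 0;
}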