
single_op.cc 11 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "single_op/single_op.h"

#include "common/fmk_types.h"
#include "common/math/math_util.h"
#include "common/profiling/profiling_manager.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/util.h"
#include "graph/load/new_model_manager/model_utils.h"
#include "runtime/mem.h"
#include "single_op/single_op_manager.h"
#include "graph/load/new_model_manager/model_manager.h"

namespace ge {
namespace {
const size_t kDataMemAlignSize = 32;
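// Round size up to the next kDataMemAlignSize boundary and pad one extra
// alignment block, so that kernels reading or writing slightly past the
// logical end of a buffer stay within the allocation.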
size_t GetAlignedSize(size_t size) {
  size_t aligned_size = (size + 2 * kDataMemAlignSize - 1) / kDataMemAlignSize * kDataMemAlignSize;
  return aligned_size;
}
}  // namespace
SingleOp::SingleOp(std::mutex *stream_mutex, rtStream_t stream) : stream_mutex_(stream_mutex), stream_(stream) {
}
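// Release the owned task objects and destroy the AICPU session bound to this op.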
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY SingleOp::~SingleOp() {
  for (auto &task : tasks_) {
    delete task;
    task = nullptr;
  }
  GELOGI("SingleOp destroy sessionId = %lu", aicpu_session_id_);
  ModelManager::GetInstance()->DestroyAicpuSession(aicpu_session_id_);
}
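// Verify that the caller passed the same number of buffers as the model expects,
// and that each buffer, after alignment padding, is large enough.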
Status SingleOp::ValidateArgs(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs) {
  auto num_inputs = inputs.size();
  if (num_inputs != input_sizes_.size()) {
    GELOGE(PARAM_INVALID, "Input num mismatch. model expect %zu, but given %zu", input_sizes_.size(),
           inputs.size());
    return PARAM_INVALID;
  }

  for (size_t i = 0; i < num_inputs; ++i) {
    // pad the given length before comparing, to prevent reads out of bounds
    size_t aligned_size = GetAlignedSize(inputs[i].length);
    GELOGI("Input [%zu], aligned_size:%zu, inputs.length:%lu, input_sizes_:%lu",
           i, aligned_size, inputs[i].length, input_sizes_[i]);
    if (aligned_size < input_sizes_[i]) {
      GELOGE(PARAM_INVALID, "Input size mismatch. index = %zu, model expect %zu,"
             " but given %zu(after align)", i, input_sizes_[i], aligned_size);
      return PARAM_INVALID;
    }
  }

  auto num_outputs = outputs.size();
  if (num_outputs != output_sizes_.size()) {
    GELOGE(PARAM_INVALID, "Output num mismatch. model expect %zu, but given %zu", output_sizes_.size(), outputs.size());
    return PARAM_INVALID;
  }

  for (size_t i = 0; i < num_outputs; ++i) {
    // pad the given length before comparing, to prevent writes out of bounds
    size_t aligned_size = GetAlignedSize(outputs[i].length);
    GELOGI("Output [%zu], aligned_size:%zu, outputs.length:%lu, output_sizes_:%lu",
           i, aligned_size, outputs[i].length, output_sizes_[i]);
    if (aligned_size < output_sizes_[i]) {
      GELOGE(PARAM_INVALID, "Output size mismatch. index = %zu, model expect %zu,"
             " but given %zu(after align)", i, output_sizes_[i], aligned_size);
      return PARAM_INVALID;
    }
  }
  return SUCCESS;
}
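// Collect the device addresses of all input and output buffers into args_,
// in the order the kernels expect them.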
Status SingleOp::GetArgs(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs) {
  size_t arg_index = 0;
  for (auto &input : inputs) {
    args_[arg_index++] = reinterpret_cast<uintptr_t>(input.data);
  }
  for (auto &output : outputs) {
    args_[arg_index++] = reinterpret_cast<uintptr_t>(output.data);
  }
  return SUCCESS;
}
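// Refresh the kernel arguments with the latest buffer addresses:
// TBE tasks are patched in host memory via arg_table_, aicpu_TF tasks get the
// address list copied to device memory, and aicpu_CC tasks are patched in place.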
Status SingleOp::UpdateArgs(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs) {
  Status ret = GetArgs(inputs, outputs);
  if (ret != SUCCESS) {
    return ret;
  }

  // update TBE task args
  size_t num_args = arg_table_.size();
  for (size_t i = 0; i < num_args; ++i) {
    std::vector<uintptr_t *> &ptr_to_arg_in_tasks = arg_table_[i];
    if (ptr_to_arg_in_tasks.empty()) {
      GELOGW("Found no arg address to update for arg[%zu]", i);
      continue;
    }
    for (uintptr_t *arg_addr : ptr_to_arg_in_tasks) {
      *arg_addr = args_[i];
    }
  }

  // update aicpu_TF or aicpu_CC task args
  for (auto &task : tasks_) {
    size_t io_addr_num = args_.size();
    if (task->GetOpTaskType() == OP_TASK_AICPU) {
      GELOGD("Update aicpu_TF task args");
      auto *dst_io_addr = const_cast<uintptr_t *>(reinterpret_cast<const uintptr_t *>(task->GetIOAddr()));
      GE_CHECK_NOTNULL(dst_io_addr);
      auto rt_ret = rtMemcpyAsync(dst_io_addr,
                                  sizeof(uint64_t) * args_.size(),
                                  &args_[0],
                                  sizeof(uint64_t) * args_.size(),
                                  RT_MEMCPY_HOST_TO_DEVICE_EX,
                                  stream_);
      if (rt_ret != RT_ERROR_NONE) {
        GELOGE(RT_FAILED, "rtMemcpyAsync addresses failed, ret = %d", rt_ret);
        return RT_FAILED;
      }
    } else if (task->GetOpTaskType() == OP_TASK_AICPUCC) {
      GELOGD("Update aicpu_CC task args");
      const uintptr_t *task_io_addr = reinterpret_cast<const uintptr_t *>(task->GetIOAddr());
      GE_CHECK_NOTNULL(task_io_addr);
      auto io_addr = reinterpret_cast<uint64_t *>(const_cast<uintptr_t *>(task_io_addr));
      for (size_t i = 0; i < io_addr_num; ++i) {
        io_addr[i] = static_cast<uint64_t>(args_[i]);
      }
    } else {
      GELOGW("Only TF_kernel aicpu and aicpu_CC are supported, but got %u", task->GetOpTaskType());
      continue;
    }
  }
  return SUCCESS;
}
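// Validate the buffers, then serialize on the stream mutex while updating args
// and launching every task on the stream.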
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status SingleOp::ExecuteAsync(const std::vector<DataBuffer> &inputs,
                                                                               const std::vector<DataBuffer> &outputs) {
  Status ret = ValidateArgs(inputs, outputs);
  if (ret != SUCCESS) {
    return ret;
  }

  std::lock_guard<std::mutex> lk(*stream_mutex_);
  ret = UpdateArgs(inputs, outputs);
  if (ret != SUCCESS) {
    return ret;
  }

  for (auto &task : tasks_) {
    ret = task->LaunchKernel(stream_);
    if (ret != SUCCESS) {
      return ret;
    }
    ret = task->OpenDump(args_, stream_);
    if (ret != SUCCESS) {
      GELOGE(ret, "Open dump failed");
      return ret;
    }
  }
  return ret;
}
void SingleOp::SetStream(rtStream_t stream) {
  stream_ = stream;
}

void SingleOp::SetSessionID(uint64_t session_id) {
  aicpu_session_id_ = session_id;
}
DynamicSingleOp::DynamicSingleOp(uintptr_t resource_id, std::mutex *stream_mutex, rtStream_t stream)
    : resource_id_(resource_id), stream_mutex_(stream_mutex), stream_(stream) {
}

DynamicSingleOp::~DynamicSingleOp() {
  GELOGI("DynamicSingleOp destroy sessionId = %lu", aicpu_session_id_);
  ModelManager::GetInstance()->DestroyAicpuSession(aicpu_session_id_);
}
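// Check that buffer counts match their tensor descriptions and that both match
// the signature of the compiled op.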
Status DynamicSingleOp::ValidateParams(const vector<GeTensorDesc> &input_desc,
                                       const std::vector<DataBuffer> &inputs,
                                       std::vector<GeTensorDesc> &output_desc,
                                       std::vector<DataBuffer> &outputs) const {
  if (inputs.size() != input_desc.size()) {
    GELOGE(PARAM_INVALID,
           "Input number mismatches input desc number. Input num = %zu, input desc num = %zu",
           inputs.size(),
           input_desc.size());
    return PARAM_INVALID;
  }

  if (outputs.size() != output_desc.size()) {
    GELOGE(PARAM_INVALID,
           "Output number mismatches output desc number. Output num = %zu, output desc num = %zu",
           outputs.size(),
           output_desc.size());
    return PARAM_INVALID;
  }

  if (input_desc.size() != num_inputs_) {
    GELOGE(PARAM_INVALID, "Input number mismatches. expect %zu, but given %zu", num_inputs_, input_desc.size());
    return PARAM_INVALID;
  }

  if (output_desc.size() != num_outputs_) {
    GELOGE(PARAM_INVALID, "Output number mismatches. expect %zu, but given %zu", num_outputs_, output_desc.size());
    return PARAM_INVALID;
  }
  return SUCCESS;
}
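// Sum all workspace sizes (with overflow checks), allocate one contiguous block
// from the stream resource, and hand back per-workspace base addresses.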
Status DynamicSingleOp::AllocateWorkspaces(const std::vector<int64_t> &workspace_sizes,
                                           std::vector<void *> &workspaces) {
  static const std::string kPurpose("malloc workspace memory for dynamic op.");
  if (workspace_sizes.empty()) {
    GELOGD("No need to allocate workspace.");
    return SUCCESS;
  }

  int64_t total_size = 0;
  std::vector<int64_t> ws_offsets;
  for (auto ws_size : workspace_sizes) {
    // alignment and padding should be done in OpParaCalculate
    GE_CHK_STATUS_RET_NOLOG(CheckInt64AddOverflow(total_size, ws_size));
    ws_offsets.emplace_back(total_size);
    total_size += ws_size;
  }

  GELOGD("Total workspace size is %ld", total_size);
  StreamResource *stream_resource = SingleOpManager::GetInstance().GetResource(resource_id_, stream_);
  GE_CHECK_NOTNULL(stream_resource);
  auto ws_base = stream_resource->MallocMemory(kPurpose, static_cast<size_t>(total_size));
  if (ws_base == nullptr) {
    GELOGE(MEMALLOC_FAILED, "Failed to allocate memory of size: %ld", total_size);
    return MEMALLOC_FAILED;
  }
  GELOGD("Done allocating workspace memory successfully.");

  for (auto ws_offset : ws_offsets) {
    workspaces.emplace_back(ws_base + ws_offset);
  }
  return SUCCESS;
}
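// TBE path: recompute the run info (tiling and workspace sizes) for the current
// shapes, allocate workspaces, then launch the kernel.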
Status DynamicSingleOp::ExecuteTbeTask(const vector<GeTensorDesc> &input_desc,
                                       const vector<void *> &inputs,
                                       vector<GeTensorDesc> &output_desc,
                                       vector<void *> &outputs) {
  GE_CHK_STATUS_RET_NOLOG(op_task_->UpdateRunInfo(input_desc, output_desc));
  std::vector<void *> workspace_buffers;
  GE_CHK_STATUS_RET_NOLOG(AllocateWorkspaces(op_task_->GetWorkspaceSizes(), workspace_buffers));
  return op_task_->LaunchKernel(inputs, outputs, workspace_buffers, stream_);
}
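// Dispatch a dynamic-shape op: TBE tasks go through the tiling path above,
// while AICPU (TF and CC) tasks are launched directly with the tensor descs.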
Status DynamicSingleOp::ExecuteAsync(const vector<GeTensorDesc> &input_desc,
                                     const vector<DataBuffer> &input_buffers,
                                     vector<GeTensorDesc> &output_desc,
                                     vector<DataBuffer> &output_buffers) {
  GE_CHECK_NOTNULL(op_task_);
  GE_CHK_STATUS_RET_NOLOG(ValidateParams(input_desc, input_buffers, output_desc, output_buffers));
  std::lock_guard<std::mutex> lk(*stream_mutex_);

  std::vector<void *> inputs;
  std::vector<void *> outputs;
  for (auto &buffer : input_buffers) {
    inputs.emplace_back(buffer.data);
  }
  for (auto &buffer : output_buffers) {
    outputs.emplace_back(buffer.data);
  }

  if (op_task_->GetOpTaskType() == OP_TASK_TBE) {
    return ExecuteTbeTask(input_desc, inputs, output_desc, outputs);
  } else if (op_task_->GetOpTaskType() == OP_TASK_AICPU || op_task_->GetOpTaskType() == OP_TASK_AICPUCC) {
    return op_task_->LaunchKernel(input_desc, inputs, output_desc, outputs, stream_);
  } else {
    GELOGE(UNSUPPORTED,
           "Only TBE_Task, AI_CPU_Task and AI_CPUCC_Task are supported, but got %u",
           op_task_->GetOpTaskType());
    return UNSUPPORTED;
  }
}
void DynamicSingleOp::SetSessionID(uint64_t session_id) {
  aicpu_session_id_ = session_id;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph produced by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE is mainly composed of two parts: GE API and GE Core.