You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

host_mem_manager.cc 5.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/manager/host_mem_manager.h"
  17. #include <sstream>
  18. #include "graph/ge_context.h"
  19. #include "graph/utils/tensor_utils.h"
  20. #include "runtime/mem.h"
  21. namespace {
  22. const uint32_t kMallocHostMemFlag = 0;
  23. } // namespace
  24. namespace ge {
  25. Status SharedMemAllocator::Allocate(SharedMemInfo &mem_info) {
  26. auto device_id = GetContext().DeviceId();
  27. GELOGD("SharedMemAllocator::Malloc host mem size= %zu for devid:[%u].", mem_info.mem_size, device_id);
  28. auto dev_id = static_cast<int32_t>(device_id);
  29. GE_CHK_RT_RET(rtSetDevice(dev_id));
  30. // DeviceReset before memory finished!
  31. GE_MAKE_GUARD(not_used_var, [&] { GE_CHK_RT(rtDeviceReset(dev_id)); });
  32. rtMallocHostSharedMemoryIn input_para = {mem_info.shm_name.c_str(), mem_info.mem_size, kMallocHostMemFlag};
  33. rtMallocHostSharedMemoryOut output_para;
  34. rtError_t rt_ret = rtMallocHostSharedMemory(&input_para, &output_para);
  35. if (rt_ret != RT_ERROR_NONE) {
  36. GELOGE(RT_FAILED, "Call rt api(rtMallocHostSharedMemory) failed, devid:[%u].", device_id);
  37. return GE_GRAPH_MEMORY_ALLOC_FAILED;
  38. }
  39. mem_info.fd = output_para.fd;
  40. #ifndef ONLY_COMPILE_OPEN_SRC
  41. mem_info.host_aligned_ptr = AlignedPtr::BuildFromAllocFunc([&output_para](std::unique_ptr<uint8_t[], deleter> &ptr) {
  42. ptr.reset(reinterpret_cast<uint8_t *>(output_para.ptr));
  43. },
  44. [](uint8_t *ptr) {
  45. ptr = nullptr;
  46. });
  47. #else
  48. mem_info.host_address = reinterpret_cast<uint8_t *>(output_para.ptr);
  49. #endif
  50. mem_info.device_address = reinterpret_cast<uint8_t *>(output_para.devPtr);
  51. return SUCCESS;
  52. }
  53. Status SharedMemAllocator::DeAllocate(SharedMemInfo &mem_info) {
  54. GELOGD("SharedMemAllocator::DeAllocate");
  55. #ifndef ONLY_COMPILE_OPEN_SRC
  56. rtFreeHostSharedMemoryIn free_para = {mem_info.shm_name.c_str(), mem_info.mem_size, mem_info.fd,
  57. mem_info.host_aligned_ptr->MutableGet(), mem_info.device_address};
  58. #else
  59. rtFreeHostSharedMemoryIn free_para = {mem_info.shm_name.c_str(), mem_info.mem_size, mem_info.fd,
  60. mem_info.host_address, mem_info.device_address};
  61. #endif
  62. rtError_t rt_ret = rtFreeHostSharedMemory(&free_para);
  63. if (rt_ret != RT_ERROR_NONE) {
  64. GELOGE(RT_FAILED, "Call rt api(rtFreeHostSharedMemory) failed, ret: 0x%X.", rt_ret);
  65. return RT_FAILED;
  66. }
  67. return ge::SUCCESS;
  68. }
  69. HostMemManager &HostMemManager::Instance() {
  70. static HostMemManager mem_manager;
  71. return mem_manager;
  72. }
  73. Status HostMemManager::Initialize() {
  74. std::lock_guard<std::recursive_mutex> lock(mutex_);
  75. allocator_ = std::unique_ptr<SharedMemAllocator>(new (std::nothrow) SharedMemAllocator());
  76. if (allocator_ == nullptr) {
  77. GELOGE(GE_GRAPH_MALLOC_FAILED, "Shared memory allocator init failed!");
  78. return GE_GRAPH_MALLOC_FAILED;
  79. }
  80. return SUCCESS;
  81. }
  82. void HostMemManager::Finalize() noexcept {
  83. std::lock_guard<std::recursive_mutex> lock(mutex_);
  84. for (auto &it : var_memory_base_map_) {
  85. if (allocator_->DeAllocate(it.second) != SUCCESS) {
  86. GELOGW("Host %s mem release failed!", it.first.c_str());
  87. }
  88. }
  89. var_memory_base_map_.clear();
  90. }
  91. Status HostMemManager::MallocSharedMemory(SharedMemInfo &mem_info) {
  92. std::lock_guard<std::recursive_mutex> lock(mutex_);
  93. auto iter = var_memory_base_map_.find(mem_info.op_name);
  94. if (iter != var_memory_base_map_.end()) {
  95. GELOGE(FAILED, "Host shared memory for op %s has been malloced", mem_info.op_name.c_str());
  96. return FAILED;
  97. }
  98. mem_info.shm_name = OpNameToShmName(mem_info.op_name);
  99. GE_CHECK_NOTNULL(allocator_);
  100. GE_CHK_STATUS_RET(allocator_->Allocate(mem_info));
  101. var_memory_base_map_[mem_info.op_name] = mem_info;
  102. return SUCCESS;
  103. }
  104. Status HostMemManager::QueryVarMemInfo(const string &op_name, uint64_t &base_addr, uint64_t &data_size) {
  105. std::lock_guard<std::recursive_mutex> lock(mutex_);
  106. if (var_memory_base_map_.find(op_name) == var_memory_base_map_.end()) {
  107. GELOGE(INTERNAL_ERROR, "Find host base base_addr failed,node name:%s!", op_name.c_str());
  108. return INTERNAL_ERROR;
  109. }
  110. base_addr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(var_memory_base_map_[op_name].device_address));
  111. data_size = var_memory_base_map_[op_name].mem_size;
  112. return SUCCESS;
  113. }
  114. string HostMemManager::OpNameToShmName(const string &op_name) {
  115. string sh_name("Ascend_");
  116. std::hash<std::string> hash_str;
  117. sh_name.append(std::to_string(hash_str(op_name)));
  118. return sh_name;
  119. }
  120. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示