You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

stream_resource.cc 7.7 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
(line-number gutter from the source listing, lines 1–229, omitted)
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  16. #include "single_op/stream_resource.h"
  17. #include "framework/common/debug/ge_log.h"
  18. #include "framework/common/debug/log.h"
  19. #include "runtime/rt.h"
  20. #include "single_op/single_op_model.h"
  21. namespace ge {
namespace {
// Size of the scratch device buffer pre-allocated by StreamResource::Init():
// 1 MiB of HBM (limit available device mem size to 1M).
const uint32_t kFuzzDeviceBufferSize = 1 * 1024 * 1024;
}  // namespace
  26. StreamResource::StreamResource(uintptr_t resource_id) : resource_id_(resource_id) {
  27. }
  28. StreamResource::~StreamResource() {
  29. for (auto mem : memory_list_) {
  30. if (mem != nullptr) {
  31. auto rt_ret = rtFree(mem);
  32. GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
  33. }
  34. }
  35. for (auto weight : weight_list_) {
  36. if (weight != nullptr) {
  37. auto rt_ret = rtFree(weight);
  38. GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
  39. }
  40. }
  41. if (device_buffer_ != nullptr) {
  42. auto rt_ret = rtFree(device_buffer_);
  43. GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
  44. }
  45. }
  46. Status StreamResource::Init() {
  47. auto rt_ret = rtMalloc(&device_buffer_, kFuzzDeviceBufferSize, RT_MEMORY_HBM);
  48. GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Malloc][Rt] failed."));
  49. return SUCCESS;
  50. }
  51. SingleOp *StreamResource::GetOperator(const uint64_t key) {
  52. std::lock_guard<std::mutex> lk(mu_);
  53. auto it = op_map_.find(key);
  54. if (it == op_map_.end()) {
  55. return nullptr;
  56. }
  57. return it->second.get();
  58. }
  59. DynamicSingleOp *StreamResource::GetDynamicOperator(const uint64_t key) {
  60. std::lock_guard<std::mutex> lk(mu_);
  61. auto it = dynamic_op_map_.find(key);
  62. if (it == dynamic_op_map_.end()) {
  63. return nullptr;
  64. }
  65. return it->second.get();
  66. }
  67. rtStream_t StreamResource::GetStream() const {
  68. return stream_;
  69. }
  70. void StreamResource::SetStream(rtStream_t stream) {
  71. stream_ = stream;
  72. }
// Allocates (or reuses) a device buffer of at least `size` bytes.
// The buffer list behaves as a single, monotonically growing allocation:
// the last entry of `allocated` is reused whenever it is already big enough
// (tracked via `max_allocated`); otherwise it is freed and replaced.
// NOTE(review): caller is expected to hold the appropriate lock — see
// MallocMemory(); this helper itself does no locking.
// Returns the (zero-filled) buffer, or nullptr on size == 0 or runtime error.
uint8_t *StreamResource::DoMallocMemory(const std::string &purpose,
size_t size,
size_t &max_allocated,
std::vector<uint8_t *> &allocated) {
if (size == 0) {
GELOGD("Mem size == 0");
return nullptr;
}
// Fast path: the existing buffer is large enough — hand it back as-is.
if (size <= max_allocated && !allocated.empty()) {
GELOGD("reuse last memory");
return allocated.back();
}
// Grow path: drop the old (too small) buffer first. Synchronize the stream
// before rtFree so no in-flight kernel is still using that memory.
if (!allocated.empty()) {
uint8_t *current_buffer = allocated.back();
allocated.pop_back();
if (rtStreamSynchronize(stream_) != RT_ERROR_NONE) {
GELOGW("Failed to invoke rtStreamSynchronize");
}
(void) rtFree(current_buffer);
}
uint8_t *buffer = nullptr;
auto ret = rtMalloc(reinterpret_cast<void **>(&buffer), size, RT_MEMORY_HBM);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[RtMalloc][Memory] failed, size = %zu, ret = %d", size, ret);
REPORT_INNER_ERROR("E19999", "rtMalloc failed, size = %zu, ret = %d.", size, ret);
return nullptr;
}
GE_PRINT_DYNAMIC_MEMORY(rtMalloc, purpose.c_str(), size)
// Zero-fill the new buffer; on failure release it so nothing leaks.
ret = rtMemset(buffer, size, 0U, size);
if (ret != RT_ERROR_NONE) {
GELOGE(RT_FAILED, "[RtMemset][Memory] failed, ret = %d", ret);
REPORT_INNER_ERROR("E19999", "rtMemset failed, ret = %d.", ret);
auto rt_ret = rtFree(buffer);
GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[RtFree][Memory] failed"));
return nullptr;
}
GELOGD("Malloc new memory succeeded. size = %zu", size);
max_allocated = size;
allocated.emplace_back(buffer);
return buffer;
}
  114. uint8_t *StreamResource::MallocMemory(const std::string &purpose, size_t size, bool holding_lock) {
  115. GELOGD("To Malloc memory, size = %zu", size);
  116. if (holding_lock) {
  117. return DoMallocMemory(purpose, size, max_memory_size_, memory_list_);
  118. } else {
  119. std::lock_guard<std::mutex> lk(stream_mu_);
  120. return DoMallocMemory(purpose, size, max_memory_size_, memory_list_);
  121. }
  122. }
  123. uint8_t *StreamResource::MallocWeight(const std::string &purpose, size_t size) {
  124. GELOGD("To Malloc weight, size = %zu", size);
  125. uint8_t *buffer = nullptr;
  126. auto ret = rtMalloc(reinterpret_cast<void **>(&buffer), size, RT_MEMORY_HBM);
  127. if (ret != RT_ERROR_NONE) {
  128. GELOGE(RT_FAILED, "[RtMalloc][Memory] failed, size = %zu, ret = %d", size, ret);
  129. REPORT_INNER_ERROR("E19999", "rtMalloc failed, size = %zu, ret = %d.", size, ret);
  130. return nullptr;
  131. }
  132. GE_PRINT_DYNAMIC_MEMORY(rtMalloc, purpose.c_str(), size)
  133. weight_list_.emplace_back(buffer);
  134. return buffer;
  135. }
  136. Status StreamResource::BuildDynamicOperator(const ModelData &model_data,
  137. DynamicSingleOp **single_op,
  138. const uint64_t model_id) {
  139. const string &model_name = std::to_string(model_id);
  140. std::lock_guard<std::mutex> lk(mu_);
  141. auto it = dynamic_op_map_.find(model_id);
  142. if (it != dynamic_op_map_.end()) {
  143. *single_op = it->second.get();
  144. return SUCCESS;
  145. }
  146. SingleOpModel model(model_name, model_data.model_data, model_data.model_len);
  147. auto ret = model.Init();
  148. if (ret != SUCCESS) {
  149. GELOGE(ret, "[Init][SingleOpModel] failed. model = %s, ret = %u", model_name.c_str(), ret);
  150. REPORT_CALL_ERROR("E19999", "SingleOpModel init failed, model = %s, ret = %u", model_name.c_str(), ret);
  151. return ret;
  152. }
  153. auto new_op = std::unique_ptr<DynamicSingleOp>(new(std::nothrow) DynamicSingleOp(resource_id_, &stream_mu_, stream_));
  154. GE_CHECK_NOTNULL(new_op);
  155. GELOGI("To build operator: %s", model_name.c_str());
  156. GE_CHK_STATUS_RET(model.BuildDynamicOp(*this, *new_op),
  157. "[Build][DynamicOp]failed. op = %s, ret = %u", model_name.c_str(), ret);
  158. *single_op = new_op.get();
  159. dynamic_op_map_[model_id] = std::move(new_op);
  160. return SUCCESS;
  161. }
  162. Status StreamResource::BuildOperator(const ModelData &model_data, SingleOp **single_op, const uint64_t model_id) {
  163. const string &model_name = std::to_string(model_id);
  164. std::lock_guard<std::mutex> lk(mu_);
  165. auto it = op_map_.find(model_id);
  166. if (it != op_map_.end()) {
  167. *single_op = it->second.get();
  168. return SUCCESS;
  169. }
  170. SingleOpModel model(model_name, model_data.model_data, model_data.model_len);
  171. auto ret = model.Init();
  172. if (ret != SUCCESS) {
  173. GELOGE(ret, "[Init][SingleOpModel] failed. model = %s, ret = %u", model_name.c_str(), ret);
  174. REPORT_CALL_ERROR("E19999", "SingleOpModel init failed, model = %s, ret = %u", model_name.c_str(), ret);
  175. return ret;
  176. }
  177. auto new_op = std::unique_ptr<SingleOp>(new(std::nothrow) SingleOp(this, &stream_mu_, stream_));
  178. if (new_op == nullptr) {
  179. GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[New][SingleOp] failed.");
  180. REPORT_CALL_ERROR("E19999", "new SingleOp failed.");
  181. return ACL_ERROR_GE_MEMORY_ALLOCATION;
  182. }
  183. GELOGI("To build operator: %s", model_name.c_str());
  184. GE_CHK_STATUS_RET(model.BuildOp(*this, *new_op), "[Build][Op] failed. op = %s, ret = %u", model_name.c_str(), ret);
  185. *single_op = new_op.get();
  186. op_map_[model_id] = std::move(new_op);
  187. return SUCCESS;
  188. }
  189. const uint8_t *StreamResource::GetMemoryBase() const {
  190. if (memory_list_.empty()) {
  191. return nullptr;
  192. }
  193. return memory_list_.back();
  194. }
  195. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示