
execution_engine.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hybrid/executor/worker/execution_engine.h"
#include "graph/runtime_inference_context.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/tensor_adapter.h"
#include "hybrid/node_executor/node_executor.h"
#include "common/dump/dump_manager.h"
#include "common/dump/dump_op.h"
#include "common/types.h"
namespace ge {
namespace hybrid {
namespace {
constexpr int64_t kMaxPadding = 63;

Status LogInputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    const auto &tensor_desc = node_item.op_desc->MutableInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. input[%d] = %s, shape = [%s]", node_item.NodeName().c_str(), i,
           input_tensor->DebugString().c_str(), tensor_desc->MutableShape().ToString().c_str());
  }
  return SUCCESS;
}

Status LogOutputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumOutputs(); ++i) {
    const auto &output_tensor = task_context.GetOutput(i);
    GE_CHECK_NOTNULL(output_tensor);
    const auto &tensor_desc = node_item.op_desc->MutableOutputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. output[%d] = %s, shape = [%s]", node_item.NodeName().c_str(), i,
           output_tensor->DebugString().c_str(), tensor_desc->MutableShape().ToString().c_str());
  }
  return SUCCESS;
}
}  // namespace
class NodeDoneCallback {
 public:
  NodeDoneCallback(GraphExecutionContext *graph_context, std::shared_ptr<TaskContext> task_context);
  ~NodeDoneCallback() = default;
  Status OnNodeDone();

 private:
  Status PrepareConstInputs(const NodeItem &node_item);
  Status DumpDynamicNode();
  GraphExecutionContext *graph_context_;
  std::shared_ptr<TaskContext> context_;
  DumpOp dump_op_;
};

NodeDoneCallback::NodeDoneCallback(GraphExecutionContext *graph_context, std::shared_ptr<TaskContext> task_context)
    : graph_context_(graph_context), context_(std::move(task_context)) {}
Status NodeDoneCallback::PrepareConstInputs(const NodeItem &node_item) {
  for (auto output_idx : node_item.to_const_output_id_list) {
    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(), "[PrepareConstInputs] [index = %d] Start",
                          output_idx);
    auto output_tensor = context_->GetOutput(output_idx);
    GE_CHECK_NOTNULL(output_tensor);

    Tensor tensor;
    auto ge_tensor_desc = node_item.op_desc->MutableOutputDesc(output_idx);
    GE_CHECK_NOTNULL(ge_tensor_desc);
    tensor.SetTensorDesc(TensorAdapter::GeTensorDesc2TensorDesc(*ge_tensor_desc));

    int64_t tensor_size;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorSizeInBytes(*ge_tensor_desc, tensor_size),
                            "Failed to invoke GetTensorSizeInBytes");
    if (output_tensor->GetSize() < static_cast<size_t>(tensor_size)) {
      GELOGE(INTERNAL_ERROR, "[%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s",
             node_item.NodeName().c_str(), output_idx, tensor_size, output_tensor->DebugString().c_str());
      return INTERNAL_ERROR;
    }

    vector<uint8_t> host_buffer(static_cast<unsigned long>(tensor_size));
    GELOGD("[%s] To cache output[%d] to host, size = %zu", node_item.NodeName().c_str(), output_idx,
           output_tensor->GetSize());
    GE_CHK_RT_RET(
        rtMemcpy(host_buffer.data(), tensor_size, output_tensor->GetData(), tensor_size, RT_MEMCPY_DEVICE_TO_HOST));
    tensor.SetData(host_buffer);

    string session_id = std::to_string(context_->GetSessionId());
    RuntimeInferenceContext *runtime_infer_ctx = nullptr;
    GE_CHK_GRAPH_STATUS_RET(RuntimeInferenceContext::GetContext(session_id, &runtime_infer_ctx),
                            "Failed to get RuntimeInferenceContext, session_id = %s", session_id.c_str());
    GE_CHK_STATUS_RET(runtime_infer_ctx->SetTensor(node_item.node_id, output_idx, std::move(tensor)),
                      "Failed to SetTensor, node = %s, output_index = %d", node_item.NodeName().c_str(), output_idx);
    GELOGD("[%s] Output[%d] cached successfully in session: %s. node_id = %d, shape = [%s]",
           node_item.NodeName().c_str(), output_idx, session_id.c_str(), node_item.node_id,
           ge_tensor_desc->GetShape().ToString().c_str());
    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(), "[PrepareConstInputs] [index = %d] End",
                          output_idx);
  }
  return SUCCESS;
}
Status NodeDoneCallback::DumpDynamicNode() {
  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "Node is nullptr");
    return PARAM_INVALID;
  }
  auto op_desc = node->GetOpDesc();
  auto stream = context_->GetStream();
  vector<uintptr_t> input_addrs;
  vector<uintptr_t> output_addrs;
  for (int i = 0; i < context_->NumInputs(); i++) {
    auto tensor_value = context_->GetInput(i);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "Tensor value is nullptr");
    uintptr_t input_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    input_addrs.emplace_back(input_addr);
  }
  for (int j = 0; j < context_->NumOutputs(); j++) {
    auto tensor_value = context_->GetOutput(j);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "Tensor value is nullptr");
    uintptr_t output_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    output_addrs.emplace_back(output_addr);
  }
  dump_op_.SetDumpInfo(context_->GetDumpProperties(), op_desc, input_addrs, output_addrs, stream);

  GE_CHECK_NOTNULL(graph_context_);
  const HybridModel *model = graph_context_->model;
  GE_CHECK_NOTNULL(model);
  std::string dynamic_model_name = model->GetModelName();
  uint32_t model_id = model->GetModelId();
  dump_op_.SetDynamicModelInfo(dynamic_model_name, model_id);

  void *global_step = nullptr;
  TensorValue *variable_global_step = context_->GetVariable(NODE_NAME_GLOBAL_STEP);
  if (variable_global_step != nullptr) {
    global_step = const_cast<void *>(variable_global_step->GetData());
  }
  void *loop_per_iter = nullptr;
  TensorValue *variable_loop_per_iter = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_PER_ITER);
  if (variable_loop_per_iter != nullptr) {
    loop_per_iter = const_cast<void *>(variable_loop_per_iter->GetData());
  }
  void *loop_cond = nullptr;
  TensorValue *variable_loop_cond = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_COND);
  if (variable_loop_cond != nullptr) {
    loop_cond = const_cast<void *>(variable_loop_cond->GetData());
  }
  dump_op_.SetLoopAddr(global_step, loop_per_iter, loop_cond);

  GE_CHK_STATUS_RET(dump_op_.LaunchDumpOp(), "Failed to launch dump op in hybrid model");
  auto rt_ret = rtStreamSynchronize(stream);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(rt_ret, "rtStreamSynchronize failed");
    return rt_ret;
  }
  return SUCCESS;
}
Status NodeDoneCallback::OnNodeDone() {
  auto &node_item = context_->GetNodeItem();
  GELOGI("[%s] Start callback process.", node_item.NodeName().c_str());
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Compute] End");
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] Start");

  auto dump_path = context_->GetDumpProperties().GetDumpPath();
  if (!dump_path.empty()) {
    GELOGI("Start to dump dynamic shape, dump_path is %s", dump_path.c_str());
    GE_CHK_STATUS_RET(DumpDynamicNode(), "Failed to dump dynamic node");
  }

  // release inputs
  for (int i = 0; i < context_->NumInputs(); ++i) {
    context_->ReleaseInput(i);
  }

  GE_CHK_STATUS_RET_NOLOG(PrepareConstInputs(node_item));
  // PropagateOutputs for type == DEPEND_COMPUTE
  if (node_item.shape_inference_type == DEPEND_COMPUTE) {
    if (graph_context_->trace_enabled) {
      (void)LogOutputs(node_item, *context_);
    }
    GE_CHK_STATUS_RET(context_->PropagateOutputs(), "[%s] Failed to propagate outputs",
                      node_item.NodeName().c_str());
    RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[PropagateOutputs] End");
  }

  // release condition variable
  if (node_item.has_observer) {
    GELOGI("[%s] Notify observer. node_id = %d", node_item.NodeName().c_str(), node_item.node_id);
    context_->NodeDone();
  }

  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] End");
  return SUCCESS;
}
Status ExecutionEngine::ExecuteAsync(NodeState &node_state, const std::shared_ptr<TaskContext> &task_context,
                                     GraphExecutionContext &execution_context) {
  GELOGI("[%s] Node is ready for execution", task_context->GetNodeName());
  RECORD_EXECUTION_EVENT(&execution_context, task_context->GetNodeName(), "Start");
  auto cb = std::shared_ptr<NodeDoneCallback>(new (std::nothrow) NodeDoneCallback(&execution_context, task_context));
  GE_CHECK_NOTNULL(cb);
  auto callback = [&, cb]() {
    auto ret = cb->OnNodeDone();
    if (ret != SUCCESS) {
      task_context->OnError(ret);
    }
  };

  GE_CHK_STATUS_RET_NOLOG(DoExecuteAsync(node_state, *task_context, execution_context, callback));
  GE_CHK_STATUS_RET_NOLOG(PropagateOutputs(*node_state.GetNodeItem(), *task_context, execution_context));
  return SUCCESS;
}
Status ExecutionEngine::DoExecuteAsync(NodeState &node_state, TaskContext &task_context, GraphExecutionContext &context,
                                       const std::function<void()> &callback) {
  const auto &task = node_state.GetKernelTask();
  if (task == nullptr) {
    GELOGE(INTERNAL_ERROR, "[%s] NodeTask is null.", node_state.GetName().c_str());
    return INTERNAL_ERROR;
  }

  // Wait for dependent nodes (DEPEND_COMPUTE), so that the input tensors are valid.
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[AwaitDependents] Start");
  GE_CHK_STATUS_RET(node_state.AwaitInputTensors(context), "[%s] Failed to wait for dependent nodes.",
                    node_state.GetName().c_str());

  const auto &node_item = *node_state.GetNodeItem();
  auto executor = node_item.node_executor;
  GE_CHECK_NOTNULL(executor);
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start");
  GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context), "[%s] Failed to prepare task",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End");
  GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str());

  if (context.trace_enabled) {
    LogInputs(node_item, task_context);
    if (node_item.shape_inference_type != DEPEND_COMPUTE) {
      LogOutputs(node_item, task_context);
    }
  }

  GE_CHK_STATUS_RET(ValidateInputTensors(node_state, task_context), "Failed to validate input tensors.");
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ValidateInputTensors] End");

  if (context.profiling_level > 0) {
    auto *ctx = &context;
    const string &name = node_state.GetName();
    (void)task_context.RegisterCallback([ctx, name]() { RECORD_CALLBACK_EVENT(ctx, name.c_str(), "[Compute] Start"); });
  }

  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] Start");
  GE_CHK_STATUS_RET(node_item.node_executor->ExecuteTask(*task, task_context, callback), "[%s] Failed to execute task",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] End");
  GELOGD("[%s] Done task launch successfully.", node_state.GetName().c_str());
  return SUCCESS;
}
Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    if (input_tensor->GetData() == nullptr) {
      GELOGD("[%s] Skipping null input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    const auto &tensor_desc = node_state.GetOpDesc()->MutableInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    if (tensor_desc->GetDataType() == DT_STRING) {
      GELOGD("[%s] Skipping DT_STRING input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    int64_t expected_size;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, expected_size));
    GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size);
    auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize());
    if (size_diff > 0) {
      if (size_diff <= kMaxPadding) {
        GELOGW("[%s] Input[%d]: tensor size mismatch. expected: %ld, but given: %zu", task_context.GetNodeName(), i,
               expected_size, input_tensor->GetSize());
      } else {
        GELOGE(INTERNAL_ERROR, "[%s] Input[%d]: tensor size mismatch. expected: %ld, but given: %zu",
               task_context.GetNodeName(), i, expected_size, input_tensor->GetSize());
        return INTERNAL_ERROR;
      }
    }
  }
  return SUCCESS;
}
Status ExecutionEngine::PropagateOutputs(const NodeItem &node_item, TaskContext &task_context,
                                         GraphExecutionContext &context) {
  if (node_item.shape_inference_type != DEPEND_COMPUTE) {
    GE_CHK_STATUS_RET(task_context.PropagateOutputs(), "[%s] Failed to propagate outputs.",
                      node_item.NodeName().c_str());
    RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PropagateOutputs] End");
    GELOGD("[%s] Done propagating outputs successfully.", node_item.NodeName().c_str());
  }
  return SUCCESS;
}
}  // namespace hybrid
}  // namespace ge
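
The core of ExecuteAsync above is a completion-callback pattern: the engine allocates a NodeDoneCallback, keeps it alive by capturing a shared_ptr in a lambda, hands that lambda to the node executor, and the executor invokes it once the asynchronously launched task finishes. Below is a minimal, self-contained sketch of that pattern; every name in it is a hypothetical stand-in, not a GraphEngine API.

#include <functional>
#include <future>
#include <iostream>
#include <memory>

// Stand-in for NodeDoneCallback: the post-processing to run after the task.
struct DoneCallback {
  explicit DoneCallback(int node_id) : node_id_(node_id) {}
  int OnNodeDone() {
    std::cout << "node " << node_id_ << " done, releasing inputs\n";
    return 0;  // SUCCESS
  }
  int node_id_;
};

// Launch the task asynchronously and invoke on_done once it completes,
// mirroring how DoExecuteAsync hands `callback` to ExecuteTask.
std::future<void> ExecuteAsync(int node_id, std::function<void()> on_done) {
  return std::async(std::launch::async, [node_id, on_done]() {
    std::cout << "executing node " << node_id << "\n";  // the kernel task
    on_done();                                          // completion callback
  });
}

int main() {
  auto cb = std::make_shared<DoneCallback>(42);
  // Capture cb by value so the callback object outlives this scope.
  auto fut = ExecuteAsync(42, [cb]() {
    if (cb->OnNodeDone() != 0) {
      std::cerr << "callback failed\n";
    }
  });
  fut.wait();
  return 0;
}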

The Graph Engine module (GE) is a submodule of MindSpore, implemented in C++. It sits between the frontend module (ME) and the underlying hardware, serving as the bridge between the two. GE takes the graph handed down by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is invisible to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.
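
To make the GE API / GE Core split concrete, here is a minimal sketch of driving GE through its public API, based on the ge::Session interface in this repository (ge/ge_api.h). The option maps are left empty, and graph construction and tensor setup are elided, so treat this as an illustrative outline rather than a complete, working program.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

int main() {
  // Global and session options (e.g. device settings) are assumed empty here.
  std::map<std::string, std::string> options;
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    ge::Session session(options);
    // In practice this is the graph handed down from the frontend (ME);
    // building its operators is elided in this sketch.
    ge::Graph graph("demo_graph");
    uint32_t graph_id = 0;
    (void)session.AddGraph(graph_id, graph);  // GE Core optimizes and compiles the graph
    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    (void)session.RunGraph(graph_id, inputs, outputs);  // execute on the Ascend device
  }
  (void)ge::GEFinalize();
  return 0;
}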