
build_task_utils.cc 4.2 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "single_op/task/build_task_utils.h"

#include "runtime/rt.h"
#include "graph/load/new_model_manager/model_utils.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/utils/type_utils.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/types.h"

namespace ge {
namespace {
const uint64_t kSessionId = UINT64_MAX;
uint8_t *kVarBase = nullptr;
const uint64_t kLogicVarBase = 0;
const uint64_t kVarSize = 0;
}

std::vector<std::vector<void *>> BuildTaskUtils::GetAddresses(const OpDescPtr &op_desc,
                                                              const SingleOpModelParam &param,
                                                              bool keep_workspace) {
  std::vector<std::vector<void *>> ret;
  RuntimeParam runtime_para;
  runtime_para.mem_size = param.memory_size;
  runtime_para.logic_mem_base = param.base_addr;
  runtime_para.mem_base = param.mem_base;
  runtime_para.weight_size = param.weight_size;
  runtime_para.logic_weight_base = param.weight_addr;
  runtime_para.weight_base = param.weight_base;
  runtime_para.var_size = kVarSize;
  runtime_para.logic_var_base = kLogicVarBase;
  runtime_para.var_base = kVarBase;
  runtime_para.session_id = kSessionId;
  runtime_para.is_single_op = true;

  ret.emplace_back(ModelUtils::GetInputDataAddrs(runtime_para, op_desc));
  ret.emplace_back(ModelUtils::GetOutputDataAddrs(runtime_para, op_desc));
  if (keep_workspace) {
    ret.emplace_back(ModelUtils::GetWorkspaceDataAddrs(runtime_para, op_desc));
  }
  return ret;
}

std::vector<void *> BuildTaskUtils::JoinAddresses(const std::vector<std::vector<void *>> &addresses) {
  std::vector<void *> ret;
  for (auto &address : addresses) {
    ret.insert(ret.end(), address.begin(), address.end());
  }
  return ret;
}

std::vector<void *> BuildTaskUtils::GetKernelArgs(const OpDescPtr &op_desc,
                                                  const SingleOpModelParam &param) {
  auto addresses = GetAddresses(op_desc, param);
  return JoinAddresses(addresses);
}

std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) {
  std::stringstream ss;
  if (op_desc != nullptr) {
    auto op_type = op_desc->GetType();
    if (op_type == ge::NETOUTPUT || op_type == ge::DATA) {
      return ss.str();
    }
    // Conv2D IN[DT_FLOAT16 NC1HWC0[256, 128, 7, 7, 16],DT_FLOAT16 FRACTAL_Z[128, 32, 16, 16]]
    // OUT[DT_FLOAT16 NC1HWC0[256, 32, 7, 7, 16]]
    ss << op_type << " IN[";
    for (uint32_t idx = 0; idx < op_desc->GetAllInputsSize(); idx++) {
      const GeTensorDescPtr &input = op_desc->MutableInputDesc(idx);
      if (input == nullptr) {
        continue;
      }
      ss << TypeUtils::DataTypeToSerialString(input->GetDataType()) << " ";
      ss << TypeUtils::FormatToSerialString(input->GetFormat());
      ss << VectorToString(input->GetShape().GetDims());
      if (idx < op_desc->GetInputsSize() - 1) {
        ss << ",";
      }
    }
    ss << "] OUT[";
    for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx++) {
      const GeTensorDescPtr &output = op_desc->MutableOutputDesc(idx);
      ss << TypeUtils::DataTypeToSerialString(output->GetDataType()) << " ";
      Format out_format = output->GetFormat();
      const GeShape &out_shape = output->GetShape();
      const auto &dims = out_shape.GetDims();
      ss << TypeUtils::FormatToSerialString(out_format);
      ss << VectorToString(dims);
      if (idx < op_desc->GetOutputsSize() - 1) {
        ss << ",";
      }
    }
    ss << "]\n";
  }
  return ss.str();
}
}  // namespace ge
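
For context, here is a minimal sketch of how these helpers could be invoked from a single-op task builder. The wrapper name BuildArgsForOp is hypothetical, and the OpDescPtr and SingleOpModelParam are assumed to come from an already-parsed single-op model; only BuildTaskUtils::GetKernelArgs and BuildTaskUtils::GetTaskInfo, whose signatures appear in the file above, are taken as given.

#include <vector>

#include "single_op/task/build_task_utils.h"
#include "framework/common/debug/ge_log.h"

namespace ge {
// Hypothetical caller, for illustration only; not part of build_task_utils.cc.
void BuildArgsForOp(const OpDescPtr &op_desc, const SingleOpModelParam &param) {
  // One flat list of device addresses: input addresses first, then output addresses
  // (workspace handling follows the keep_workspace default declared in the header).
  std::vector<void *> args = BuildTaskUtils::GetKernelArgs(op_desc, param);

  // Human-readable tensor summary, e.g.
  // "Conv2D IN[DT_FLOAT16 NC1HWC0[256, 128, 7, 7, 16], ...] OUT[...]", useful for debug logs.
  GELOGD("Op profile: %s, arg count: %zu", BuildTaskUtils::GetTaskInfo(op_desc).c_str(), args.size());
}
}  // namespace ge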

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
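
For orientation, the snippet below is a rough sketch of the typical GE client API call flow (initialize, create a Session, add and run a graph), assuming the API declared in ge/ge_api.h. The option keys and the graph contents are placeholders rather than a working configuration; a real graph would be built with the graph construction API before being handed to the session.

#include <map>
#include <string>
#include <vector>

#include "ge/ge_api.h"

int main() {
  std::map<std::string, std::string> options;  // placeholder: SOC version, run mode, etc.
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  {
    ge::Session session(options);
    ge::Graph graph("demo_graph");  // normally populated via the graph construction API

    uint32_t graph_id = 0;
    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    // AddGraph hands the ME-built graph to GE; RunGraph triggers optimization,
    // compilation, and execution on the Ascend device.
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }

  (void)ge::GEFinalize();
  return 0;
}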