
inference_context.cc 4.4 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "external/graph/inference_context.h"
#include "debug/ge_util.h"

namespace ge {
class ShapeAndTypeImpl {
 public:
  ShapeAndTypeImpl() = default;
  ~ShapeAndTypeImpl() = default;

  ShapeAndTypeImpl(const Shape &shape, DataType data_type) : shape_(shape), data_type_(data_type) {}

  Shape shape_;
  DataType data_type_ = DT_UNDEFINED;
};

class InferenceContextImpl {
 public:
  InferenceContextImpl() = default;
  ~InferenceContextImpl() = default;

  // For deliver to op in pair, help to support dynamic shape
  std::vector<std::string> marks_;
  std::vector<std::vector<ShapeAndType>> input_handle_shapes_and_types_;
  std::vector<std::vector<ShapeAndType>> output_handle_shapes_and_types_;
};

ShapeAndType::ShapeAndType() { shape_and_type_impl_ = ComGraphMakeShared<ShapeAndTypeImpl>(); }

ShapeAndType::ShapeAndType(const Shape &shape, DataType data_type) {
  shape_and_type_impl_ = ComGraphMakeShared<ShapeAndTypeImpl>(shape, data_type);
}

void ShapeAndType::SetShape(const Shape &shape) {
  if (shape_and_type_impl_ != nullptr) {
    shape_and_type_impl_->shape_ = shape;
  }
}

void ShapeAndType::SetType(DataType data_type) {
  if (shape_and_type_impl_ != nullptr) {
    shape_and_type_impl_->data_type_ = data_type;
  }
}

Shape ShapeAndType::GetShape() const {
  if (shape_and_type_impl_ != nullptr) {
    return shape_and_type_impl_->shape_;
  }
  return Shape();
}

DataType ShapeAndType::GetDataType() const {
  if (shape_and_type_impl_ != nullptr) {
    return shape_and_type_impl_->data_type_;
  }
  return DT_UNDEFINED;
}

InferenceContext::InferenceContext(std::unique_ptr<InferenceContextImpl> &impl) {
  inference_context_impl_ = std::move(impl);
}

std::unique_ptr<InferenceContext> InferenceContext::Create() {
  std::unique_ptr<InferenceContextImpl> impl =
      std::unique_ptr<InferenceContextImpl>(new (std::nothrow) InferenceContextImpl());
  if (impl == nullptr) {
    return nullptr;
  }
  return std::unique_ptr<InferenceContext>(new (std::nothrow) InferenceContext(impl));
}

void InferenceContext::SetInputHandleShapesAndTypes(std::vector<std::vector<ShapeAndType>> &&shapes_and_types) {
  inference_context_impl_->input_handle_shapes_and_types_.swap(shapes_and_types);
}

const std::vector<std::vector<ShapeAndType>> &InferenceContext::GetInputHandleShapesAndTypes() const {
  return inference_context_impl_->input_handle_shapes_and_types_;
}

const std::vector<std::vector<ShapeAndType>> &InferenceContext::GetOutputHandleShapesAndTypes() const {
  return inference_context_impl_->output_handle_shapes_and_types_;
}

void InferenceContext::SetOutputHandleShapesAndTypes(const std::vector<std::vector<ShapeAndType>> &shapes_and_types) {
  inference_context_impl_->output_handle_shapes_and_types_ = shapes_and_types;
}

void InferenceContext::SetOutputHandleShapesAndTypes(std::vector<std::vector<ShapeAndType>> &&shapes_and_types) {
  inference_context_impl_->output_handle_shapes_and_types_.swap(shapes_and_types);
}

void InferenceContext::SetMarks(const std::vector<std::string> &marks) { inference_context_impl_->marks_ = marks; }

void InferenceContext::SetMarks(const std::vector<AscendString> &marks) {
  std::vector<std::string> impl_marks;
  for (const auto &mark : marks) {
    if (mark.GetString() != nullptr) {
      impl_marks.emplace_back(mark.GetString());
    }
  }
  inference_context_impl_->marks_ = impl_marks;
}

const std::vector<std::string> &InferenceContext::GetMarks() const { return inference_context_impl_->marks_; }

void InferenceContext::GetMarks(std::vector<AscendString> &marks) const {
  std::vector<std::string> str_marks = inference_context_impl_->marks_;
  for (auto &str_mark : str_marks) {
    marks.emplace_back(str_mark.c_str());
  }
}
}  // namespace ge
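The file exposes a pimpl-style public API: ShapeAndType bundles a Shape with a DataType, and InferenceContext carries per-handle shape/type lists plus string "marks" that are delivered to paired ops for dynamic-shape support. Below is a minimal caller-side sketch (hypothetical code, not part of this file). It assumes ge::Shape can be constructed from a std::vector<int64_t> dims list and that DT_FLOAT is available from GE's public type header; the mark name "queue_0" is a made-up example.

// Hypothetical usage sketch of the API implemented in inference_context.cc.
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include "external/graph/inference_context.h"

void InferenceContextExample() {
  // Create() returns nullptr if allocation fails, so always check.
  std::unique_ptr<ge::InferenceContext> ctx = ge::InferenceContext::Create();
  if (ctx == nullptr) {
    return;
  }

  // One output handle carrying a single shape/dtype pair (shape {2, 3}, float).
  ge::ShapeAndType out(ge::Shape({2, 3}), ge::DT_FLOAT);
  std::vector<std::vector<ge::ShapeAndType>> outputs{{out}};
  ctx->SetOutputHandleShapesAndTypes(std::move(outputs));  // rvalue overload swaps instead of copying

  // Marks are passed to ops in pairs to help dynamic-shape inference; name is illustrative.
  ctx->SetMarks(std::vector<std::string>{"queue_0"});

  // Read the stored state back.
  const auto &handles = ctx->GetOutputHandleShapesAndTypes();
  std::cout << "handles: " << handles.size()
            << ", dtype: " << handles[0][0].GetDataType()
            << ", first mark: " << ctx->GetMarks()[0] << std::endl;
}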

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and bridges the two: GE takes the graph issued by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.