
hcom_ops.h 7.4 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GE_OP_HCOM_OPS_H_
#define GE_OP_HCOM_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
 * @brief Outputs a tensor gathering all input tensors.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int32, float16,
 * float32.
 * @par Attributes:
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllGather)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(rank_size, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllGather)
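/*
 * Usage sketch: building an HcomAllGather node with the wrapper class that
 * REG_OP generates in namespace ge::op. The chained set_input_/set_attr_
 * accessors follow the GE IR graph-construction convention; "data" stands
 * for an assumed upstream op, and the exact signatures are assumptions.
 *
 *   auto data = ge::op::Data("data");
 *   auto all_gather = ge::op::HcomAllGather("all_gather")
 *       .set_input_x(data)                     // tensor contributed by this rank
 *       .set_attr_rank_size(8)                 // 8 ranks take part in the gather
 *       .set_attr_group("hccl_world_group");   // world group, <= 128 characters
 */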
/**
 * @brief Outputs a tensor containing the reduction across all input tensors
 * passed to the op.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int32, float16,
 * float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li fusion: An optional integer identifying the fusion flag of the op. \n
 * 0: no fusion; 1 (default): fusion.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints: \n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllReduce)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .ATTR(fusion, Int, 1)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllReduce)
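/*
 * Usage sketch: a minimal all-reduce node, again assuming the generated
 * ge::op wrapper and chained accessors; "grad" stands for an assumed
 * upstream op producing the tensor to reduce.
 *
 *   auto all_reduce = ge::op::HcomAllReduce("all_reduce")
 *       .set_input_x(grad)                     // local gradient tensor
 *       .set_attr_reduction("sum")             // one of "sum"/"max"/"min"/"prod"
 *       .set_attr_group("hccl_world_group")
 *       .set_attr_fusion(0);                   // opt out of fusion for this node
 */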
/**
 * @brief Broadcasts the input tensor from the root rank to all ranks.
 * @par Inputs:
 * x: A list of dynamic input tensors. Must be one of the following types:
 * int8, int32, float16, float32.
 * @par Attributes:
 * @li root_rank: A required integer identifying the root rank in the op.
 * The input of this rank will be broadcast to other ranks.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A list of dynamic output tensors. Has the same type and length as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomBroadcast)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(root_rank, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomBroadcast)
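/*
 * Usage sketch: broadcasting two tensors from rank 0. The
 * create_dynamic_input_x / set_dynamic_input_x accessors are assumed to
 * follow the convention DYNAMIC_INPUT generates; "w0" and "w1" are assumed
 * upstream ops holding the tensors to broadcast.
 *
 *   auto bcast = ge::op::HcomBroadcast("broadcast")
 *       .set_attr_root_rank(0)                 // rank 0 supplies the data
 *       .set_attr_group("hccl_world_group");
 *   bcast.create_dynamic_input_x(2);           // declare two dynamic inputs
 *   bcast.set_dynamic_input_x(0, w0);
 *   bcast.set_dynamic_input_x(1, w1);
 *   bcast.create_dynamic_output_y(2);          // matching outputs on every rank
 */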
/**
 * @brief Performs reduction across all input tensors, scattering in equal
 * blocks among ranks, each rank getting a chunk of data based on its rank
 * index.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int32, float16,
 * float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomReduceScatter)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(rank_size, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReduceScatter)
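/*
 * Usage sketch: a reduce-scatter over 8 ranks, assuming the generated
 * ge::op wrapper; each rank ends up with one 1/8 block of the summed
 * result, selected by its rank index.
 *
 *   auto rs = ge::op::HcomReduceScatter("reduce_scatter")
 *       .set_input_x(grad)                     // assumed upstream tensor
 *       .set_attr_reduction("sum")
 *       .set_attr_group("hccl_world_group")
 *       .set_attr_rank_size(8);                // output is 1/8 of the input size
 */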
/**
 * @brief Sends the input tensor to the destination rank.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int32, float16,
 * float32.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be received by the HcomReceive op with the same "sr_tag".
 * @li dest_rank: A required integer identifying the destination rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * None.
 * @attention Constraints:\n
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li Operators HcomSend and HcomReceive have the same "sr_tag".
 * @see HcomReceive
 */
REG_OP(HcomSend)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(dest_rank, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomSend)
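/*
 * Usage sketch: the sending side of a point-to-point transfer, assuming the
 * generated ge::op wrapper; pairs with the HcomReceive sketch further below
 * through a shared sr_tag. "activations" is an assumed upstream op.
 *
 *   auto send = ge::op::HcomSend("send")
 *       .set_input_x(activations)
 *       .set_attr_group("hccl_world_group")
 *       .set_attr_sr_tag(100)                  // must match the receiver's tag
 *       .set_attr_dest_rank(1);                // rank that will receive the data
 */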
/**
 * @brief Receives the tensor from the source rank.
 * @par Inputs:
 * None.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be sent by the HcomSend op with the same "sr_tag".
 * @li src_rank: A required integer identifying the source rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li shape: A required list identifying the shape of the tensor to be
 * received.
 * @li dtype: A required integer identifying the type of the tensor to be
 * received. The supported types are: int8, int32, float16, float32.
 * @par Outputs:
 * y: A tensor with type identified in "dtype".
 * @attention Constraints:\n
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li Operators HcomSend and HcomReceive have the same "sr_tag".
 * @li "shape" should be the same as that of the input tensor of HcomSend.
 * @li "dtype" should be the same as that of the input tensor of HcomSend.
 * @see HcomSend
 */
REG_OP(HcomReceive)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(src_rank, Int)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReceive)
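/*
 * Usage sketch: the receiving side of the sr_tag=100 pair above, assuming
 * the generated ge::op wrapper. The shape and dtype values are illustrative
 * and must mirror the tensor passed to the matching HcomSend.
 *
 *   auto recv = ge::op::HcomReceive("receive")
 *       .set_attr_group("hccl_world_group")
 *       .set_attr_sr_tag(100)                  // same tag as the HcomSend node
 *       .set_attr_src_rank(0)                  // rank the data comes from
 *       .set_attr_shape({32, 1024})            // must equal the sent shape
 *       .set_attr_dtype(DT_FLOAT);             // must equal the sent dtype
 */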
}  // namespace ge
#endif  // GE_OP_HCOM_OPS_H_

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, bridging the two. GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.