
hcom_ops.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file hcom_ops.h
 * \brief Huawei collective communication library ops.
 */
#ifndef GE_OP_HCOM_OPS_H_
#define GE_OP_HCOM_OPS_H_
#include "graph/operator_reg.h"
namespace ge {
/**
 * @brief Outputs a tensor gathering all input tensors.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllGather)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(rank_size, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllGather)
/**
 * @brief Outputs a tensor containing the reduction across all input tensors
 * passed to the op.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li fusion: An optional integer identifying the fusion flag of the op.
 * 0: no fusion; 1 (default): fusion; 2: fuse the ops by fusion id.
 * @li fusion_id: An optional integer identifying the fusion id of the op.
 * The HcomAllReduce ops with the same fusion id will be fused.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllReduce)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .ATTR(fusion, Int, 1)
    .ATTR(fusion_id, Int, -1)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllReduce)
/**
 * @brief Broadcasts the input tensor in the root rank to all ranks.
 * @par Inputs:
 * x: A list of dynamic input tensors. Must be one of the following types:
 * int8, int16, int32, float16, float32.
 * @par Attributes:
 * @li root_rank: A required integer identifying the root rank in the op.
 * The input of this rank will be broadcast to the other ranks.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A list of dynamic output tensors. Has the same type and length as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomBroadcast)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(root_rank, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomBroadcast)
/**
 * @brief Performs reduction across all input tensors, scattering in equal
 * blocks among ranks, each rank getting a chunk of data based on its rank
 * index.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomReduceScatter)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(rank_size, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReduceScatter)
/**
 * @brief Sends the input tensor to the destination rank.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be received by the HcomReceive op with the same "sr_tag".
 * @li dest_rank: A required integer identifying the destination rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * None.
 * @attention Constraints:
 * @li "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 * @li Operators HcomSend and HcomReceive have the same "sr_tag".
 * @see HcomReceive
 */
REG_OP(HcomSend)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(dest_rank, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomSend)
/**
 * @brief Receives the tensor from the source rank.
 * @par Inputs:
 * None.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be sent by the HcomSend op with the same "sr_tag".
 * @li src_rank: A required integer identifying the source rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li shape: A required list identifying the shape of the tensor to be
 * received.
 * @li dtype: A required integer identifying the type of the tensor to be
 * received. The supported types are: int8, int16, int32, float16, float32.
 * @par Outputs:
 * y: A tensor with the type identified in "dtype".
 * @attention Constraints:
 * @li "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 * @li Operators HcomSend and HcomReceive have the same "sr_tag".
 * @li "shape" should be the same as that of the input tensor of HcomSend.
 * @li "dtype" should be the same as that of the input tensor of HcomSend.
 * @see HcomSend
 */
REG_OP(HcomReceive)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(src_rank, Int)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReceive)
}  // namespace ge
#endif  // GE_OP_HCOM_OPS_H_
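For reference, here is a minimal sketch (not part of hcom_ops.h) of how an operator registered above could be instantiated when building a GE graph. It assumes the public GE graph-construction API (ge::Graph, ge::op::Data, and the chained set_input_*/set_attr_* setters that REG_OP generates); the "all_ops.h" header name and the helper function are assumptions made for illustration.

#include <vector>
#include "graph/graph.h"
#include "all_ops.h"  // assumed aggregate header exposing the generated ge::op classes

// Builds a graph that all-reduces one input tensor across the world group.
ge::Graph BuildAllReduceGraph() {
  ge::op::Data input("input");  // graph input placeholder

  // REG_OP(HcomAllReduce) generates ge::op::HcomAllReduce with
  // set_input_x/set_attr_* setters matching the registered names.
  ge::op::HcomAllReduce all_reduce("all_reduce");
  all_reduce.set_input_x(input)
      .set_attr_reduction("sum")            // one of "sum", "max", "min", "prod"
      .set_attr_group("hccl_world_group");  // built-in world group name, <= 128 chars

  ge::Graph graph("allreduce_graph");
  graph.SetInputs(std::vector<ge::Operator>{input})
      .SetOutputs(std::vector<ge::Operator>{all_reduce});
  return graph;
}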

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as a bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE is specifically optimized for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.