
hcom_ops.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GE_OP_HCOM_OPS_H_
#define GE_OP_HCOM_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
 * @brief Outputs a tensor gathering all input tensors.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllGather)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(rank_size, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllGather)
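Each REG_OP block in this header expands (via graph/operator_reg.h) into an operator class whose inputs and attributes are set through generated set_input_* and set_attr_* methods. A minimal sketch of placing HcomAllGather in a graph with the GE IR-build API follows; the array_ops.h header for ge::op::Data and the world size of 8 are assumptions for illustration, not taken from this file.

#include "graph/graph.h"
#include "hcom_ops.h"
#include "array_ops.h"  // assumed to provide ge::op::Data

// Gather the local tensor "x" from all 8 ranks of the default world group.
ge::Graph BuildAllGatherGraph() {
  auto x = ge::op::Data("x").set_attr_index(0);
  auto all_gather = ge::op::HcomAllGather("all_gather")
                        .set_input_x(x)                       // INPUT(x, ...)
                        .set_attr_rank_size(8)                // REQUIRED_ATTR(rank_size, Int)
                        .set_attr_group("hccl_world_group");  // REQUIRED_ATTR(group, String)
  ge::Graph graph("all_gather_graph");
  graph.SetInputs({x}).SetOutputs({all_gather});
  return graph;
}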
/**
 * @brief Outputs a tensor containing the reduction across all input tensors
 * passed to the op.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li fusion: An optional integer identifying the fusion flag of the op. \n
 * 0: no fusion; 1 (default): fusion; 2: fuse ops by fusion ID.
 * @li fusion_id: An optional integer identifying the fusion ID of the op.
 * HcomAllReduce ops with the same fusion ID will be fused.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllReduce)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .ATTR(fusion, Int, 1)
    .ATTR(fusion_id, Int, -1)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllReduce)
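The fusion attributes are what lets the graph optimizer merge several all-reduce operations into one HCCL task. A hedged sketch of fusion-by-ID, assuming grad0 and grad1 are upstream operators defined elsewhere:

// With fusion = 2, all HcomAllReduce ops sharing fusion_id 100 are fused
// into a single communication kernel by the graph engine.
auto ar0 = ge::op::HcomAllReduce("allreduce_0")
               .set_input_x(grad0)
               .set_attr_reduction("sum")  // one of "sum", "max", "min", "prod"
               .set_attr_group("hccl_world_group")
               .set_attr_fusion(2)
               .set_attr_fusion_id(100);
auto ar1 = ge::op::HcomAllReduce("allreduce_1")
               .set_input_x(grad1)
               .set_attr_reduction("sum")
               .set_attr_group("hccl_world_group")
               .set_attr_fusion(2)
               .set_attr_fusion_id(100);   // same ID -> fused with ar0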
/**
 * @brief Broadcasts the input tensor of the root rank to all ranks.
 * @par Inputs:
 * x: A list of dynamic input tensors. Must be one of the following types:
 * int8, int16, int32, float16, float32.
 * @par Attributes:
 * @li root_rank: A required integer identifying the root rank of the op.
 * The input of this rank will be broadcast to the other ranks.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A list of dynamic output tensors. Has the same type and length as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomBroadcast)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(root_rank, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomBroadcast)
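Because x and y are declared with DYNAMIC_INPUT and DYNAMIC_OUTPUT, the generated class exposes create_dynamic_input_x / set_dynamic_input_x style setters rather than a single set_input_x. A sketch under that assumption, with w0 and w1 standing in for two upstream variable operators:

// Broadcast two tensors from rank 0 to every rank in the world group.
auto bcast = ge::op::HcomBroadcast("broadcast")
                 .set_attr_root_rank(0)
                 .set_attr_group("hccl_world_group");
bcast.create_dynamic_input_x(2);   // declare two dynamic inputs
bcast.set_dynamic_input_x(0, w0);  // slot 0: first tensor to broadcast
bcast.set_dynamic_input_x(1, w1);  // slot 1: second tensor to broadcast
bcast.create_dynamic_output_y(2);  // outputs mirror the inputs in type and length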
/**
 * @brief Performs a reduction across all input tensors, scattering the result
 * in equal blocks among ranks, with each rank getting a chunk of data based
 * on its rank index.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:\n
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomReduceScatter)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(rank_size, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReduceScatter)
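To make the block layout concrete: with rank_size = 8 and an input x of 1024 elements per rank, the element-wise reduction is computed across all ranks and split into 8 equal blocks of 128 elements, and the rank with rank index i receives block i as its output y.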
/**
 * @brief Sends the input tensor to the destination rank.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32,
 * float16, float32.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be received by the HcomReceive op with the same "sr_tag".
 * @li dest_rank: A required integer identifying the destination rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * None.
 * @attention Constraints:\n
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li The paired HcomSend and HcomReceive ops must have the same "sr_tag".
 * @see HcomReceive
 */
REG_OP(HcomSend)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(dest_rank, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomSend)
/**
 * @brief Receives the tensor from the source rank.
 * @par Inputs:
 * None.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be sent by the HcomSend op with the same "sr_tag".
 * @li src_rank: A required integer identifying the source rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li shape: A required list identifying the shape of the tensor to be
 * received.
 * @li dtype: A required integer identifying the type of the tensor to be
 * received. The supported types are: int8, int16, int32, float16, float32.
 * @par Outputs:
 * y: A tensor with the type identified by "dtype".
 * @attention Constraints:\n
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li The paired HcomSend and HcomReceive ops must have the same "sr_tag".
 * @li "shape" must be the same as the shape of the input tensor of HcomSend.
 * @li "dtype" must be the same as the type of the input tensor of HcomSend.
 * @see HcomSend
 */
REG_OP(HcomReceive)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(src_rank, Int)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReceive)
}  // namespace ge
#endif  // GE_OP_HCOM_OPS_H_
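To illustrate the pairing constraints on HcomSend and HcomReceive, a sketch of the two sides, built on different ranks; the upstream operator t, its shape {32, 64}, and the float32 dtype are assumptions for illustration:

// On rank 0: send tensor "t" to rank 1, tagged 7.
auto send = ge::op::HcomSend("send")
                .set_input_x(t)
                .set_attr_group("hccl_world_group")
                .set_attr_sr_tag(7)
                .set_attr_dest_rank(1);

// On rank 1: receive the message with the same group and sr_tag.
// "shape" and "dtype" must match the tensor passed to HcomSend.
auto recv = ge::op::HcomReceive("recv")
                .set_attr_group("hccl_world_group")
                .set_attr_sr_tag(7)
                .set_attr_src_rank(0)
                .set_attr_shape({32, 64})
                .set_attr_dtype(ge::DT_FLOAT);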

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, acting as a bridge: GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core.