
split_combination_ops.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GE_OP_SPLIT_COMBINATION_OPS_H
#define GE_OP_SPLIT_COMBINATION_OPS_H

#include "../graph/operator_reg.h"

namespace ge {
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors.

*@par Inputs:
* Two inputs, including:
*@li x: An ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64
*@li split_dim: Must be of type int32. Specifies the dimension along which to split.

*@par Attributes:
*num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value.

*@par Outputs:
*y: Dynamic output. A list of output tensors. Has the same type and format as "x".

*@attention Constraints:
*@li "num_split" is greater than or equal to 1.
*@li The size of dimension "split_dim" is divisible by "num_split".
*@li "split_dim" is in the range [-len(x.shape), len(x.shape)-1].
*/
REG_OP(Split)
    .INPUT(split_dim, TensorType({DT_INT32}))
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(Split)
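
// Illustrative example (added, not part of the original header): splitting a
// tensor of shape [4, 6] with split_dim = 1 and num_split = 3 produces three
// output tensors, each of shape [4, 2].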
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors.

*@par Inputs:
* One input:
*x: An ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64

*@par Attributes:
*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value.
*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value.

*@par Outputs:
*y: Dynamic output. A list of output tensors. Has the same type and format as "x".

*@attention Constraints:
*@li "num_split" is greater than or equal to 1.
*@li The size of dimension "split_dim" is divisible by "num_split".
*@li "split_dim" is in the range [-len(x.shape), len(x.shape)-1].
*/
REG_OP(SplitD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                   DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitD)
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors according to "size_splits".

*@par Inputs:
* Three inputs, including:
*@li x: An ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64
*@li size_splits: A list of int8, int16, int32, or int64. Specifies the size of each output tensor along the split dimension.
*@li split_dim: An int8, int16, int32, or int64. Specifies the dimension along which to split.

*@par Attributes:
*num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value.

*@par Outputs:
*y: Dynamic output. A list of output tensors. Has the same type and format as "x".

*@attention Constraints:
*@li Each element in "size_splits" is greater than or equal to 1.
*@li The length of "size_splits" equals "num_split".
*@li The elements in "size_splits" sum to the size of dimension "split_dim".
*/
REG_OP(SplitV)
    .INPUT(x, TensorType::BasicType())
    .INPUT(size_splits, TensorType::IndexNumberType())
    .INPUT(split_dim, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitV)
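
// Illustrative example (added, not part of the original header): for x of
// shape [4, 6], size_splits = [1, 2, 3] and split_dim = 1, SplitV produces
// outputs of shapes [4, 1], [4, 2] and [4, 3]; the sizes sum to 6, the size
// of dimension 1.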
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors according to "size_splits".

*@par Inputs:
* One input:
* x: An ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64

*@par Attributes:
*@li size_splits: A required list of int8, int16, int32, or int64. Specifies the size of each output tensor along the split dimension.
*@li split_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to split. No default value.
*@li num_split: A required int8, int16, int32, or int64. Specifies the number of output tensors. No default value.

*@par Outputs:
*y: Dynamic output. A list of output tensors. Has the same type and format as "x".

*@attention Constraints:
*@li Each element in "size_splits" is greater than or equal to 1.
*@li The length of "size_splits" equals "num_split".
*@li The elements in "size_splits" sum to the size of dimension "split_dim".
*/
REG_OP(SplitVD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                   DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(size_splits, ListInt)
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitVD)
/**
*@brief Concatenates a list of N tensors along the first dimension.

*@par Inputs:
* One input:
* values: A list of Tensors. Must be one of the following types: int8, int16, int32, \n
* int64, uint8, uint16, uint32, uint64, float16, float32. \n
* Tensors to be concatenated. \n
* All must have size 1 in the first dimension and the same shape.

*@par Attributes:
* @li shape: A required list of ints. The final shape of the result. Should be equal to
* the shape of any input, but with the number of input values in the first dimension.
* @li N: A required int. The number of dynamic inputs in "values".

*@par Outputs:
*output_data: The concatenated tensor, with the same type as "values".
*/
REG_OP(ParallelConcat)
    .DYNAMIC_INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
    .OUTPUT(output_data, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ParallelConcat)
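
// Illustrative example (added, not part of the original header): with N = 3
// inputs of shape [1, 4, 4] and shape = [3, 4, 4], ParallelConcat stacks the
// inputs along the first dimension into a single [3, 4, 4] output.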
/**
*@brief Concatenates tensors along one dimension.

*@par Inputs:
* One input:
*x: Dynamic input. An NC1HWC0 or ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64

*@par Attributes:
*@li concat_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to concatenate. No default value.
*@li N: An optional int. Specifies the number of elements in "x". Defaults to 1.

*@par Outputs:
*y: A Tensor. Has the same type and format as "x".

*@attention Constraints:
*@li "x" is a list of at least 2 "tensor" objects of the same type.
*@li "concat_dim" is in the range [-len(x.shape), len(x.shape)].
*/
REG_OP(ConcatV2D)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .REQUIRED_ATTR(concat_dim, Int)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatV2D)
/**
*@brief Concatenates tensors along one dimension.

*@par Inputs:
* Two inputs, including:
*@li x: Dynamic input. An NC1HWC0 or ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64
*@li concat_dim: An int8, int16, int32, or int64. Specifies the dimension along which to concatenate.

*@par Attributes:
*N: An optional int8, int16, int32, or int64. Specifies the number of elements in "x". Defaults to 1.

*@par Outputs:
*y: A Tensor. Has the same type and format as "x".

*@attention Constraints:
* "x" is a list of at least 2 "tensor" objects of the same type.
*/
REG_OP(ConcatV2)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .INPUT(concat_dim, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatV2)
/**
*@brief Concatenates tensors along one dimension.

*@par Inputs:
* One input:
*x: Dynamic input. An NC1HWC0 or ND Tensor. \n
*Must be one of the following types: \n float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64

*@par Attributes:
*@li concat_dim: A required int8, int16, int32, or int64. Specifies the dimension along which to concatenate. No default value.
*@li N: An optional int8, int16, int32, or int64. Specifies the number of elements in "x". Defaults to 1.

*@par Outputs:
*y: A Tensor. Has the same type and format as "x".

*@attention Constraints:
*@li "x" is a list of at least 2 "tensor" objects of the same type.
*@li "concat_dim" is in the range [-len(x.shape), len(x.shape)].
*/
REG_OP(ConcatD)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64}))
    .REQUIRED_ATTR(concat_dim, Int)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatD)
/**
*@brief Concatenates tensors along one dimension.

*@par Inputs:
* Two inputs, including:
*@li x: Dynamic input. An NC1HWC0 or ND Tensor. \n
*Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8, uint16, uint32, uint64
*@li concat_dim: An int8, int16, int32, or int64. Specifies the dimension along which to concatenate.

*@par Attributes:
*N: An optional int8, int16, int32, or int64. Specifies the number of elements in "x". Defaults to 1.

*@par Outputs:
*y: A Tensor. Has the same type and format as "x".

*@attention Constraints:
*@li "x" is a list of at least 2 "tensor" objects of the same type.
*@li "concat_dim" is in the range [-len(x.shape), len(x.shape)].
*/
REG_OP(Concat)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .INPUT(concat_dim, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(Concat)
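
// Illustrative example (added, not part of the original header): concatenating
// tensors of shapes [2, 3] and [2, 4] with concat_dim = 1 yields a [2, 7]
// tensor; all dimensions other than "concat_dim" must match.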
/**
*@brief Packs the list of tensors in values into a tensor with rank one higher than each tensor in
* values, by packing them along the axis dimension. Given a list of length N of tensors of
* shape (A, B, C): if axis == 0, then the output tensor will have the shape (N, A, B, C).

*@par Inputs:
* x: A list of N Tensors. Must be one of the following types: int8, int16, int32,
* int64, uint8, uint16, uint32, uint64, float16, float32, bool.

*@par Attributes:
*@li axis: An optional int. Default value is 0.
* Dimension along which to pack. The range is [-(R+1), R+1).
*@li N: A required int. Number of tensors.

*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Pack)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(axis, Int, 0)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(Pack)
/**
*@brief Computes offsets of concat inputs within its output.

*@par Inputs:
*Two inputs, including:
* @li concat_dim: A Tensor of type int32. Must be within the rank of input "x".
* @li x: A list of 1D Tensor objects of type int32.

*@par Attributes:
*N: A required int.

*@par Outputs:
*y: A list of Tensors with the same type as "x".
*/
REG_OP(ConcatOffset)
    .INPUT(concat_dim, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffset)
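
// Illustrative example (added, not part of the original header): for input
// shapes [2, 2], [2, 3] and [2, 5] concatenated along concat_dim = 1, the
// computed offsets are [0, 0], [0, 2] and [0, 5], i.e. where each input
// begins within the concatenated output.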
/**
*@brief Computes offsets of concat inputs within its output.

*@par Inputs:
*One input:
* x: A list of 1D Tensor objects of type int32.

*@par Attributes:
*@li concat_dim: A required int. Must be within the rank of input "x".
*@li N: A required int.

*@par Outputs:
*y: A list of Tensors with the same type as "x".
*/
REG_OP(ConcatOffsetD)
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(concat_dim, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffsetD)
}  // namespace ge

#endif  // GE_OP_SPLIT_COMBINATION_OPS_H
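
For a concrete sense of the SplitV semantics documented above, the following is a minimal standalone C++ sketch. It uses only the standard library and is independent of the GE API; SplitV1D is a hypothetical helper name, and only the 1-D, split_dim = 0 case is shown.

#include <cassert>
#include <cstddef>
#include <vector>

// Splits x (treated as a 1-D tensor) into pieces whose lengths are given by
// size_splits, mirroring the SplitV constraints documented in the header.
std::vector<std::vector<float>> SplitV1D(const std::vector<float>& x,
                                         const std::vector<std::size_t>& size_splits) {
  std::vector<std::vector<float>> out;
  std::size_t offset = 0;
  for (std::size_t len : size_splits) {
    assert(len >= 1);                  // each element of size_splits is >= 1
    assert(offset + len <= x.size());  // pieces must fit inside x
    out.emplace_back(x.begin() + offset, x.begin() + offset + len);
    offset += len;
  }
  assert(offset == x.size());  // elements of size_splits sum to the dimension size
  return out;
}

int main() {
  std::vector<float> x = {1, 2, 3, 4, 5, 6};
  // size_splits = {1, 2, 3} yields pieces of length 1, 2 and 3,
  // and num_split (the number of outputs) equals size_splits.size().
  auto pieces = SplitV1D(x, {1, 2, 3});
  assert(pieces.size() == 3 && pieces[2].size() == 3);
  return 0;
}

The asserts mirror the documented constraints: each split size is at least 1 and the sizes sum to the size of the split dimension.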

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware, acting as a bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.