You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

transformation_ops.h 12 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_OP_TRANSFORMATION_OPS_H
  17. #define GE_OP_TRANSFORMATION_OPS_H
  18. #include "../graph/operator_reg.h"
  19. namespace ge {
  20. REG_OP(DepthwiseWeight4DTo6D)
  21. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
  22. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
  23. .OP_END_FACTORY_REG(DepthwiseWeight4DTo6D)
  24. REG_OP(DepthwiseWeight6DTo4D)
  25. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
  26. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
  27. .ATTR(channel_size, Int, 16)
  28. .OP_END_FACTORY_REG(DepthwiseWeight6DTo4D)
  29. /**
  30. *@brief Permutes the dimensions according to perm.\n
  31. The returned tensor's dimension i will correspond to the input dimension perm[i].
  32. *@par Inputs:
  33. *x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
  34. *@par Attributes:
  35. *perm: A permutation of the dimensions of "x".
  36. *@par Outputs:
  37. *y: A Tensor. Has the same type as "x".
  38. */
  39. REG_OP(TransposeD)
  40. .INPUT(x, TensorType::BasicType())
  41. .OUTPUT(y, TensorType::BasicType())
  42. .ATTR(perm, ListInt, {})
  43. .OP_END_FACTORY_REG(TransposeD)
  44. /**
  45. *@brief Permutes the dimensions according to perm.\n
  46. The returned tensor's dimension i will correspond to the input dimension perm[i].
  47. *@par Inputs:
  48. *@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
  49. *@li perm: A Tensor of type int32 or int64. A permutation of the dimensions of "x".
  50. *@par Outputs:
  51. *y: A Tensor. Has the same type as "x".
  52. */
  53. REG_OP(Transpose)
  54. .INPUT(x, TensorType::BasicType())
  55. .INPUT(perm, TensorType::IndexNumberType())
  56. .OUTPUT(y, TensorType::BasicType())
  57. .OP_END_FACTORY_REG(Transpose)
  58. REG_OP(Flatten)
  59. .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
  60. DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
  61. DT_FLOAT, DT_FLOAT16}))
  62. .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
  63. DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
  64. DT_FLOAT, DT_FLOAT16}))
  65. .OP_END_FACTORY_REG(Flatten)
  66. REG_OP(BatchToSpaceND)
  67. .INPUT(x, TensorType::BasicType())
  68. .INPUT(block_shape, TensorType::IndexNumberType())
  69. .INPUT(crops, TensorType::IndexNumberType())
  70. .OUTPUT(y, TensorType::BasicType())
  71. .OP_END_FACTORY_REG(BatchToSpaceND)
  72. REG_OP(BatchToSpaceNDD)
  73. .INPUT(x, TensorType::BasicType())
  74. .OUTPUT(y, TensorType::BasicType())
  75. .REQUIRED_ATTR(block_shape, ListInt)
  76. .REQUIRED_ATTR(crops, ListInt)
  77. .OP_END_FACTORY_REG(BatchToSpaceNDD)
  78. REG_OP(SpaceToBatchND)
  79. .INPUT(x, TensorType::BasicType())
  80. .INPUT(block_shape, TensorType::IndexNumberType())
  81. .INPUT(paddings, TensorType::IndexNumberType())
  82. .OUTPUT(y, TensorType::BasicType())
  83. .OP_END_FACTORY_REG(SpaceToBatchND)
  84. REG_OP(SpaceToBatchNDD)
  85. .INPUT(x, TensorType::BasicType())
  86. .OUTPUT(y, TensorType::BasicType())
  87. .REQUIRED_ATTR(block_shape, ListInt)
  88. .REQUIRED_ATTR(paddings, ListInt)
  89. .OP_END_FACTORY_REG(SpaceToBatchNDD)
  90. REG_OP(SpaceToDepth)
  91. .INPUT(x, TensorType::BasicType())
  92. .OUTPUT(y, TensorType::BasicType())
  93. .REQUIRED_ATTR(block_size, Int)
  94. .ATTR(data_format, String, "NHWC")
  95. .OP_END_FACTORY_REG(SpaceToDepth)
  96. /**
  97. *@brief Rearranges data from depth into blocks of spatial data.
  98. *@par Inputs:
  99. *x: A Tensor. Must be one of the following types: float16, float32, double, int32, uint8,
  100. * int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
  101. * complex128, uint32, uint64
  102. *@par Attributes:
  103. *Two attributes, including:
  104. * @li block_size: An int >= 2, specifying the size of the spatial block.
  105. * @li data_format: An optional string, specifying the data format. Defaults to "NHWC".
  106. *@par Outputs:
  107. *y: A Tensor of the same type as "x".
  108. */
  109. REG_OP(DepthToSpace)
  110. .INPUT(x, TensorType::BasicType())
  111. .OUTPUT(y, TensorType::BasicType())
  112. .REQUIRED_ATTR(block_size, Int)
  113. .ATTR(data_format, String, "NHWC")
  114. .OP_END_FACTORY_REG(DepthToSpace)
  115. /**
  116. *@brief Permutes data into spatial data blocks and then prunes them.
  117. *@par Inputs:
  118. *x: A 4D Tensor with format NC1HWC0. \n
  119. *Must be one of the following types: float16, float32
  120. *@par Attributes:
  121. *@li crops: A required list of int8, int16, int32, or int64. No default value.
  122. *@li block_size: A required int8, int16, int32, or int64. No default value.
  123. *@par Outputs:
  124. *y: A 4D Tensor with format NC1HWC0, \n
  125. * of type float16 or float32.
  126. *@attention Constraints:
  127. *@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
  128. *@li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
  129. *@li block_size >= 2
  130. */
  131. REG_OP(BatchToSpace)
  132. .INPUT(x, TensorType::BasicType())
  133. .INPUT(crops, TensorType::IndexNumberType())
  134. .OUTPUT(y, TensorType::BasicType())
  135. .REQUIRED_ATTR(block_size, Int)
  136. .OP_END_FACTORY_REG(BatchToSpace)
  137. /**
  138. *@brief Rearrange the batch (permutes) data into spatial data blocks, and then crop them.
  139. *@par Inputs:
  140. * One input:
  141. *x: An Tensor of shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth].\n
  142. *The batch size of the input tensor must be divisible by (block size * block size).
  143. *@par Attributes:
  144. *@li block_size: Must be one of the following types: `int32`, `int64`.
  145. *@li crops: An Tensor. Must be one of the following types: int32, Int64.\n
  146. *2D tensor with non negative integer of shape [2, 2]. It specifies how many\n
  147. *elements are clipped from the intermediate result of spatial dimension.
  148. *@par Outputs:
  149. *y: A Tensor. Has the same type and format as input "x".
  150. *@attention Constraints:
  151. *@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
  152. *@li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
  153. *@li block_size >= 2
  154. */
  155. REG_OP(BatchToSpaceD)
  156. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
  157. DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
  158. DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
  159. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
  160. DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
  161. DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
  162. .REQUIRED_ATTR(block_size, Int)
  163. .REQUIRED_ATTR(crops, ListInt)
  164. .OP_END_FACTORY_REG(BatchToSpaceD)
  165. REG_OP(SpaceToBatch)
  166. .INPUT(x, TensorType::BasicType())
  167. .INPUT(paddings, TensorType::IndexNumberType())
  168. .OUTPUT(y, TensorType::BasicType())
  169. .REQUIRED_ATTR(block_size, Int)
  170. .OP_END_FACTORY_REG(SpaceToBatch)
  171. REG_OP(SpaceToBatchD)
  172. .INPUT(x, TensorType::BasicType())
  173. .OUTPUT(y, TensorType::BasicType())
  174. .REQUIRED_ATTR(block_size, Int)
  175. .REQUIRED_ATTR(paddings, ListInt)
  176. .OP_END_FACTORY_REG(SpaceToBatchD)
  177. /**
  178. * @brief Unpacks the given dimension of a rank-R tensor "value" into rank-(R-1)
  179. * tensors.
  180. * @par Inputs:
  181. * @ value: A rank-R tensor (R > 0) of type BasicType, with format ND or NC1HWC0.
  182. * @par Attributes:
  183. * @li num: An optional int, specifying the number of tensors to be unpacked to.
  184. * Defaults to "None".
  185. * @li axis: A required int, specifying the axis to unpack along. The value range
  186. * is [-R, R).
  187. * @par Outputs:
  188. * output: The list of Tensor objects unpacked from "value", of type BasicType.
  189. * @attention Constraints:
  190. * @li If "num" is not specified, it is inferred from the shape of "value".
  191. * @li For the ND format, "axis" is in the range [-R, R); For the NC1HWC0 format,
  192. * "axis" must not be 2, 3, -2, or -3.
  193. */
  194. REG_OP(Unpack)
  195. .INPUT(value, TensorType::BasicType())
  196. .DYNAMIC_OUTPUT(output, TensorType::BasicType())
  197. .REQUIRED_ATTR(num, Int)
  198. .ATTR(axis, Int, 0)
  199. .OP_END_FACTORY_REG(Unpack)
  200. /**
  201. * @brief Extract "patches" from "images" and stacks them in the "depth"
  202. * dimension of the output.
  203. * @par Inputs:
  204. * images: A 4D Tensor with shape [batch, in_rows, in_cols, depth].
  205. * @par Attributes:
  206. * @li ksizes: An optional tuple or list. size of the sliding window for
  207. * each dimension of images.
  208. * @li strides: An optional tuple or list. How far the centers of two
  209. * consecutive patches are in the images.\n
  210. * Must be: [1, stride_rows, stride_cols, 1].
  211. * @li rates: Must be: An optional tuple or list. [1, rate_rows, rate_cols, 1].
  212. * This is the input stride,\n
  213. * specifying how far two consecutive patch samples are in the input. Equivalent\n
  214. * to extracting patches with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *\n
  215. * (rates - 1), followed by subsampling them spatially by a factor of rates. This\n
  216. * is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
  217. * @li padding: An optional string. The type of padding algorithm to use.
  218. * @par Outputs:
  219. * Output: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *\n
  220. * ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols\n
  221. * x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"\n
  222. * are the dimensions of the output patches.
  223. * @attention Constraints:
  224. * "ksizes", "strides" and "rates" are lists of integers.
  225. */
  226. REG_OP(ExtractImagePatches)
  227. .INPUT(images, TensorType::REALNUMBERTYPE())
  228. .OUTPUT(y, TensorType::REALNUMBERTYPE())
  229. .ATTR(ksizes, ListInt, {1,3,3,1})
  230. .ATTR(strides, ListInt, {1,1,1,1})
  231. .ATTR(rates, ListInt, {1,1,1,1})
  232. .ATTR(padding, String, "SAME")
  233. .OP_END_FACTORY_REG(ExtractImagePatches)
  234. /**
  235. *@brief Confuse reshape and transpose.
  236. *@par Inputs:
  237. *x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
  238. *@par Attributes:
  239. *@li perm: A permutation of the dimensions of "x".
  240. *@li shape: The shape of the input.
  241. *@li transpose_first: If True, the transpose is first, otherwise the reshape is first.
  242. *@par Outputs:
  243. *y: A Tensor. Has the same type as "x".
  244. */
  245. REG_OP(ConfusionTransposeD)
  246. .INPUT(x, TensorType::BasicType())
  247. .OUTPUT(y, TensorType::BasicType())
  248. .REQUIRED_ATTR(perm, ListInt)
  249. .REQUIRED_ATTR(shape, ListInt)
  250. .REQUIRED_ATTR(transpose_first, Bool)
  251. .OP_END_FACTORY_REG(ConfusionTransposeD)
  252. /**
  253. *@brief Confuse reshape and transpose.
  254. *@par Inputs:
  255. *@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
  256. *@li shape: The shape of the input.
  257. *@par Attributes:
  258. *@li perm: A permutation of the dimensions of "x".
  259. *@li transpose_first: If True, the transpose is first, otherwise the reshape is first.
  260. *@par Outputs:
  261. *y: A Tensor. Has the same type as "x".
  262. */
  263. REG_OP(ConfusionTranspose)
  264. .INPUT(x, TensorType::BasicType())
  265. .INPUT(shape, TensorType::IndexNumberType())
  266. .OUTPUT(y, TensorType::BasicType())
  267. .REQUIRED_ATTR(perm, ListInt)
  268. .REQUIRED_ATTR(transpose_first, Bool)
  269. .OP_END_FACTORY_REG(ConfusionTranspose)
  270. } // namespace ge
  271. #endif // GE_OP_TRANSFORMATION_OPS_H

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示