
transformation_ops.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_OP_TRANSFORMATION_OPS_H
#define GE_OP_TRANSFORMATION_OPS_H

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Bitcasts a tensor from one type to another without copying data; the output shape changes when the element sizes differ.
*@par Inputs:
*x: A Tensor. Must be one of the following types: bool, float16, float32, int8, int32, uint32, uint8,
* int64, uint64, int16, uint16, double, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Outputs:
*y: A Tensor. Has the data type specified by "type".
*@par Attributes:
*type: A required ge::DataType. The target data type of the output.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Bitcast.
*/
REG_OP(Bitcast)
    .INPUT(x, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                          DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                           DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(type, Type)
    .OP_END_FACTORY_REG(Bitcast)
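
The brief above only pins down that the data type and shape change. Assuming Bitcast follows the TensorFlow bitcast shape rule (suggested by the type list and the compatibility note, but not spelled out here), the output shape can be derived with a small host-side helper; BitcastShape below is a hypothetical illustration, not part of GE:

#include <cstdint>
#include <vector>

// TensorFlow-style bitcast shape rule (illustrative only): if the source
// element is wider than the target, a trailing dimension of size
// src_bytes / dst_bytes is appended; if it is narrower, the last dimension
// must equal dst_bytes / src_bytes and is removed; equal widths keep the
// shape unchanged.
std::vector<int64_t> BitcastShape(std::vector<int64_t> shape,
                                  size_t src_bytes, size_t dst_bytes) {
  if (src_bytes > dst_bytes) {
    shape.push_back(static_cast<int64_t>(src_bytes / dst_bytes));
  } else if (src_bytes < dst_bytes) {
    // Caller is expected to have checked shape.back() == dst_bytes / src_bytes.
    shape.pop_back();
  }
  return shape;
}

// e.g. BitcastShape({4, 3}, sizeof(float), sizeof(uint16_t)) -> {4, 3, 2}
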
/**
*@brief Converts the tensor format from HWCN to C1HWNCoC0.
*@par Inputs:
*x: A 4D Tensor of type float16, float32, int32, or uint16, with format HWCN.
*@par Outputs:
*y: A 6D Tensor. Has the same type as "x", with format C1HWNCoC0.
*/
REG_OP(DepthwiseWeight4DTo6D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OP_END_FACTORY_REG(DepthwiseWeight4DTo6D)

/**
*@brief Converts the tensor format from C1HWNCoC0 to HWCN.
*@par Inputs:
*x: A 6D Tensor of type float16, float32, int32, or uint16, with format C1HWNCoC0.
*@par Attributes:
*channel_size: An optional int, specifying the channel size of the 4D Tensor with format HWCN.
*@par Outputs:
*y: A 4D Tensor. Has the same type as "x", with format HWCN.
*/
REG_OP(DepthwiseWeight6DTo4D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .ATTR(channel_size, Int, 16)
    .OP_END_FACTORY_REG(DepthwiseWeight6DTo4D)

/**
*@brief Permutes the dimensions according to perm.\n
The returned tensor's dimension i will correspond to the input dimension perm[i].
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@par Attributes:
*perm: A permutation of the dimensions of "x".
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(TransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .OP_END_FACTORY_REG(TransposeD)

/**
*@brief Permutes the dimensions according to perm.\n
The returned tensor's dimension i will correspond to the input dimension perm[i].
*@par Inputs:
*Two inputs, including:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@li perm: A Tensor of type int32 or int64. A permutation of the dimensions of "x".
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Transpose.
*/
REG_OP(Transpose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(perm, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Transpose)
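
For context on how such registrations are consumed: REG_OP generates an operator class whose INPUT, OUTPUT, and ATTR entries become set_input_* / set_attr_* setters. A minimal graph-construction sketch, assuming the usual Ascend graph-build API (ge::Graph, a ge::op::Data placeholder from array_ops.h, and the generated classes being visible as ge::op::*); header paths and the Data helper are assumptions, not something this header defines:

#include <vector>
#include "graph/graph.h"
#include "array_ops.h"           // assumed: provides the ge::op::Data placeholder op
#include "transformation_ops.h"  // this header: provides ge::op::TransposeD

// Builds a tiny graph that permutes an NHWC tensor to NCHW using TransposeD.
ge::Graph BuildTransposeGraph() {
  auto x = ge::op::Data("x").set_attr_index(0);

  // Output dimension i corresponds to input dimension perm[i]: NHWC -> NCHW.
  auto transpose = ge::op::TransposeD("transpose")
                       .set_input_x(x)
                       .set_attr_perm({0, 3, 1, 2});

  ge::Graph graph("transpose_graph");
  graph.SetInputs(std::vector<ge::Operator>{x})
       .SetOutputs(std::vector<ge::Operator>{transpose});
  return graph;
}
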
/**
*@brief Performs format transfer for various data formats. Only \n
NHWC/NCHW to NC1HWC0 and NC1HWC0 to NHWC/NCHW, \n
NCHW to FRACTAL_Zn or FRACTAL_Zn to NCHW, \n
and HWCN to FRACTAL_Zn or FRACTAL_Zn to HWCN are supported.
*@par Inputs:
*src: A Tensor. All data types are supported.
*@par Attributes:
*@li src_format: A string. The source data format, which can be NHWC, NCHW, FRACTAL_Zn, etc.
*@li dst_format: A string. The target data format, which can be NC1HWC0, NCHW, FRACTAL_Zn, etc.
*@par Outputs:
*dst: A Tensor. All data types are supported.
*/
REG_OP(TransData)
    .INPUT(src, TensorType::BasicType())
    .OUTPUT(dst, TensorType::BasicType())
    .REQUIRED_ATTR(src_format, String)
    .REQUIRED_ATTR(dst_format, String)
    .OP_END_FACTORY_REG(TransData)

/**
*@brief Permutes the dimensions according to order.\n
The returned tensor's dimension i will correspond to the input dimension order[i].
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32.
*@par Attributes:
*order: A permutation of the dimensions of "x". Type is int32. Any axis transformation is supported. Defaults to "{0}".
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Permute)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(order, ListInt, {0})
    .OP_END_FACTORY_REG(Permute)

/**
*@brief Flattens the input. Reserves axis 0 and flattens the input tensor
* along axis 1.
*@par Inputs:
*One input: \n
*x: A multi-dimensional Tensor. Must be one of the following types:
* int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32.
*@par Outputs:
*y: A 2D flattened Tensor (reserves axis 0 and flattens the input tensor
* along axis 1). Must be one of the following data types: int8, uint8, int16,
* uint16, int32, uint32, int64, uint64, float16, float32.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Flatten.
*/
REG_OP(Flatten)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                          DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                           DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(Flatten)

/**
*@brief Permutes and crops the input tensor.
*@par Inputs:
* Three inputs, including:
*@li x: A 5D Tensor of type float16, int8, or uint8, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li crops: A 2D list or tuple of int32 or int64. Specifies the amount to
*crop from the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*/
REG_OP(BatchToSpaceND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BatchToSpaceND)

/**
*@brief Permutes and crops the input tensor.
*@par Inputs:
* One input:
*x: A 5D Tensor of type float16, int8, or uint8, with format NC1HWC0.
*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li crops: A required 2D list or tuple of int32 or int64. Specifies the amount to crop
* from the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*/
REG_OP(BatchToSpaceNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceNDD)

/**
*@brief Pads and permutes the input tensor.
*@par Inputs:
* Three inputs, including: \n
*@li x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li paddings: A 2D list or tuple of int32 or int64. Specifies the padding for the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*/
REG_OP(SpaceToBatchND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(SpaceToBatchND)

/**
*@brief Pads and permutes the input tensor.
*@par Inputs:
* One input: \n
*x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li paddings: A required 2D list or tuple of int32 or int64. Specifies the padding for the start and end dimensions after permutation.
*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*/
REG_OP(SpaceToBatchNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchNDD)

/**
*@brief Outputs a copy of the input tensor where values from the "height" and
* "width" dimensions are moved to the "depth" dimension.
*@par Inputs:
*x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li data_format: An optional string, specifying the data format. Defaults to
* "NHWC".
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToDepth.
*/
REG_OP(SpaceToDepth)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(SpaceToDepth)

/**
*@brief Rearranges data from depth into blocks of spatial data.
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, double, int32, uint8,
* int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64.
*@par Attributes:
*Two attributes, including:
* @li block_size: An int >= 2, specifying the size of the spatial block.
* @li data_format: An optional string, specifying the data format. Defaults to "NHWC".
*@par Outputs:
*y: A Tensor of the same type as "x".
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator DepthToSpace.
*/
REG_OP(DepthToSpace)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(DepthToSpace)
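
The rearrangement is easier to see operationally than from the attribute list: for an NHWC input of shape [N, H, W, C] with C divisible by block_size^2, DepthToSpace produces [N, H*block_size, W*block_size, C/block_size^2], and SpaceToDepth is the inverse. A host-side reference sketch of the TensorFlow-compatible NHWC mapping, for illustration only (not the GE kernel):

#include <cstddef>
#include <vector>

// Reference-only NHWC DepthToSpace: channel k of input pixel (i, j) lands at
// output pixel (i * block + by, j * block + bx), channel ko, where
// k == (by * block + bx) * out_c + ko.
std::vector<float> DepthToSpaceRef(const std::vector<float>& x,
                                   int n, int h, int w, int c, int block) {
  const int out_c = c / (block * block);
  std::vector<float> y(static_cast<size_t>(n) * h * w * c);
  for (int ni = 0; ni < n; ++ni)
    for (int i = 0; i < h; ++i)
      for (int j = 0; j < w; ++j)
        for (int k = 0; k < c; ++k) {
          const int by = k / (block * out_c);
          const int bx = (k / out_c) % block;
          const int ko = k % out_c;
          const size_t src = ((static_cast<size_t>(ni) * h + i) * w + j) * c + k;
          const size_t dst =
              ((static_cast<size_t>(ni) * h * block + i * block + by) * (w * block) +
               j * block + bx) * out_c + ko;
          y[dst] = x[src];
        }
  return y;
}
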
/**
*@brief Permutes data into spatial data blocks and then prunes them.
*@par Inputs:
*@li x: A 4D Tensor with format NHWC. Must be one of the following types: float16, float32.
*@li crops: A 1D list or tuple of int32 or int64.
*@par Attributes:
*block_size: A required int. No default value.
*@par Outputs:
*y: A 4D Tensor with format NHWC, of type float16 or float32.
*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
*@li The output is a 4D tensor of shape [batch, height, width, depth], where height = height_pad - crop_top - crop_bottom
* and width = width_pad - crop_left - crop_right.
*@li block_size > 2
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*/
REG_OP(BatchToSpace)
    .INPUT(x, TensorType::BasicType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(BatchToSpace)

/**
*@brief Rearranges (permutes) batch data into blocks of spatial data, and then crops them.
*@par Inputs:
* One input:
*x: A Tensor of shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth].
*The batch size of the input tensor must be divisible by (block size * block size).
*Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16, uint32, uint64,
*int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Attributes:
*@li block_size: A required int. Must be one of the following types: int32, int64.
*@li crops: A required 2D list of non-negative int32 or int64 values with shape [2, 2]. It specifies how many
*elements are clipped from the intermediate result of the spatial dimensions.
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x".
*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by (block_size * block_size).
*@li "crops" is a 2D list of non-negative integers with shape (2, 2).
*@li block_size > 2
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*/
REG_OP(BatchToSpaceD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceD)

/**
*@brief Outputs a copy of the input tensor where values from the "height" and
* "width" dimensions are padded and rearranged to the "batch" dimension.
*@par Inputs:
* Two inputs, including:
*@li x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@li paddings: A 2D tensor of type int, specifying the padding for the input.
*@par Attributes:
*block_size: A required int, specifying the input block size.
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*/
REG_OP(SpaceToBatch)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(SpaceToBatch)

/**
*@brief Outputs a copy of the input tensor where values from the "height" and "width" dimensions are padded and rearranged to the "batch" dimension.
*@par Inputs:
*x: An NHWC Tensor. Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li paddings: A required 2D list of ints, specifying the padding for the input.
*@par Outputs:
*y: A Tensor. Has the same type as input "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*/
REG_OP(SpaceToBatchD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchD)

/**
* @brief Unpacks the given dimension of a rank-R Tensor "x" into rank-(R-1)
* tensors.
* @par Inputs:
* x: A rank-R tensor (R > 0) of type BasicType, with format ND or NC1HWC0.
* @par Attributes:
* @li num: A required int, specifying the number of tensors to be unpacked to.
* Defaults to "None".
* @li axis: An optional int, specifying the axis to unpack along. The value range
* is [-R, R). Defaults to "0".
* @par Outputs:
* y: Dynamic output. The list of Tensor objects unpacked from "x", of type BasicType.
* @attention Constraints:
* @li If "num" is not specified, it is inferred from the shape of "x".
* @li For the ND format, "axis" is in the range [-R, R); for the NC1HWC0 format,
* "axis" must not be 2, 3, -2, or -3.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Unpack.
*/
REG_OP(Unpack)
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num, Int)
    .ATTR(axis, Int, 0)
    .OP_END_FACTORY_REG(Unpack)

/**
* @brief Extracts "patches" from "images" and stacks them in the "depth"
* dimension of the output.
* @par Inputs:
* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth]. Must be one of the
* following types: float32, double, int32, uint8, int16, int8, int64, uint16,
* float16, uint32, uint64.
* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for each
* dimension of the images.
* @li strides: A required list or tuple. How far the centers of two consecutive
* patches are in the images. Must be: [1, stride_rows, stride_cols, 1].
* @li rates: A required list or tuple. Must be: [1, rate_rows, rate_cols, 1].\n
* This is the input stride, specifying how far two consecutive patch\n
* samples are in the input. Equivalent to extracting patches
* with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *\n
* (rates - 1), followed by subsampling them spatially by a factor of rates.\n
* This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
* @li padding: A required string. The type of padding algorithm to use.
* @par Outputs:
* y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *\n
* ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols\n
* x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"\n
* are the dimensions of the output patches.
* @attention Constraints:
* "ksizes", "strides" and "rates" are lists of integers.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractImagePatches.
*/
REG_OP(ExtractImagePatches)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractImagePatches)

/**
* @brief Extracts "patches" from "input" and puts them in the "depth"
* dimension of the output.
* @par Inputs:
* x: A 5D Tensor with shape [batch, in_planes, in_rows, in_cols, depth].
* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for each
* dimension of "x".
* @li strides: A required list or tuple. How far the centers of two consecutive
* patches are in "x". Must be: [1, stride_planes, stride_rows, stride_cols, 1].
* @li padding: A required string. The type of padding algorithm to use.
* @par Outputs:
* y: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols, ksize_planes * \n
* ksize_rows * ksize_cols * depth] containing patches with size (ksize_rows * ksize_cols\n
* * depth) vectorized in the "depth" dimension. Note "out_planes", "out_rows" and "out_cols"\n
* are the dimensions of the output patches.
* @attention Constraints:
* "ksizes" and "strides" are lists of integers.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractVolumePatches.
*/
REG_OP(ExtractVolumePatches)
    .INPUT(x, TensorType::REALNUMBERTYPE())
    .OUTPUT(y, TensorType::REALNUMBERTYPE())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractVolumePatches)

/**
*@brief Performs reshape and transpose as a single fused operation, with the permutation and shape given as attributes.
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@par Attributes:
*@li perm: A permutation of the dimensions of "x".
*@li shape: The shape of the input.
*@li transpose_first: If True, the transpose is performed first; otherwise, the reshape is performed first.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ConfusionTransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTransposeD)

/**
*@brief Performs reshape and transpose as a single fused operation, with the shape given as an input tensor.
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@li shape: The shape of the input.
*@par Attributes:
*@li perm: A permutation of the dimensions of "x".
*@li transpose_first: If True, the transpose is performed first; otherwise, the reshape is performed first.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ConfusionTranspose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTranspose)

/**
*@brief Flattens the dimensions from "axis" to "end_axis" of the input tensor into a single dimension.
*@par Inputs:
*x: An ND tensor. All data types are supported.
*@par Attributes:
*@li axis: An optional int32, specifying the first axis to flatten. All preceding axes are retained in the output. Defaults to "1".
*@li end_axis: An optional int32, specifying the last axis to flatten. All following axes are retained in the output. Defaults to "-1".
*@par Outputs:
*y: The flattened ND tensor. All data types are supported.
*@attention Constraints:
* "axis" and "end_axis" must be within the dimension range of the input. This operator cannot be directly called by the aclopExecute API.
*@par Third-party framework compatibility
* Compatible with the Caffe operator Flatten.
*/
REG_OP(FlattenV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                          DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                           DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .ATTR(axis, Int, 1)
    .ATTR(end_axis, Int, -1)
    .OP_END_FACTORY_REG(FlattenV2)
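
Since FlattenV2 follows the Caffe Flatten semantics, the output shape follows directly from "axis" and "end_axis": that axis range collapses into one dimension and all other dimensions are kept. A small illustrative helper (hypothetical, not part of GE) that computes the resulting shape:

#include <cstdint>
#include <vector>

// Hypothetical helper illustrating the FlattenV2 shape rule: dimensions
// axis..end_axis (inclusive, negative values count from the back) are
// collapsed into a single dimension; all other dimensions are kept.
std::vector<int64_t> FlattenV2Shape(const std::vector<int64_t>& in,
                                    int axis = 1, int end_axis = -1) {
  const int rank = static_cast<int>(in.size());
  if (axis < 0) axis += rank;
  if (end_axis < 0) end_axis += rank;
  std::vector<int64_t> out;
  for (int i = 0; i < axis; ++i) out.push_back(in[i]);
  int64_t flat = 1;
  for (int i = axis; i <= end_axis; ++i) flat *= in[i];
  out.push_back(flat);
  for (int i = end_axis + 1; i < rank; ++i) out.push_back(in[i]);
  return out;
}

// e.g. FlattenV2Shape({8, 3, 224, 224}) -> {8, 3 * 224 * 224}
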
REG_OP(DeConvTrans)
    .INPUT(x, TensorType({DT_INT8}))
    .OUTPUT(y, TensorType({DT_INT8}))
    .OP_END_FACTORY_REG(DeConvTrans)

REG_OP(Compress)
    .INPUT(weight, TensorType({DT_INT8, DT_FLOAT16}))
    .OUTPUT(weight_compress, TensorType({DT_INT8, DT_FLOAT16}))
    .OUTPUT(compress_index, TensorType({DT_INT8}))
    .REQUIRED_ATTR(compress_parameters, ListInt)
    .OP_END_FACTORY_REG(Compress)

REG_OP(CompressFcOp)
    .INPUT(weight, TensorType({DT_INT8}))
    .OUTPUT(weight_compress, TensorType({DT_INT8}))
    .OUTPUT(compress_index, TensorType({DT_INT8}))
    .REQUIRED_ATTR(compress_parameters, ListInt)
    .OP_END_FACTORY_REG(CompressFcOp)
} // namespace ge
#endif // GE_OP_TRANSFORMATION_OPS_H

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture is shown in the figure below.