You cannot select more than 25 topics. Each topic must start with a Chinese character, a letter, or a number; it can include dashes ('-') and can be up to 35 characters long.

pad_ops.h 12 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file pad_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
 * @brief Creates a tensor of shape "dims" filled with a scalar "value".
 *
 * @par Inputs:
 * @li dims: A 1D tensor of type int32 or int64 (IndexNumberType). Represents
 *     the shape of the output tensor.
 * @li value: A 0D scalar (BasicType) specifying the value to fill the returned
 *     tensor with. Must be one of the following types:
 *     float16, float32, double, int32, uint8, int16, int8, complex64, int64,
 *     qint8, quint8, qint32, uint16, complex128, uint32, uint64.
 *
 * @par Outputs:
 * y: A tensor. Has the same type as "value".
 *
 * @par Third-party framework compatibility
 * @li Compatible with the TensorFlow operator Fill.
 * @li Compatible with the Caffe operator Filler.
 */
REG_OP(Fill)
    .INPUT(dims, TensorType::IndexNumberType())
    .INPUT(value, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Fill)
/**
 * @brief Creates a tensor of shape "dims" filled with a scalar "value".
 * Compile-time-shape variant of Fill: the output shape is an attribute
 * rather than an input tensor.
 *
 * @par Inputs:
 * value: A 0D scalar specifying the value to fill the returned tensor with.
 *     Registered types: float32, float16, int8, int16, uint16, uint8, int32,
 *     int64, uint32, uint64, bool, double.
 *
 * @par Attributes:
 * dims: A required attribute of type ListInt. Represents the shape of the
 *     output tensor.
 *
 * @par Outputs:
 * y: A tensor. Has the same type as "value".
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use Fill instead.
 */
REG_OP(FillD)
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
                              DT_UINT16, DT_UINT8, DT_INT32, DT_INT64,
                              DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16,
                           DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
                           DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .REQUIRED_ATTR(dims, ListInt)
    .OP_END_FACTORY_REG(FillD)
/**
 * @brief Broadcasts an array to a compatible shape.
 * Broadcasting makes arrays have compatible shapes for arithmetic operations.
 * Two shapes are compatible if, for each dimension pair, they are either equal
 * or one of them is one. Matching starts from the trailing dimensions and
 * works forward.
 *
 * @par Inputs:
 * @li x: A tensor (BasicType) to broadcast.
 * @li shape: A 1D tensor of type int32 giving the shape of the desired output.
 *
 * @par Outputs:
 * y: A tensor. Has the same type as "x".
 *
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow operator BroadcastTo.
 */
REG_OP(BroadcastTo)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BroadcastTo)
/**
 * @brief Broadcasts an array to a compatible shape.
 * Compile-time-shape variant of BroadcastTo: the target shape is an attribute
 * rather than an input tensor. Two shapes are compatible if, for each
 * dimension pair, they are either equal or one of them is one; matching starts
 * from the trailing dimensions and works forward.
 *
 * @par Inputs:
 * x: A tensor (BasicType) to broadcast.
 *
 * @par Attributes:
 * shape: A required attribute of type ListInt giving the shape of the desired
 *     output.
 *
 * @par Outputs:
 * y: A tensor. Has the same type as "x".
 *
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow operator BroadcastTo.
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use BroadcastTo instead.
 */
REG_OP(BroadcastToD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(BroadcastToD)
/**
 * @brief Pads a tensor.
 *
 * @par Inputs:
 * Two inputs, including:
 * @li x: A tensor (BasicType). Must be one of the following types: float16,
 *     float32, double, int32, uint8, int16, int8, complex64, int64, qint8,
 *     quint8, qint32, qint16, quint16, uint16, complex128, uint32, uint64.
 * @li paddings: A tensor of type int32 or int64 (IndexNumberType).
 *
 * @par Outputs:
 * y: A tensor of the same type as "x".
 *
 * @par Third-party framework compatibility:
 * Compatible with the TensorFlow operator Pad.
 */
REG_OP(Pad)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Pad)
  148. /**
  149. *@brief Pads a tensor . \n
  150. *@par Inputs:
  151. *x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32 . \n
  152. *@par Attributes:
  153. *paddings: An optional "vector<vector<int>>". Defaults to "{}".
  154. * For each dimension D of input, paddings[D, 0] indicates how many
  155. * values to add before the contents of tensor in that dimension,
  156. * and paddings[D, 1] indicates how many values to add after the
  157. * contents of tensor in that dimension . \n
  158. *@par Outputs:
  159. *y: A Tensor of the same type as "x" . \n
  160. *@par Third-party framework compatibility:
  161. * Compatible with TensorFlow operator Pad.
  162. *
  163. * @par Restrictions:
  164. * Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead.
  165. */
  166. REG_OP(PadD)
  167. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT}))
  168. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_FLOAT}))
  169. .REQUIRED_ATTR(paddings, ListListInt)
  170. .OP_END_FACTORY_REG(PadD)
/**
 * @brief Pads a tensor.
 *
 * @par Inputs:
 * @li x: A tensor (BasicType). Must be one of the following types: float16,
 *     float32, double, int32, uint8, int16, int8, complex64, int64, qint8,
 *     quint8, qint32, qint16, quint16, uint16, complex128, uint32, uint64.
 * @li paddings: A tensor of type int32 or int64 (IndexNumberType).
 * @li constant_values: An optional tensor of the same type class as "x"
 *     (BasicType); presumably the fill value when mode is "constant" —
 *     TODO(review): confirm against the kernel implementation.
 *
 * @par Attributes:
 * @li mode: An optional string. Defaults to "constant". Indicates the padding
 *     mode; supports "constant", "reflect", and "edge".
 * @li paddings_contiguous: An optional bool. Defaults to true.
 *     If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...].
 *     If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...].
 *
 * @par Outputs:
 * y: A tensor of the same type as "x".
 *
 * @par Third-party framework compatibility:
 * Compatible with the ONNX operator Pad.
 */
REG_OP(PadV3)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(constant_values, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(mode, String, "constant")
    .ATTR(paddings_contiguous, Bool, true)
    .OP_END_FACTORY_REG(PadV3)
/**
 * @brief Pads a tensor.
 * Compile-time variant of PadV3: paddings and the constant fill value are
 * attributes rather than input tensors.
 *
 * @par Inputs:
 * x: A tensor. Registered types: float16, float32, int8, uint8.
 *     NOTE(review): an earlier comment also claimed int32 support, but int32
 *     is not in the registered type list — confirm before relying on it.
 *
 * @par Attributes:
 * @li paddings: A required attribute of type ListListInt.
 *     For each dimension D of the input, paddings[D, 0] indicates how many
 *     values to add before the contents of the tensor in that dimension, and
 *     paddings[D, 1] indicates how many values to add after the contents of
 *     the tensor in that dimension.
 * @li constant_values: An optional int used as the pad value. Defaults to 0.
 * @li mode: An optional string. Defaults to "constant". Indicates the padding
 *     mode; supports "constant", "reflect", and "edge".
 * @li paddings_contiguous: An optional bool. Defaults to true.
 *     If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...].
 *     If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...].
 *
 * @par Outputs:
 * y: A tensor of the same type as "x".
 *
 * @par Third-party framework compatibility:
 * Compatible with the ONNX operator Pad.
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead.
 */
REG_OP(PadV3D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(paddings, ListListInt)
    .ATTR(constant_values, Int, 0)
    .ATTR(mode, String, "constant")
    .ATTR(paddings_contiguous, Bool, true)
    .OP_END_FACTORY_REG(PadV3D)
/**
 * @brief Creates a diagonal tensor.
 * Assist-tensor variant of Diag.
 *
 * @par Inputs:
 * Two inputs, including:
 * @li x: A mutable tensor. Must be one of the following types:
 *     float16, float32, int32.
 * @li assist: A mutable tensor of rank at most 1. Has the same type as "x".
 *
 * @par Outputs:
 * y: A mutable tensor. Has the same type as "x".
 *
 * @see Diag()
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow operator Diag.
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please use Diag instead.
 */
REG_OP(DiagD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(assist, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OP_END_FACTORY_REG(DiagD)
/**
 * @brief Creates a diagonal tensor.
 *
 * @par Inputs:
 * One input, including:
 * x: A mutable tensor with rank k, where k is at most 1. Must be one of the
 *     following types: float16, float32, double, int32, int64, complex64,
 *     complex128.
 *
 * @par Outputs:
 * y: A mutable tensor. Has the same type as "x".
 *
 * @see DiagD()
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow operator Diag.
 */
REG_OP(Diag)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                          DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                           DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(Diag)
/**
 * @brief Ascend padding: pads the last dimension of the input.
 *
 * @par Inputs:
 * One input, including:
 * x: A tensor (BasicType) whose last dimension must be 1.
 *     For example: [624000, 1].
 *
 * @par Attributes:
 * pad_dim_size: An optional int. Defaults to 8. The size to which the last
 *     dimension of "x" is padded.
 *
 * @par Outputs:
 * y: "x" with its last dimension padded to pad_dim_size.
 *     For example: [624000, pad_dim_size].
 *
 * NOTE(review): the original comment claimed compatibility with the
 * TensorFlow operator Diag, which does not match this op's padding behavior;
 * this appears to be an Ascend-specific operator with no direct
 * third-party equivalent — confirm before documenting compatibility.
 */
REG_OP(AscendPadding)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(pad_dim_size, Int, 8)
    .OP_END_FACTORY_REG(AscendPadding)
/**
 * @brief EmbeddingRankId: for each index, computes the serving server and the
 * position of the index on that server.
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS DEPRECATED. Please do not use.
 *
 * @par Inputs:
 * @li addr_table: A uint64 tensor whose last dimension must be 3.
 *     For example: [8, 3].
 * @li index: A uint32 tensor of indices. For example: [640000].
 *
 * @par Attributes:
 * @li row_memory: An optional int. Defaults to 320.
 * @li mode: An optional string. Defaults to "mod"; presumably selects the
 *     index-to-server mapping scheme (e.g. modulo) — TODO(review): confirm
 *     against the kernel implementation.
 *
 * @par Outputs:
 * rank_id: A uint64 tensor with first dimension equal to the size of "index"
 *     and last dimension 3. For example: [640000, 3].
 *
 * NOTE(review): the original comment claimed compatibility with the
 * TensorFlow operator Diag, which does not match this op's behavior; this
 * appears to be an Ascend-specific operator with no direct third-party
 * equivalent — confirm before documenting compatibility.
 */
REG_OP(EmbeddingRankId)
    .INPUT(addr_table, TensorType({DT_UINT64}))
    .INPUT(index, TensorType({DT_UINT32}))
    .OUTPUT(rank_id, TensorType({DT_UINT64}))
    .ATTR(row_memory, Int, 320)
    .ATTR(mode, String, "mod")
    .OP_END_FACTORY_REG(EmbeddingRankId)
  307. } // namespace ge
  308. #endif // OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示