
pad_ops.h 7.8 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GE_OP_PAD_OPS_H
#define GE_OP_PAD_OPS_H

#include "graph/operator_reg.h"

namespace ge {
/**
 *@brief Creates a tensor filled with a scalar value.
 * This operation creates a tensor of shape "dims" and fills it with "value".
 *
 *@par Inputs:
 *@li dims: A 1D tensor of type int32 or int64. Represents the shape of the output tensor.
 *@li value: A 0D scalar. Specifies the value to fill the returned tensor.
 * Must be one of the following types:
 * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
 * qint8, quint8, qint32, uint16, complex128, uint32, uint64.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "value".
 *
 *@par Third-party framework compatibility
 *@li Compatible with the TensorFlow operator Fill.
 *@li Compatible with the Caffe operator Filler.
 *
 */
REG_OP(Fill)
    .INPUT(dims, TensorType::IndexNumberType())
    .INPUT(value, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Fill)
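// Usage sketch (illustrative), assuming the set_input_* accessors that REG_OP in
// graph/operator_reg.h generates for the inputs declared above; "dims_const" and
// "value_const" stand for hypothetical upstream nodes.
//   ge::op::Fill fill_op("fill");
//   fill_op.set_input_dims(dims_const)     // 1D int32/int64 tensor holding the output shape
//          .set_input_value(value_const);  // 0D scalar; "y" takes this tensor's dtype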
/**
 *@brief Creates a tensor filled with a scalar value.
 * This operation creates a tensor of shape "dims" and fills it with "value".
 *
 *@par Inputs:
 * value: A 0D scalar for the value to fill the returned tensor. Must be one of
 * the following types:
 * float16, float32, double, int8, int16, int32, int64, uint8, uint16, uint32,
 * uint64, bool.
 *
 *@par Attributes:
 * dims: A required attribute of type ListInt.
 * A 1D list of ints that represents the shape of the output tensor.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "value".
 *
 */
REG_OP(FillD)
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
                              DT_UINT16, DT_UINT8, DT_INT32, DT_INT64,
                              DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16,
                           DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
                           DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .REQUIRED_ATTR(dims, ListInt)
    .OP_END_FACTORY_REG(FillD)
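// Usage sketch (illustrative): FillD is the compile-time variant of Fill, so the
// output shape is passed through the "dims" attribute rather than an input tensor.
// "value_const" is a hypothetical upstream scalar node.
//   ge::op::FillD fill_d("fill_d");
//   fill_d.set_input_value(value_const)  // 0D scalar providing the fill value
//         .set_attr_dims({2, 3});        // static output shape, here 2 x 3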
/**
 *@brief Broadcasts an array to a compatible shape.
 * Broadcasting is the process of making arrays have compatible shapes
 * for arithmetic operations. Two shapes are compatible if, for each
 * dimension pair, they are either equal or one of them is one. When trying
 * to broadcast a Tensor to a shape, it starts with the trailing dimensions
 * and works its way forward.
 *
 *@par Inputs:
 *@li x: A tensor to broadcast.
 *@li shape: A 1D tensor of type int32, specifying the shape of the desired output.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".
 *
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator BroadcastTo.
 *
 */
REG_OP(BroadcastTo)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BroadcastTo)
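// Usage sketch (illustrative): broadcasting to a runtime-supplied shape; "x_node"
// and "shape_const" are hypothetical upstream nodes.
//   ge::op::BroadcastTo bcast("broadcast_to");
//   bcast.set_input_x(x_node)            // e.g. a tensor of shape [1, 3]
//        .set_input_shape(shape_const);  // 1D int32 tensor such as {4, 3}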
/**
 *@brief Broadcasts an array to a compatible shape.
 * Broadcasting is the process of making arrays have compatible shapes
 * for arithmetic operations. Two shapes are compatible if, for each
 * dimension pair, they are either equal or one of them is one. When trying
 * to broadcast a Tensor to a shape, it starts with the trailing dimensions
 * and works its way forward.
 *
 *@par Inputs:
 * x: A tensor to broadcast.
 *
 *@par Attributes:
 * shape: A required attribute of type ListInt, specifying the shape of the desired output.
 *
 *@par Outputs:
 * y: A tensor. Has the same type as "x".
 *
 *@par Third-party framework compatibility
 *Compatible with the TensorFlow operator BroadcastTo.
 *
 */
REG_OP(BroadcastToD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(BroadcastToD)
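// Usage sketch (illustrative): the -D variant takes the target shape as a static
// attribute instead of an input; "x_node" is a hypothetical upstream node.
//   ge::op::BroadcastToD bcast_d("broadcast_to_d");
//   bcast_d.set_input_x(x_node)      // e.g. a tensor of shape [1, 3]
//          .set_attr_shape({4, 3});  // static target shape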
/**
 *@brief Pads a tensor.
 *
 *@par Inputs:
 *Two inputs, including:
 * @li x: A Tensor. Must be one of the following types: float16, float32, double, int32,
 * uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
 * complex128, uint32, uint64.
 * @li paddings: A Tensor of type int32 or int64.
 *
 *@par Outputs:
 *y: A Tensor of the same type as "x".
 *
 *@par Third-party framework compatibility:
 * Compatible with TensorFlow operator Pad.
 */
REG_OP(Pad)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Pad)
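// Usage sketch (illustrative): "paddings" is a [rank(x), 2] tensor in which row D
// holds the number of values added before and after dimension D, following the
// TensorFlow Pad convention noted above; "x_node" and "paddings_const" are
// hypothetical upstream nodes.
//   ge::op::Pad pad_op("pad");
//   pad_op.set_input_x(x_node)
//         .set_input_paddings(paddings_const);  // e.g. {{1, 1}, {0, 2}} as an int32 tensor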
/**
 *@brief Pads a tensor.
 *
 *@par Inputs:
 *x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32.
 *
 *@par Attributes:
 *paddings: A required attribute of type ListListInt.
 * For each dimension D of the input, paddings[D, 0] indicates how many
 * values to add before the contents of the tensor in that dimension,
 * and paddings[D, 1] indicates how many values to add after the
 * contents of the tensor in that dimension.
 *
 *@par Outputs:
 *y: A Tensor of the same type as "x".
 *
 *@par Third-party framework compatibility:
 * Compatible with TensorFlow operator Pad.
 */
REG_OP(PadD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT32}))
    .REQUIRED_ATTR(paddings, ListListInt)
    .OP_END_FACTORY_REG(PadD)
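// Usage sketch (illustrative): PadD carries the padding amounts as a static
// ListListInt attribute; "x_node" is a hypothetical upstream node.
//   ge::op::PadD pad_d("pad_d");
//   pad_d.set_input_x(x_node)                   // e.g. a tensor of shape [2, 3]
//        .set_attr_paddings({{1, 1}, {0, 2}});  // result shape becomes [4, 5]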
/**
 *@brief Creates a diagonal tensor.
 *
 *@par Inputs:
 *Two inputs, including:
 * @li x: A mutable Tensor. Must be one of the following types:
 * float16, float32, int32.
 * @li assist: A mutable Tensor with rank k, where k is at most 1.
 * Has the same type as "x".
 *
 *@par Outputs:
 *y: A mutable Tensor. Has the same type as "x".
 *
 *@see Diag()
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Diag.
 */
REG_OP(DiagD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(assist, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OP_END_FACTORY_REG(DiagD)
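// Usage sketch (illustrative): DiagD takes an extra "assist" input of the same
// type as "x"; "x_node" and "assist_node" are hypothetical upstream nodes.
//   ge::op::DiagD diag_d("diag_d");
//   diag_d.set_input_x(x_node)
//         .set_input_assist(assist_node);  // same dtype as "x"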
/**
 *@brief Creates a diagonal tensor.
 *
 *@par Inputs:
 *One input, including:
 * x: A mutable Tensor with rank k, where k is at most 1. Must be one of the
 * following types:
 * float16, float32, double, int32, int64, complex64, complex128.
 *
 *@par Outputs:
 *y: A mutable Tensor. Has the same type as "x".
 *
 *@see DiagD()
 *@par Third-party framework compatibility
 * Compatible with the TensorFlow operator Diag.
 */
REG_OP(Diag)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                          DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                           DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(Diag)
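// Usage sketch (illustrative): for a 1D input of length n, Diag returns an n x n
// tensor with "x" on the main diagonal and zeros elsewhere (TensorFlow Diag
// semantics); "x_node" is a hypothetical upstream node.
//   ge::op::Diag diag_op("diag");
//   diag_op.set_input_x(x_node);  // e.g. {1, 2, 3} -> a 3 x 3 diagonal matrix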
/**
 *@brief Pads the last dimension of the input tensor to a fixed size (AscendPadding).
 *
 *@par Inputs:
 *One input, including:
 *x: A Tensor whose last dimension must be 1. For example: [624000, 1].
 *
 *@par Attributes:
 *pad_dim_size: An optional int. The size to which the last dimension is padded. Defaults to 8.
 *
 *@par Outputs:
 *y: A Tensor with the last dimension of "x" padded to "pad_dim_size", for example [624000, pad_dim_size].
 */
REG_OP(AscendPadding)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(pad_dim_size, Int, 8)
    .OP_END_FACTORY_REG(AscendPadding)
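// Usage sketch (illustrative): pads the size-1 last dimension up to
// "pad_dim_size"; "x_node" is a hypothetical upstream node of shape [624000, 1].
//   ge::op::AscendPadding ascend_pad("ascend_padding");
//   ascend_pad.set_input_x(x_node)
//             .set_attr_pad_dim_size(8);  // output shape becomes [624000, 8]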
}  // namespace ge

#endif  // GE_OP_PAD_OPS_H

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.