reduce_ops.h
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef GE_OP_REDUCE_OPS_H
#define GE_OP_REDUCE_OPS_H

#include "../graph/operator_reg.h"

namespace ge {

REG_OP(BNTrainingReduce)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(sum, TensorType({DT_FLOAT}))
    .OUTPUT(square_sum, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingReduce)
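// A semantics sketch (illustrative, not normative; assumes NCHW layout):
// BNTrainingReduce produces the per-channel statistics later consumed by
// BNTrainingUpdate:
//   sum[c]        = sum over n,h,w of x[n,c,h,w]
//   square_sum[c] = sum over n,h,w of x[n,c,h,w]^2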
REG_OP(BNTrainingReduceGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(diff_scale, TensorType({DT_FLOAT}))
    .INPUT(diff_offset, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OP_END_FACTORY_REG(BNTrainingReduceGrad)

REG_OP(BNTrainingUpdate)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(factor, Float)
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(mean, TensorType({DT_FLOAT}))
    .OUTPUT(variance, TensorType({DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdate)
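// A semantics sketch (illustrative; the exact formulas are not stated in this
// header), assuming the conventional batch-norm moving-average update, with
// N = number of reduced elements per channel:
//   batch_mean[c]     = sum[c] / N
//   batch_variance[c] = square_sum[c] / N - batch_mean[c]^2
//   mean     <- (1 - factor) * mean     + factor * batch_mean
//   variance <- (1 - factor) * variance + factor * batch_variance
//   y = scale * (x - batch_mean) / sqrt(batch_variance + epsilon) + offset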
REG_OP(BNInfer)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(BNInfer)

REG_OP(BNTrainingUpdateV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdateV2)

REG_OP(BNTrainingUpdateGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OUTPUT(diff_scale, TensorType({DT_FLOAT}))
    .OUTPUT(diff_offset, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdateGrad)

REG_OP(BNInferGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(x_backprop, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OP_END_FACTORY_REG(BNInferGrad)

REG_OP(ReduceSum)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceSum)

REG_OP(ReduceSumD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT32}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceSumD)
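// Naming note (a pattern visible throughout this header): an op whose name
// ends in "D" (e.g. ReduceSumD) takes the reduction axes as a compile-time
// attribute, REQUIRED_ATTR(axis, ListInt), while the plain form
// (e.g. ReduceSum) takes them as a runtime tensor input.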
/**
*@brief Calculates the "logical and" of elements of a tensor in a dimension.
*@par Inputs:
*One input:
*x: A mutable Tensor of type bool. Should be a Variable Tensor.
*@par Attributes:
*@li keep_dims: A bool. If true, retains reduced dimensions with length 1.
*@li axis: The dimensions to reduce. If None, reduces all dimensions.
*Must be in the range [-rank(input_tensor), rank(input_tensor)).
*@par Outputs:
*y: The reduced tensor.
*/
REG_OP(ReduceAllD)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAllD)

/**
*@brief Calculates the "logical and" of elements of a tensor in a dimension.
*@par Inputs:
*Two inputs, including:
*@li x: A mutable Tensor of type bool. Should be a Variable Tensor.
*@li axis: A mutable Tensor. The dimensions to reduce. If None, reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).
*@par Attributes:
*keep_dims: A bool. If true, retains reduced dimensions with length 1.
*@par Outputs:
*y: The reduced tensor.
*/
REG_OP(ReduceAll)
    .INPUT(x, TensorType({DT_BOOL}))
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType({DT_BOOL}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAll)

/**
*@brief Reduces a tensor along a certain axis based on product.
*@par Inputs:
*Two inputs, including:
*@li x: A mutable Tensor. Must be one of the types in NumberType.
*@li axis: A mutable Tensor. The dimensions to reduce.
*@par Attributes:
*keep_dims: A bool. If true, retains reduced dimensions with length 1. Defaults to "False".
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x".
*/
REG_OP(ReduceProd)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceProd)

/**
*@brief Computes the product of elements across dimensions of a tensor.
*@par Inputs:
* One input: \n
*x: A Tensor. Must be one of the following types: float16, float, int8, uint8, int32.
*@par Attributes:
*@li axis: A required list of ints, specifying the dimensions to reduce. No default value.
*@li keep_dims: An optional bool. If "True", retains reduced dimensions with length 1. Defaults to "False".
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x".
*@attention Constraints:
* Each entry of "axis" must be in the range [-rank(input_tensor), rank(input_tensor)).
*/
REG_OP(ReduceProdD)
    .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceProdD)
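// Example (illustrative): for a rank-3 "x" of shape (2, 3, 4),
// axis = {-1} is equivalent to axis = {2}; with keep_dims = false the output
// shape is (2, 3), and with keep_dims = true it is (2, 3, 1).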
/**
*@brief Reduces "x" along the dimensions according to "axis".
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, int8, uint8.
* @li axis: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType.\n
* - If None (the default), reduces all dimensions.\n
* - Must be in the range [-rank(x), rank(x)).
*@par Attributes:
*keep_dims: A bool or NoneType. \n
* - If true, retains reduced dimensions with length 1. \n
* - If false, the rank of the tensor is reduced by 1 for each entry in axis.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ReduceMean)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMean)

/**
*@brief Reduces "x" along the dimensions according to "axis".
*@par Inputs:
*One input:
* @li x: A Tensor. Must be one of the following types: float16, float32, int8, uint8, int32.
*@par Attributes:
*@li axis: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType. \n
* If None (the default), reduces all dimensions. \n
* Must be in the range [-rank(x), rank(x)). \n
*@li keep_dims: A bool or NoneType. \n
* - If true, retains reduced dimensions with length 1. \n
* - If false, the rank of the tensor is reduced by 1 for each entry in axis.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(ReduceMeanD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT, DT_INT8, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32, DT_FLOAT, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMeanD)

REG_OP(ReduceMax)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMax)

/**
*@brief Returns the maximum of elements across dimensions of a Tensor.
*@par Inputs:
*x: A multi-dimensional Tensor of type float16, float32, int8, uint8, or int32.
*@par Attributes:
* Two attributes, including: \n
*@li axis: A required listint, specifying the axes to reduce.
*@li keep_dims: A bool, specifying whether to keep dimensions for the output Tensor. Defaults to "false".
*@par Outputs:
*y: A multi-dimensional Tensor holding the maxima along the reduced axes. Has the same type as "x". (If "keep_dims" is "false", the rank of the output is reduced by one for each entry in "axis" compared with "x"; otherwise, the reduced dimensions are retained with length 1.)
*@attention Constraints:
* The value range of "axis" is [-dims, dims - 1]. "dims" indicates the dimension length of "x".
*/
REG_OP(ReduceMaxD)
    .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
                          DT_FLOAT16, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
                           DT_FLOAT16, DT_INT32}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMaxD)
REG_OP(ReduceMin)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMin)

REG_OP(ReduceMinD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMinD)

/**
*@brief Computes the "logical or" of elements across dimensions of a tensor.\n
* Reduces `x` along the dimensions given in `axis`.
* Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
* entry in `axis`. If `keep_dims` is true, the reduced dimensions
* are retained with length 1.
*
* If `axis` is None, all dimensions are reduced, and a
* tensor with a single element is returned.
*
*@attention Constraints:\n
* Only supports bool tensors.
*
*@par Inputs:
*@li x: The boolean tensor to reduce.
*@li axis: The dimensions to reduce. If `None` (the default), reduces all
* dimensions. Must be in the range `[-rank(x), rank(x))`.
*
*@par Attributes:
* keep_dims: If true, retains reduced dimensions with length 1.
*
*@par Outputs:
* y: The reduced tensor.
*
*/
REG_OP(ReduceAny)
    .INPUT(x, TensorType({DT_BOOL}))
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType({DT_BOOL}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAny)

/**
*@brief Computes the "logical or" of elements across dimensions of a tensor.\n
* Reduces `x` along the dimensions given in `axis`.
* Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
* entry in `axis`. If `keep_dims` is true, the reduced dimensions
* are retained with length 1.
*
* If `axis` is None, all dimensions are reduced, and a
* tensor with a single element is returned.
*
*@attention Constraints:\n
* Only supports bool tensors.
*
*@par Inputs:
* x: The boolean tensor to reduce.
*
*@par Attributes:
*@li axis: The dimensions to reduce. If `None` (the default), reduces all
* dimensions. Must be in the range `[-rank(x), rank(x))`.
*@li keep_dims: If true, retains reduced dimensions with length 1.
*
*@par Outputs:
* y: The reduced tensor.
*
*/
REG_OP(ReduceAnyD)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .REQUIRED_ATTR(axis, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAnyD)

}  // namespace ge
#endif /* GE_OP_REDUCE_OPS_H */
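As a point of reference, here is a minimal graph-construction sketch that exercises one of the registrations above. It assumes the operator classes generated from these REG_OP registrations (with set_input_*/set_attr_* accessors, typically exposed via all_ops.h) and the standard ge::Graph API; tensor descriptions and error handling are omitted, so treat it as illustrative rather than a verified build target.

#include <vector>
#include "all_ops.h"      // generated operator classes (assumed available)
#include "graph/graph.h"

ge::Graph BuildReduceSumGraph() {
  // Placeholder graph input; shape and dtype setup omitted for brevity.
  auto x = ge::op::Data("x");

  // ReduceSumD takes its axes as a compile-time attribute, matching the
  // REQUIRED_ATTR(axis, ListInt) registration above.
  auto reduce = ge::op::ReduceSumD("reduce_sum")
                    .set_input_x(x)
                    .set_attr_axis({0})
                    .set_attr_keep_dims(false);

  ge::Graph graph("reduce_graph");
  graph.SetInputs(std::vector<ge::Operator>{x})
      .SetOutputs(std::vector<ge::Operator>{reduce});
  return graph;
}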

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.