You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

random_ops.h 15 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_OP_RANDOM_OPS_H_
  17. #define GE_OP_RANDOM_OPS_H_
  18. #include <vector>
  19. #include "graph/operator_reg.h"
  20. namespace ge {
/**
*@brief Draws samples from a multinomial distribution.

*@par Inputs:
*Inputs include: \n
* @li logits: A Tensor. Must be one of the following types: float16, float32, double. \n
2-D Tensor with shape [batch_size, num_classes]. Each row is an unnormalized log-probability vector.
* @li num_samples: A Tensor of type int32. 0-D. Number of independent samples to draw for each row slice.

*@par Attributes:
*@li dtype: An optional type from: int32, int64. The output type. Defaults to int64.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor of type "dtype", with shape [batch_size, num_samples].

*@attention Constraints:\n
*-The implementation for Multinomial on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(Multinomial)
    .INPUT(logits, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(num_samples, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(Multinomial)
/**
*@brief Outputs random values from a normal distribution, truncated to the interval [min, max].

*@par Inputs:
*Inputs include: \n
* @li shape: A Tensor. Must be one of the following types: int32, int64. \n
The shape of the output tensor. Batches are indexed by the 0th dimension.
* @li means: A Tensor. Must be one of the following types: float16, float32, double.
* @li stdevs: A Tensor. Must have the same type as "means".
* @li min: A Tensor. Must have the same type as "means". The minimum cutoff. May be -infinity.
* @li max: A Tensor. Must have the same type as "means". The maximum cutoff.

*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor. Has the same type as "means".

*@attention Constraints:\n
*-The implementation for ParameterizedTruncatedNormal on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(ParameterizedTruncatedNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(means, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(stdevs, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(min, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(max, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(ParameterizedTruncatedNormal)
/**
*@brief Computes the derivative of a Gamma random sample w.r.t. alpha.

*@par Inputs:
*Inputs include: \n
* @li alpha: A Tensor. Must be one of the following types: float32, double.
* @li sample: A Tensor. Must have the same type as "alpha".

*@par Outputs:
*y: A Tensor. Has the same type as "alpha".

*@attention Constraints:\n
*-The implementation for RandomGammaGrad on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomGammaGrad)
    .INPUT(alpha, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(sample, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(RandomGammaGrad)
/**
*@brief Outputs random values from the Gamma distribution(s) described by alpha.

*@par Inputs:
*Inputs include: \n
* @li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
* @li alpha: A Tensor. Must be one of the following types: float16, float32, double. \n
The shape parameter(s) of the Gamma distribution(s).

*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor. Has the same type as "alpha".

*@attention Constraints:\n
*-The implementation for RandomGamma on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomGamma)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(alpha, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomGamma)
/**
*@brief Outputs random values from the Poisson distribution(s) described by rate.

*@par Inputs:
*Inputs include: \n
* @li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
* @li rate: A Tensor. Must be one of the following types: float16, float32, double, int32, int64. \n
The rate parameter(s) of the Poisson distribution(s).

*@par Attributes:
*@li dtype: An optional type from: float16, float32, double, int32, int64. The output type. \n
Defaults to int64.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor of type "dtype".

*@attention Constraints:\n
*-The implementation for RandomPoisson on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomPoisson)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(rate, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
                             DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
                           DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomPoisson)
/**
*@brief Randomly shuffles a tensor along its first dimension.

*@par Inputs:
*Inputs include: \n
*x: A Tensor. The tensor to be shuffled.

*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor. Has the same type and shape as "x".

*@attention Constraints:\n
*-The implementation for RandomShuffle on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomShuffle)
    .INPUT(x, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
                          DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
                          DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
                           DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
                           DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomShuffle)
/**
*@brief Outputs random values from a normal distribution.

*@par Inputs:
*Inputs include: \n
*shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.

*@par Attributes:
*@li dtype: A required type from: float16, float32, double. The type of the output.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor of type "dtype".

*@attention Constraints:\n
*-The implementation for RandomStandardNormal on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomStandardNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomStandardNormal)
/**
*@brief Outputs random integers from a uniform distribution.

*@par Inputs:
*Inputs include: \n
* @li shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.
* @li min: A Tensor. Must be one of the following types: int32, int64. 0-D. Inclusive lower bound.
* @li max: A Tensor. Must have the same type as "min". 0-D. Exclusive upper bound.

*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor. Has the same type as "min".

*@attention Constraints:\n
*-The implementation for RandomUniformInt on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomUniformInt)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(min, TensorType({DT_INT32, DT_INT64}))
    .INPUT(max, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniformInt)
/**
*@brief Outputs random values from a uniform distribution.

*@par Inputs:
*Inputs include: \n
*shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.

*@par Attributes:
*@li dtype: A required type from: float16, float32, double. The type of the output.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor of type "dtype".

*@attention Constraints:\n
*-The implementation for RandomUniform on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(RandomUniform)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniform)
/**
*@brief Outputs random values from a truncated normal distribution.

*@par Inputs:
*Inputs include: \n
*shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.

*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision.

*@par Outputs:
*y: A Tensor. Must be one of the following types: float16, float32, double.

*@attention Constraints:\n
*-The implementation for TruncatedNormal on Ascend uses AICPU, with bad performance.\n

*/
REG_OP(TruncatedNormal)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(TruncatedNormal)
/**
*@brief Generates a random bit mask for dropout.

*@par Inputs:
*Inputs include: \n
*@li shape: A Tensor of type int32 or int64. The shape of the output tensor.
*@li prob: A 0-D Tensor of type float16 or float32. The probability of a bit being 1.

*@par Attributes:
*@li seed: If either seed or seed2 is set to be non-zero, the random number\n
*generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: A second seed to avoid seed collision.

*@par Outputs:
*y: A 1-D Tensor of type uint8 holding the generated random bits.

*@attention Constraints:\n
*The output is aligned with 128 bits.

*@see DropOutGenMask()
*/
REG_OP(DropOutGenMask)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(DropOutGenMask)
/**
*@brief Generates evenly-spaced values in an interval.

*@par Inputs:\n
* Four ND inputs, including:
*@li assist: A 1D Tensor of type float32. A sequence of "num" evenly-spaced values \n
beginning at 0 with a common difference of 1 (see Constraints).
*@li start: A 1D Tensor of type float32, for the first entry in the range.
*@li stop: A 1D Tensor of type float32, for the last entry in the range.
*@li num: A 1D Tensor of an index-number type (e.g. int32), for the number of entries in the range.

*@par Outputs:\n
*output: A 1D Tensor of type float32.

*@attention Constraints:\n
* "assist" is a sequence of "num" evenly-spaced values beginning at 0 with a common difference of 1.
*/
REG_OP(LinSpaceD)
    .INPUT(assist, TensorType({DT_FLOAT}))
    .INPUT(start, TensorType({DT_FLOAT}))
    .INPUT(stop, TensorType({DT_FLOAT}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(LinSpaceD)
/**
*@brief Generates evenly-spaced values in an interval.

*@par Inputs:\n
* Three ND inputs, including:
*@li start: A 1D Tensor of type float32 or double, for the first entry in the range.
*@li stop: A 1D Tensor of type float32 or double, for the last entry in the range.
*@li num: A 1D Tensor of an index-number type (e.g. int32), for the number of entries in the range.

*@par Outputs:\n
*output: A 1D Tensor. Has the same type as "start".
*/
REG_OP(LinSpace)
    .INPUT(start, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(stop, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LinSpace)
/**
*@brief Performs dropout on the input tensor "x".

*@par Inputs:
*x: A Tensor of type float32.

*@par Attributes:
*@li dropout_ratio: An optional float. Defaults to 0.5. \n
* NOTE(review): presumably the probability of an element being dropped — confirm with the kernel implementation.
*@li scale_train: An optional bool. Defaults to true. \n
* NOTE(review): presumably scales retained elements during training to preserve the expected sum — confirm.
*@li alpha: An optional float. Defaults to 1.0.
*@li beta: An optional float. Defaults to 0.0.

*@par Outputs:
*y: A Tensor of type float32.
*/
REG_OP(Dropout)
    .INPUT(x, TensorType{DT_FLOAT})
    .OUTPUT(y, TensorType{DT_FLOAT})
    .ATTR(dropout_ratio, Float, 0.5)
    .ATTR(scale_train, Bool, true)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(Dropout)
/**
*@brief Shuffles the indices of the non-zero elements of the input.

*@par Inputs:
*Inputs include: \n
*x: A bool Tensor with rank <= 5.

*@par Attributes:
*@li count: The number of output samples. If 0, all non-zero elements are output.
*@li seed: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. \n
Otherwise, it is seeded by a random seed.
*@li seed2: A second seed to avoid seed collision.

*@par Outputs:
*@li y: A 2-D int32 tensor of non-zero element indices.
*@li mask: A 1-D bool tensor indicating whether the corresponding index is valid.

*@see RandomChoiceWithMask()
*/
REG_OP(RandomChoiceWithMask)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OUTPUT(mask, TensorType({DT_BOOL}))
    .ATTR(count, Int, 0)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomChoiceWithMask)
/**
*@brief Permutes data in the channel dimension of the input.

*@par Inputs:
*Inputs including: \n
* @li x: A required Tensor. Must be one of the following types: \n
float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64.

*@par Attributes:
*@li group: An optional int32, specifying the number of groups to split the channel dimension into. Defaults to 1.

*@par Outputs:
*y: A required Tensor. Has the same type and shape as "x". Must be one of the following types: \n
float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64.

*@attention Constraints:\n
*@li "group" must be greater than 0 and must evenly divide the channel dimension size.
*@li The format of input "x" must be NCHW.
*/
REG_OP(ShuffleChannel)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16,
                          DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16,
                           DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .ATTR(group, Int, 1)
    .OP_END_FACTORY_REG(ShuffleChannel)
  357. } // namespace ge
  358. #endif // GE_OP_RANDOM_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示