You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

sparse_ops.h 41 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_OP_SPARSE_OPS_H_
  17. #define GE_OP_SPARSE_OPS_H_
  18. #include "graph/operator_reg.h"
  19. namespace ge {
  20. /**
  21. *@brief Applies softmax to a batched ND SparseTensor.
  22. *@par Inputs:
  23. *The input must be a batched ND SparseTensor.
  24. * @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  25. * @li values: A vector Tensor of type float or double. 1D. The values of the SparseTensor.
  26. * @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
  27. *@par Outputs:
  28. *y: A vector Tensor. 1D. Has the same type as "values".
  29. */
  30. REG_OP(SparseSoftmax)
  31. .INPUT(indices, TensorType({DT_INT64}))
  32. .INPUT(values, TensorType({DT_FLOAT, DT_DOUBLE}))
  33. .INPUT(shape, TensorType({DT_INT64}))
  34. .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
  35. .OP_END_FACTORY_REG(SparseSoftmax)
  36. /**
  37. *@brief Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.
  38. *@par Inputs:
  39. *Inputs "x1_*" must be SparseTensors and "x2" must be a dense Tensor.
  40. * @li x1_indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
  41. * @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
  42. * @li x1_shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor.
  43. * @li x2: A matrix Tensor. Has the same type and same shape as the SparseTensors.
  44. *@par Outputs:
  45. *y: A matrix Tensor. Has the same type and same shape as "x2".
  46. */
  47. REG_OP(SparseTensorDenseAdd)
  48. .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
  49. .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
  50. DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  51. .INPUT(x1_shape, TensorType({DT_INT32, DT_INT64}))
  52. .INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
  53. DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  54. .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
  55. DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  56. .OP_END_FACTORY_REG(SparseTensorDenseAdd)
  57. /**
  58. *@brief Reorders a SparseTensor into the canonical, row-major ordering.
  59. *@par Inputs:
  60. * @li indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
  61. * @li values: Values of the SparseTensor. A vector Tensor. 1D.
  62. * @li shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor.
  63. *@par Outputs:
  64. *@li y_indices: The indices of the SparseTensor. Has the same type as "indices".
  65. *@li y_values: The values of the SparseTensorr. Has the same type as "values".
  66. */
  67. REG_OP(SparseReorder)
  68. .INPUT(indices, TensorType({DT_INT64}))
  69. .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  70. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  71. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  72. .INPUT(shape, TensorType({DT_INT64}))
  73. .OUTPUT(y_indices, TensorType({DT_INT64}))
  74. .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  75. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  76. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  77. .OP_END_FACTORY_REG(SparseReorder)
  78. /**
  79. *@brief Reshapes a SparseTensor to represent values in a new dense shape.
  80. *@par Inputs:
  81. * @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  82. * @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
  83. * @li new_shape: A 1D Tensor of type int64. The requested new dense shape.
  84. *@par Outputs:
  85. *@li y_indices: A Tensor of type int64. The indices of the new dense shape.
  86. *@li y_shape: A Tensor of type int64. The shape of the new dense shape.
  87. */
  88. REG_OP(SparseReshape)
  89. .INPUT(indices, TensorType({DT_INT64}))
  90. .INPUT(shape, TensorType({DT_INT64}))
  91. .INPUT(new_shape, TensorType({DT_INT64}))
  92. .OUTPUT(y_indices, TensorType({DT_INT64}))
  93. .OUTPUT(y_shape, TensorType({DT_INT64}))
  94. .OP_END_FACTORY_REG(SparseReshape)
  95. /**
  96. *@brief Adds up a SparseTensor and a dense Tensor.
  97. *@par Inputs:
  98. *(1) Broadcasts the dense side to have the same shape as the sparse side, if eligible;\n
  99. *(2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
  100. * @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  101. * @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
  102. * @li x1_shape: A 1D Tensor of type int64. The requested new dense shape.
  103. * @li x2: A dense Tensor of the same type as "x1_values".
  104. *@par Outputs:
  105. *y: A Tensor. Has the same type as "x1_values".
  106. */
  107. REG_OP(SparseDenseCwiseAdd)
  108. .INPUT(x1_indices, TensorType({DT_INT64}))
  109. .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  110. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
  111. DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  112. .INPUT(x1_shape, TensorType({DT_INT64}))
  113. .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  114. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  115. DT_COMPLEX64, DT_COMPLEX128}))
  116. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  117. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  118. DT_COMPLEX64, DT_COMPLEX128}))
  119. .OP_END_FACTORY_REG(SparseDenseCwiseAdd)
  120. /**
  121. *@brief Divides a SparseTensor by a dense Tensor.
  122. *@par Inputs:
  123. * @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  124. * @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
  125. * @li x1_shape: A 1D Tensor of type int64. The requested new dense shape.
  126. * @li x2: A dense Tensor of the same type as "x1_values".
  127. *@par Outputs:
  128. *y: A Tensor. Has the same type as "x1_values".
  129. */
  130. REG_OP(SparseDenseCwiseDiv)
  131. .INPUT(x1_indices, TensorType({DT_INT64}))
  132. .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  133. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
  134. DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  135. .INPUT(x1_shape, TensorType({DT_INT64}))
  136. .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  137. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  138. DT_COMPLEX64, DT_COMPLEX128}))
  139. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  140. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  141. DT_COMPLEX64, DT_COMPLEX128}))
  142. .OP_END_FACTORY_REG(SparseDenseCwiseDiv)
  143. /**
  144. *@brief Multiplies a SparseTensor by a dense Tensor.
  145. *@par Inputs:
  146. * @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  147. * @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
  148. * @li x1_shape: A 1D Tensor of type int64. The requested new dense shape.
  149. * @li x2: A dense Tensor of the same type as "x1_values".
  150. *@par Outputs:
  151. *y: A Tensor. Has the same type as "x1_values".
  152. */
  153. REG_OP(SparseDenseCwiseMul)
  154. .INPUT(x1_indices, TensorType({DT_INT64}))
  155. .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  156. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
  157. DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  158. .INPUT(x1_shape, TensorType({DT_INT64}))
  159. .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  160. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  161. DT_COMPLEX64, DT_COMPLEX128}))
  162. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
  163. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  164. DT_COMPLEX64, DT_COMPLEX128}))
  165. .OP_END_FACTORY_REG(SparseDenseCwiseMul)
  166. /**
  167. *@brief Adds a SparseTensor to a SparseTensorsMap.
  168. *@par Inputs:
  169. * The input tensor must be a SparseTensor.
  170. * @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  171. * @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
  172. * @li x1_shape: A 1D Tensor of type int64. The requested new dense shape.
  173. *@par Attributes:
  174. *@li container: An optional string. Defaults to " ".
  175. *@li shared_name: An optional string. Defaults to " ".
  176. *@par Outputs:
  177. *handle: A Tensor of type int64.
  178. */
  179. REG_OP(AddSparseToTensorsMap)
  180. .INPUT(indices, TensorType({DT_INT64}))
  181. .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  182. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE \
  183. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  184. .INPUT(shape, TensorType({DT_INT64}))
  185. .OUTPUT(handle, TensorType({DT_INT64}))
  186. .ATTR(container, String, "")
  187. .ATTR(shared_name, String, "")
  188. .OP_END_FACTORY_REG(AddSparseToTensorsMap)
  189. /**
  190. *@brief The gradient operator for the SparseSlice op.
  191. *@par Inputs:
  192. * @li backprop_val_grad: A Tensor.
  193. * @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
  194. * @li start: A 1D Tensor of type int64. The start of the slice.
  195. * @li new_indices: A matrix Tensor of type int64. 2D. The indices of the sliced SparseTensor.
  196. *@par Outputs:
  197. *y_grad: A Tensor of type int64.
  198. */
  199. REG_OP(SparseSliceGrad)
  200. .INPUT(backprop_val_grad, TensorType({ DT_INT8, DT_UINT8, DT_INT16,
  201. DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
  202. DT_COMPLEX64, DT_COMPLEX128}))
  203. .INPUT(indices, TensorType({DT_INT64}))
  204. .INPUT(start, TensorType({DT_INT64}))
  205. .INPUT(new_indices, TensorType({DT_INT64}))
  206. .OUTPUT(y_grad, TensorType({ DT_INT8, DT_UINT8, DT_INT16,
  207. DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
  208. DT_COMPLEX64, DT_COMPLEX128 }))
  209. .OP_END_FACTORY_REG(SparseSliceGrad)
  210. /**
  211. *@brief Slices a SparseTensor based on the "start" and "size".
  212. *@par Inputs:
  213. * @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
  214. * @li values: A 1D Tensor. The values of the SparseTensor.
  215. * @li shape: A 2D Tensor of type int64. The shape of the SparseTensor.
  216. * @li start: A 1D Tensor of type int64. The start of the slice.
  217. * @li size: A 1D Tensor of type int64. The size of the slice.
  218. *@par Outputs:
  219. *y_indices: A Tensor of type int64.
  220. *y_values: A Tensor. Has the same type as "values".
  221. *y_values: A Tensor of type int64.
  222. */
  223. REG_OP(SparseSlice)
  224. .INPUT(indices, TensorType({DT_INT64}))
  225. .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
  226. DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
  227. DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
  228. .INPUT(shape, TensorType({DT_INT64}))
  229. .INPUT(start, TensorType({DT_INT64}))
  230. .INPUT(size, TensorType({DT_INT64}))
  231. .OUTPUT(y_indices, TensorType({DT_INT64}))
  232. .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
  233. DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
  234. DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
  235. .OUTPUT(y_shape, TensorType({DT_INT64}))
  236. .OP_END_FACTORY_REG(SparseSlice)
  237. /**
  238. *@brief The gradient operator for the SparseAdd op.
  239. *@par Inputs:
  240. * @li backprop_val_grad: A 1D Tensor with shape [nnz(sum)]. The gradient with respect to the non-empty values of the sum.
  241. * @li x1_indices: A 2D Tensor of type int64. The indices of the SparseTensor A, with size [nnz(A), ndims].
  242. * @li x2_indices: A 2D Tensor of type int64. The indices of the SparseTensor B, with size [nnz(B), ndims].
  243. * @li sum_indices: A 2D Tensor of type int64. The indices of the sum SparseTensor, with size [nnz(sum), ndims].
  244. *@par Outputs:
  245. *x1_val_grad: A Tensor. Has the same type as "backprop_val_grad".
  246. *x2_val_grad: A Tensor. Has the same type as "backprop_val_grad".
  247. */
  248. REG_OP(SparseAddGrad)
  249. .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
  250. DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  251. .INPUT(x1_indices, TensorType({DT_INT64}))
  252. .INPUT(x2_indices, TensorType({DT_INT64}))
  253. .INPUT(sum_indices, TensorType({DT_INT64}))
  254. .OUTPUT(x1_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
  255. DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  256. .OUTPUT(x2_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
  257. DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  258. .OP_END_FACTORY_REG(SparseAddGrad)
  259. /**
  260. *@brief The gradient of SparseFillEmptyRows.
  261. *@par Inputs:
  262. * @li reverse_index_map: A 1D Tensor of type int64. The reverse index map from SparseFillEmptyRows.
  263. * @li grad_values: A 1D Tensor. The gradients from backprop.
  264. *@par Outputs:
  265. *@li y_value: A Tensor. Has the same type as "grad_values".
  266. *@li y_default_value: A Tensor. Has the same type as "grad_values".
  267. */
  268. REG_OP(SparseFillEmptyRowsGrad)
  269. .INPUT(reverse_index_map, TensorType({DT_INT64}))
  270. .INPUT(grad_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  271. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  272. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  273. .OUTPUT(y_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  274. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  275. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  276. .OUTPUT(y_default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
  277. DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
  278. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  279. .OP_END_FACTORY_REG(SparseFillEmptyRowsGrad)
  280. /**
  281. *@brief Multiplies SparseTensor A (of rank 2) by dense matrix B.
  282. *@par Inputs:
  283. * @li x1_indices: A 2D Tensor of type int32 or int64.
  284. * @li The indices of the matrix "SparseTensor", with size [nnz, 2].
  285. * @li x1_values: A 1D Tensor. The values of the SparseTensor, with size [nnz].
  286. * @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor, with size [2].
  287. * @li x2: A dense matrix Tensor of the same type as "x1_values". 2D.
  288. *@par Outputs:
  289. *y: A "Tensor". Has the same type as "x1_values".
  290. *@par Attributes:
  291. *@li adjoint_a: An optional bool. Defaults to "False".Use the adjoint of A in the matrix multiply.
  292. *@li If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A).
  293. *@li adjoint_b: An optional bool. Defaults to "False".Use the adjoint of B in the matrix multiply.
  294. *@li If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B).
  295. */
  296. REG_OP(SparseTensorDenseMatMul)
  297. .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
  298. .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \
  299. DT_COMPLEXT64, DT_COMPLEX128, DT_FLOAT16}))
  300. .INPUT(x1_shape, TensorType({DT_INT64}))
  301. .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \
  302. DT_COMPLEX128, DT_FLOAT16}))
  303. .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEXT64, \
  304. DT_COMPLEX128, DT_FLOAT16}))
  305. .ATTR(adjoint_a, Bool, false)
  306. .ATTR(adjoint_b, Bool, false)
  307. .OP_END_FACTORY_REG(SparseTensorDenseMatMul)
  308. /**
  309. *@brief Converts a sparse representation into a dense tensor.
  310. *@par Inputs:
  311. * @li indices: A 0D, 1D, or 2D Tensor of type int32 or int64.
  312. * @li output_shape: A 1D Tensor of the same type as "sparse_indices". The shape of the dense output tensor.
  313. * @li values: A 1D Tensor. Values corresponding to each row of "sparse_indices",
  314. * @li or a scalar value to be used for all sparse indices.
  315. * @li default_value: A Tensor of the same type as "sparse_values".
  316. *@par Outputs:
  317. *y: A Tensor. Has the same type as "values".
  318. */
  319. REG_OP(SparseToDense)
  320. .INPUT(indices, TensorType({DT_INT32, DT_INT64}))
  321. .INPUT(output_shape, TensorType({DT_INT32, DT_INT64}))
  322. .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  323. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
  324. .INPUT(default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
  325. DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, \
  326. DT_DOUBLE}))
  327. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  328. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
  329. .ATTR(validate_indices, Bool, true)
  330. .OP_END_FACTORY_REG(SparseToDense)
  331. /**
  332. *@brief Concatenates a list of `SparseTensor` along the specified dimension.\n
  333. *Concatenation is with respect to the dense versions of these sparse tensors.
  334. *@par Inputs:
  335. *3 or 5 inputs,contains:
  336. * @li indices:A list of at least 2 `Tensor` objects with type `int64`.2-D. \n
  337. *Indices of each input `SparseTensor`.
  338. * @li values:A list with the same length as `indices` of `Tensor` objects with the same type.
  339. * @li shapes:A list with the same length as `indices` of `Tensor` objects with type `int64`.1-D. \n
  340. * Shapes of each `SparseTensor`.
  341. *@par Attributes:
  342. *@li concat_dim: An `int` Dimension to concatenate along
  343. *@li N:Number of sparse
  344. *@par Outputs:
  345. * @li y_indices:A `Tensor` of type `int64`.
  346. * @li y_values:A `Tensor`. Has the same type as `values`.
  347. * @li y_shape:A `Tensor` of type `int64`.
  348. * Compatible SparseConcat operator in Tensorflow
  349. */
  350. REG_OP(SparseConcat)
  351. .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
  352. .DYNAMIC_INPUT(values,
  353. TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  354. DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  355. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  356. .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
  357. .OUTPUT(y_indices, TensorType({DT_INT64}))
  358. .OUTPUT(y_values,
  359. TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  360. DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  361. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  362. .OUTPUT(y_shape, TensorType({DT_INT64}))
  363. .ATTR(concat_dim, Int, 0)
  364. .ATTR(N, Int, 1)
  365. .OP_END_FACTORY_REG(SparseConcat)
  366. /**
  367. *@brief Adds two `SparseTensor` objects to produce another `SparseTensor`.
  368. *@par Inputs:
  369. *7 inputs, contains:
  370. * @li x1_indices:A `Tensor` of type `int64`.2-D. \n
  371. * The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
  372. * @li x1_values:A `Tensor`. Must be one of the following types:float,int8,int16,int32,int64, float64.
  373. * @li x1_shape:A `Tensor` of type `int64`.1-D. The `shape` of the first `SparseTensor`, \n
  374. * size `[ndims]` Vector.
  375. * @li x2_indices:A `Tensor` of type `int64`.2-D.The `indices` of the second `SparseTensor`, \n
  376. * size `[nnz, ndims]` Matrix.
  377. * @li x2_values:A `Tensor`. Must have the same type as `a_values`.1-D. \n
  378. * The `values` of the second `SparseTensor`, size `[nnz]` Vector.
  379. * @li x2_shape:A `Tensor` of type `int64`.1-D. \n
  380. * The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
  381. * @li thresh:A `Tensor` 0-D.The magnitude threshold that determines if an output value/index pair takes space.
  382. *@par Outputs:
  383. * @li sum_indices:A `Tensor` of type `int64`.
  384. * @li sum_values:A `Tensor`. Has the same type as `x1_values`.
  385. * @li sum_shape:A `Tensor` of type `int64`.
  386. * Compatible SparseAdd operator in Tensorflow
  387. */
  388. REG_OP(SparseAdd)
  389. .INPUT(x1_indices, TensorType({DT_INT64}))
  390. .INPUT(x1_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
  391. DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  392. .INPUT(x1_shape, TensorType({DT_INT64}))
  393. .INPUT(x2_indices, TensorType({DT_INT64}))
  394. .INPUT(x2_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
  395. DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  396. .INPUT(x2_shape, TensorType({DT_INT64}))
  397. .INPUT(thresh, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
  398. DT_INT64, DT_DOUBLE}))
  399. .OUTPUT(sum_indices, TensorType({DT_INT64}))
  400. .OUTPUT(sum_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
  401. DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  402. .OUTPUT(sum_shape, TensorType({DT_INT64}))
  403. .OP_END_FACTORY_REG(SparseAdd)
  404. /**
  405. *@brief Fills empty rows in the input 2-D `SparseTensor` with a default value.
  406. *@par Inputs:
  407. *4 inputs,contains:
  408. * @li indices: A `Tensor` of type `int64`.2-D. the indices of the sparse tensor.
  409. * @li values: A `Tensor`. 1-D. the values of the sparse tensor.
  410. * @li dense_shape: A `Tensor` of type `int64`.1-D. the shape of the sparse tensor.
  411. * @li default_value: `Tensor`. Must have the same type as `values`.\n
  412. *0-D. default value to insert into location `[row, 0, ..., 0]` \n
  413. *for rows missing from the input sparse tensor.
  414. *@par Outputs:
  415. * @li y_indices:A `Tensor` of type `int64`.
  416. * @li y_values:A `Tensor`. Has the same type as `values`.
  417. * @li empty_row_indicator:A `Tensor` of type `bool`.
  418. * @li reverse_index_map:A `Tensor` of type `int64`.
  419. * Compatible SparseFillEmptyRows operator in Tensorflow
  420. */
  421. REG_OP(SparseFillEmptyRows)
  422. .INPUT(indices, TensorType({DT_INT64}))
  423. .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  424. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  425. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  426. .INPUT(dense_shape, TensorType({DT_INT64}))
  427. .INPUT(default_value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
  428. DT_INT16, DT_UINT16, DT_UINT8, \
  429. DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  430. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  431. .OUTPUT(y_indices, TensorType({DT_INT64}))
  432. .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
  433. DT_INT16, DT_UINT16, DT_UINT8, \
  434. DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  435. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  436. .OUTPUT(empty_row_indicator, TensorType({DT_BOOL}))
  437. .OUTPUT(reverse_index_map, TensorType({DT_INT64}))
  438. .OP_END_FACTORY_REG(SparseFillEmptyRows)
  439. /**
  440. *@brief Returns the element-wise max of two SparseTensors.
  441. *@par Inputs:
  442. *6 inputs,contains:
  443. * @li x1_indices:A `Tensor` of type `int64`.2-D. \n
  444. *`N x R` matrix with the indices of non-empty values in a SparseTensor, \n
  445. * in the canonical lexicographic ordering.
  446. * @li x1_values:A `Tensor`. 1-D. the values of the sparse tensor.
  447. * @li x1_shape:A `Tensor` of type `int64`.1-D. the shape of the sparse tensor.
  448. * @li x2_indices:A `Tensor` of type `int64`.2-D. the indices of the sparse tensor.
  449. * @li x2_values:A `Tensor`. 1-D. Must have the same type as `x1_values`.
  450. * @li x2_shape:A `Tensor` of type `int64`.1-D. \n
  451. *counterpart to `a_shape` for the other operand; the two shapes must be equal.
  452. *@par Outputs:
  453. * @li y_indices:A `Tensor` of type `int64`.
  454. * @li y_values:A `Tensor`. Has the same type as `x1_values`.
  455. * Compatible SparseSparseMaximum operator in Tensorflow
  456. */
  457. REG_OP(SparseSparseMaximum)
  458. .INPUT(x1_indices, TensorType({DT_INT64}))
  459. .INPUT(x1_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  460. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
  461. .INPUT(x1_shape, TensorType({DT_INT64}))
  462. .INPUT(x2_indices, TensorType({DT_INT64}))
  463. .INPUT(x2_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  464. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
  465. .INPUT(x2_shape, TensorType({DT_INT64}))
  466. .OUTPUT(y_indices, TensorType({DT_INT64}))
  467. .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
  468. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
  469. .OP_END_FACTORY_REG(SparseSparseMaximum)
  470. /**
  471. *@brief Returns the element-wise min of two SparseTensors.
  472. *@par Inputs:
  473. *6 inputs,contains:
  474. * @li x1_indices:A `Tensor` of type `int64`.2-D. \n
  475. *`N x R` matrix with the indices of non-empty values in a SparseTensor, \n
  476. * in the canonical lexicographic ordering.
  477. * @li x1_values:A `Tensor`. 1-D. the values of the sparse tensor.
  478. * @li x1_shape:A `Tensor` of type `int64`.1-D. the shape of the sparse tensor.
  479. * @li x2_indices:A `Tensor` of type `int64`.2-D. the indices of the sparse tensor.
  480. * @li x2_values:A `Tensor`. 1-D. Must have the same type as `x1_values`.
  481. * @li x2_shape:A `Tensor` of type `int64`.1-D. \n
  482. *counterpart to `a_shape` for the other operand; the two shapes must be equal.
  483. *@par Outputs:
  484. * @li y_indices:A `Tensor` of type `int64`.
  485. * @li y_values:A `Tensor`. Has the same type as `x1_values`.
  486. * Compatible SparseSparseMinimum operator in Tensorflow
  487. */
  488. REG_OP(SparseSparseMinimum)
  489. .INPUT(x1_indices, TensorType({DT_INT64}))
  490. .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, \
  491. DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
  492. DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  493. .INPUT(x1_shape, TensorType({DT_INT64}))
  494. .INPUT(x2_indices, TensorType({DT_INT64}))
  495. .INPUT(x2_values, TensorType({DT_INT64, DT_INT32, \
  496. DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
  497. DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  498. .INPUT(x2_shape, TensorType({DT_INT64}))
  499. .OUTPUT(y_indices, TensorType({DT_INT64}))
  500. .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, \
  501. DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
  502. DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
  503. .OP_END_FACTORY_REG(SparseSparseMinimum)
/**
*@brief Computes the maximum of elements across specified dimensions of a
*SparseTensor, producing a dense output tensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A Tensor of type int64. 2-D. \n
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A Tensor. 1-D. The values of the sparse tensor. \n
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A Tensor of type int64. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A Tensor of type int32. 1-D. \n
*Length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* y: A dense Tensor. Has the same type as "x_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMax.
*/
REG_OP(SparseReduceMax)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMax)
/**
*@brief Computes the maximum of elements across specified dimensions of a
*SparseTensor, producing the result as a SparseTensor \n
*(indices, values, shape) rather than a dense tensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A Tensor of type int64. 2-D. \n
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A Tensor. 1-D. The values of the sparse tensor. \n
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A Tensor of type int64. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A Tensor of type int32. 1-D. \n
*Length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* @li y_indices: A Tensor of type int64. Indices of the output SparseTensor.
* @li y_values: A Tensor. Has the same type as "x_values".
* @li y_shape: A Tensor of type int64. Shape of the output SparseTensor.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMaxSparse.
*/
REG_OP(SparseReduceMaxSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMaxSparse)
/**
*@brief Computes the sum of elements across specified dimensions of a
*SparseTensor, producing a dense output tensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A 2D Tensor of type int64. \n
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor. \n
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32. \n
*A length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* y: A dense Tensor. Has the same type as "x_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSum.
*/
REG_OP(SparseReduceSum)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSum)
/**
*@brief Computes the sum of elements across specified dimensions of a
*SparseTensor, producing the result as a SparseTensor \n
*(indices, values, shape) rather than a dense tensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A 2D Tensor of type int64. \n
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor. \n
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32. \n
*A length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* @li y_indices: A Tensor of type int64. Indices of the output SparseTensor.
* @li y_values: A Tensor. Has the same type as "x_values".
* @li y_shape: A Tensor of type int64. Shape of the output SparseTensor.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSumSparse.
*/
REG_OP(SparseReduceSumSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSumSparse)
/**
*@brief Splits a SparseTensor into "num_split" tensors along one dimension.
*@par Inputs:
*Four inputs, including:
* @li split_dim: A 0D Tensor of type int64. \n
*The dimension along which to split. Must be in the range "[0, rank(shape))".
* @li indices: A 2D Tensor of type int64. \n
*The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. Shape of the SparseTensor.
*@par Attributes:
* num_split: An int that is >= 1. The number of ways to split. Defaults to "1".
*@par Outputs:
* @li y_indices: A list of "num_split" Tensor objects of type int64.
* @li y_values: A list of "num_split" Tensor objects with the same type as "values".
* @li y_shape: A list of "num_split" Tensor objects of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSplit.
*/
REG_OP(SparseSplit)
    .INPUT(split_dim, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_indices, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, \
        DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .DYNAMIC_OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(num_split, Int, 1)
    .OP_END_FACTORY_REG(SparseSplit)
/**
*@brief Generates a sparse cross from a list of sparse and dense tensors.
*@par Inputs:
*Four dynamic inputs, including:
* @li indices: A list of 2D Tensor objects of type int64. \n
*Indices of each input SparseTensor.
* @li values: A list of 1D Tensor objects of type int64 or string. \n
*Values of each SparseTensor.
* @li shapes: A list with the same length as "indices" of 1D Tensor objects of type int64. \n
*Shapes of each SparseTensor.
* @li dense_inputs: A list of 2D Tensor objects of type int64 or string. \n
*Columns represented by dense Tensors.
*@par Attributes:
* @li N: An int. The number of input SparseTensors. Defaults to "0".
* @li hashed_output: A required bool. \n
*If true, returns the hash of the cross instead of the string.
* @li num_buckets: An int that is >= 0. Used only if "hashed_output" is true. \n
*output = hashed_value % num_buckets if num_buckets > 0, else "hashed_value". Defaults to "0".
* @li hash_key: A required int. The hash key used by the "FingerprintCat64" \n
*function to combine the cross fingerprints.
* @li out_type: A required type, either int64 or string. Type of "output_values".
* @li internal_type: A required type, either int64 or string.
*@par Outputs:
* @li output_indices: A Tensor of type int64.
* @li output_values: A Tensor of type "out_type".
* @li output_shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseCross.
*/
REG_OP(SparseCross)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values, TensorType({DT_INT64, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(dense_inputs, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_indices, TensorType({DT_INT64}))
    .OUTPUT(output_values, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_shape, TensorType({DT_INT64}))
    .ATTR(N, Int, 0)
    .REQUIRED_ATTR(hashed_output, Bool)
    .ATTR(num_buckets, Int, 0)
    .REQUIRED_ATTR(hash_key, Int)
    .REQUIRED_ATTR(out_type, Type)
    .REQUIRED_ATTR(internal_type, Type)
    .OP_END_FACTORY_REG(SparseCross)
/**
*@brief Adds an "N"-minibatch SparseTensor to a "SparseTensorsMap", \n
*returning "N" handles.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. \n
*The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor.
*@par Attributes:
* @li container: An optional string. Defaults to "". \n
*The container name for the "SparseTensorsMap" created by this op.
* @li shared_name: An optional string. Defaults to "". \n
*The shared name for the "SparseTensorsMap" created by this op.
*@par Outputs:
* handles: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddManySparseToTensorsMap.
*/
REG_OP(AddManySparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handles, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddManySparseToTensorsMap)
/**
*@brief Reads SparseTensors from a "SparseTensorsMap" and concatenates them.
*@par Inputs:
*One input, including:
* handles: A 1D Tensor of type int64. \n
*The "N" serialized SparseTensor objects.
*@par Attributes:
* @li dtype: A required type. \n
*The "dtype" of the SparseTensor objects stored in the "SparseTensorsMap".
* @li container: An optional string. Defaults to "". \n
*The container name for the "SparseTensorsMap" read by this op.
* @li shared_name: An optional string. Defaults to "". \n
*The shared name for the "SparseTensorsMap" read by this op.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator TakeManySparseFromTensorsMap.
*/
REG_OP(TakeManySparseFromTensorsMap)
    .INPUT(handles, TensorType({DT_INT64}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(TakeManySparseFromTensorsMap)
/**
*@brief Serializes a SparseTensor into a [3] Tensor object.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
*@par Attributes:
* out_type: An optional type. Defaults to "string".
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeSparse.
*/
REG_OP(SerializeSparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeSparse)
/**
*@brief Serializes an "N"-minibatch SparseTensor into an [N, 3] Tensor object.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor.
*@par Attributes:
* out_type: An optional type. Defaults to "string".
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeManySparse.
*/
REG_OP(SerializeManySparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeManySparse)
/**
*@brief Deserializes SparseTensor objects.
*@par Inputs:
*One input, including:
* serialized_sparse: A Tensor. The serialized SparseTensor objects. \n
*The last dimension must have 3 columns.
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeSparse.
*/
REG_OP(DeserializeSparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeSparse)
/**
*@brief Deserializes and concatenates SparseTensors from a serialized minibatch.
*@par Inputs:
*One input, including:
* serialized_sparse: A 2D Tensor of type string. \n
*The "N" serialized SparseTensor objects. Must have 3 columns.
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeManySparse.
*/
REG_OP(DeserializeManySparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeManySparse)
  843. } // namespace ge
  844. #endif // GE_OP_SPARSE_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示