
sparse_ops.h 44 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file sparse_ops.h
 * \brief
 */

#ifndef GE_OP_SPARSE_OPS_H_
#define GE_OP_SPARSE_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Applies softmax to a batched ND SparseTensor.
*@par Inputs:
*The input must be a batched ND SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: A vector Tensor of type float or double. 1D. The values of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
*@par Outputs:
*y: A vector Tensor. 1D. Has the same type as "values".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SparseSoftmax.
*/
REG_OP(SparseSoftmax)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSoftmax)

/**
*@brief Adds up a SparseTensor and a dense Tensor, producing a dense Tensor.
*@par Inputs:
*Inputs "x1_*" must be SparseTensors and "x2" must be a dense Tensor.
* @li x1_indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor.
* @li x2: A matrix Tensor. Has the same type and same shape as the SparseTensors.
*@par Outputs:
*y: A matrix Tensor. Has the same type and same shape as "x2".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseAdd.
*/
REG_OP(SparseTensorDenseAdd)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseTensorDenseAdd)

/**
*@brief Reorders a SparseTensor into the canonical, row-major ordering.
*@par Inputs:
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: Values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
*@par Outputs:
*@li y_indices: The indices of the SparseTensor. Has the same type as "indices".
*@li y_values: The values of the SparseTensor. Has the same type as "values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReorder.
*/
REG_OP(SparseReorder)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseReorder)

/**
*@brief Reshapes a SparseTensor to represent values in a new dense shape.
*@par Inputs:
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
* @li new_shape: A 1D Tensor of type int64. The requested new dense shape.
*@par Outputs:
*@li y_indices: A Tensor of type int64. The indices in the new dense shape.
*@li y_shape: A Tensor of type int64. The new dense shape.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReshape.
*/
REG_OP(SparseReshape)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(new_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseReshape)

/**
*@brief Adds up a SparseTensor and a dense Tensor.
*@par Inputs:
*(1) Broadcasts the dense side to have the same shape as the sparse side, if eligible;\n
*(2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values".
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseAdd.
*/
REG_OP(SparseDenseCwiseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseAdd)

/**
*@brief Divides a SparseTensor by a dense Tensor.
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values".
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseDiv.
*/
REG_OP(SparseDenseCwiseDiv)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseDiv)

/**
*@brief Multiplies a SparseTensor by a dense Tensor.
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values".
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseMul.
*/
REG_OP(SparseDenseCwiseMul)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseMul)

/**
*@brief Adds a SparseTensor to a SparseTensorsMap.
*@par Inputs:
* The input tensor must be a SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: The values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
*@par Attributes:
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "".
*@par Outputs:
*handle: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddSparseToTensorsMap.
*/
REG_OP(AddSparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handle, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddSparseToTensorsMap)

/**
*@brief The gradient operator for the SparseSlice op.
*@par Inputs:
* @li backprop_val_grad: A 1D Tensor. The gradient with respect to the non-empty values of the sliced SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li new_indices: A matrix Tensor of type int64. 2D. The indices of the sliced SparseTensor.
*@par Outputs:
*y_grad: A Tensor. Has the same type as "backprop_val_grad". The gradient with respect to the non-empty values of the input SparseTensor.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSliceGrad.
*/
REG_OP(SparseSliceGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(new_indices, TensorType({DT_INT64}))
    .OUTPUT(y_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSliceGrad)

/**
*@brief Slices a SparseTensor based on the "start" and "size".
*@par Inputs:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li size: A 1D Tensor of type int64. The size of the slice.
*@par Outputs:
*@li y_indices: A Tensor of type int64.
*@li y_values: A Tensor. Has the same type as "values".
*@li y_shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSlice.
*/
REG_OP(SparseSlice)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(size, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseSlice)

/**
*@brief The gradient operator for the SparseAdd op.
*@par Inputs:
* @li backprop_val_grad: A 1D Tensor with shape [nnz(sum)]. The gradient with respect to the non-empty values of the sum.
* @li x1_indices: A 2D Tensor of type int64. The indices of the SparseTensor A, with size [nnz(A), ndims].
* @li x2_indices: A 2D Tensor of type int64. The indices of the SparseTensor B, with size [nnz(B), ndims].
* @li sum_indices: A 2D Tensor of type int64. The indices of the sum SparseTensor, with size [nnz(sum), ndims].
*@par Outputs:
*@li x1_val_grad: A Tensor. Has the same type as "backprop_val_grad".
*@li x2_val_grad: A Tensor. Has the same type as "backprop_val_grad".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAddGrad.
*/
REG_OP(SparseAddGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(x1_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(x2_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseAddGrad)

/**
*@brief The gradient of SparseFillEmptyRows.
*@par Inputs:
* @li reverse_index_map: A 1D Tensor of type int64. The reverse index map from SparseFillEmptyRows.
* @li grad_values: A 1D Tensor. The gradients from backprop.
*@par Outputs:
*@li y_value: A Tensor. Has the same type as "grad_values".
*@li y_default_value: A Tensor. Has the same type as "grad_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRowsGrad.
*/
REG_OP(SparseFillEmptyRowsGrad)
    .INPUT(reverse_index_map, TensorType({DT_INT64}))
    .INPUT(grad_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseFillEmptyRowsGrad)

/**
*@brief Multiplies SparseTensor A (of rank 2) by dense matrix B.
*@par Inputs:
* @li x1_indices: A 2D Tensor of type int32 or int64. The indices of the SparseTensor, with size [nnz, 2].
* @li x1_values: A 1D Tensor. The values of the SparseTensor, with size [nnz].
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor, with size [2].
* @li x2: A dense matrix Tensor of the same type as "x1_values". 2D.
*@par Attributes:
*@li adjoint_a: An optional bool. Defaults to "False". Use the adjoint of A in the matrix multiply.
* If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A).
*@li adjoint_b: An optional bool. Defaults to "False". Use the adjoint of B in the matrix multiply.
* If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B).
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseMatMul.
*/
REG_OP(SparseTensorDenseMatMul)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \
        DT_COMPLEX64, DT_COMPLEX128, DT_FLOAT16}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .ATTR(adjoint_a, Bool, false)
    .ATTR(adjoint_b, Bool, false)
    .OP_END_FACTORY_REG(SparseTensorDenseMatMul)

/**
*@brief Converts a sparse representation into a dense tensor.
*@par Inputs:
* @li indices: A 0D, 1D, or 2D Tensor of type int32 or int64. The indices at which "values" will be placed in the dense output.
* @li output_shape: A 1D Tensor of the same type as "indices". The shape of the dense output tensor.
* @li values: A 1D Tensor. Values corresponding to each row of "indices",
* or a scalar value to be used for all sparse indices.
* @li default_value: A Tensor of the same type as "values". The value to set for indices not specified in "indices".
*@par Attributes:
* validate_indices: An optional bool. Defaults to "true". If true, the indices are checked to make sure they are sorted in lexicographic order and contain no repeats.
*@par Outputs:
*y: A Tensor. Has the same type as "values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseToDense.
*/
REG_OP(SparseToDense)
    .INPUT(indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(output_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .INPUT(default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, \
        DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .ATTR(validate_indices, Bool, true)
    .OP_END_FACTORY_REG(SparseToDense)

/**
*@brief Concatenates a list of `SparseTensor` objects along the specified dimension.\n
*Concatenation is with respect to the dense versions of these sparse tensors.
*@par Inputs:
*Three dynamic inputs, including:
* @li indices: A list of at least 2 `Tensor` objects with type `int64`. 2-D. \n
*Indices of each input `SparseTensor`.
* @li values: A list with the same length as `indices` of `Tensor` objects with the same type.
* @li shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`. 1-D. \n
* Shapes of each `SparseTensor`.
*@par Attributes:
*@li concat_dim: An `int`. The dimension to concatenate along.
*@li N: The number of input `SparseTensor` objects.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li y_shape: A `Tensor` of type `int64`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseConcat.
*/
REG_OP(SparseConcat)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(concat_dim, Int, 0)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(SparseConcat)

/**
*@brief Adds two `SparseTensor` objects to produce another `SparseTensor`.
*@par Inputs:
*Seven inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D. \n
* The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
* @li x1_values: A `Tensor`. Must be one of the following types: float, int8, int16, int32, int64, double, complex64, complex128.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The `shape` of the first `SparseTensor`, \n
* size `[ndims]` Vector.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The `indices` of the second `SparseTensor`, \n
* size `[nnz, ndims]` Matrix.
* @li x2_values: A `Tensor`. Must have the same type as `x1_values`. 1-D. \n
* The `values` of the second `SparseTensor`, size `[nnz]` Vector.
* @li x2_shape: A `Tensor` of type `int64`. 1-D. \n
* The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
* @li thresh: A 0-D `Tensor`. The magnitude threshold that determines if an output value/index pair takes space.
*@par Outputs:
* @li sum_indices: A `Tensor` of type `int64`.
* @li sum_values: A `Tensor`. Has the same type as `x1_values`.
* @li sum_shape: A `Tensor` of type `int64`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAdd.
*/
REG_OP(SparseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .INPUT(thresh, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE}))
    .OUTPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(sum_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(sum_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseAdd)

/**
*@brief Fills empty rows in the input 2-D `SparseTensor` with a default value.
*@par Inputs:
*Four inputs, including:
* @li indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li dense_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li default_value: A `Tensor`. Must have the same type as `values`.\n
*0-D. The default value to insert into location `[row, 0, ..., 0]` \n
*for rows missing from the input sparse tensor.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li empty_row_indicator: A `Tensor` of type `bool`.
* @li reverse_index_map: A `Tensor` of type `int64`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRows.
*/
REG_OP(SparseFillEmptyRows)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(dense_shape, TensorType({DT_INT64}))
    .INPUT(default_value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(empty_row_indicator, TensorType({DT_BOOL}))
    .OUTPUT(reverse_index_map, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseFillEmptyRows)

/**
*@brief Returns the element-wise max of two SparseTensors.
*@par Inputs:
*Six inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D. \n
*`N x R` matrix with the indices of non-empty values in a SparseTensor, \n
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D. \n
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMaximum.
*/
REG_OP(SparseSparseMaximum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSparseMaximum)

/**
*@brief Returns the element-wise min of two SparseTensors.
*@par Inputs:
*Six inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D. \n
*`N x R` matrix with the indices of non-empty values in a SparseTensor, \n
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D. \n
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMinimum.
*/
REG_OP(SparseSparseMinimum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSparseMinimum)

/**
*@brief Computes the max of elements across dimensions of a SparseTensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D. \n
*`N x R` matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor. \n
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D. \n
*Length-`K` vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`. \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* y: A `Tensor`. Has the same type as `x_values`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMax.
*/
REG_OP(SparseReduceMax)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMax)

/**
*@brief Computes the max of elements across dimensions of a SparseTensor and returns a SparseTensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D. \n
*`N x R` matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor. \n
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D. \n
*Length-`K` vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`. \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x_values`.
* @li y_shape: A `Tensor` of type `int64`.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMaxSparse.
*/
REG_OP(SparseReduceMaxSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMaxSparse)

/**
*@brief Computes the sum of elements across dimensions of a SparseTensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32. \n
*A length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* y: A Tensor. Has the same type as "x_values".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSum.
*/
REG_OP(SparseReduceSum)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSum)

/**
*@brief Computes the sum of elements across dimensions of a SparseTensor and returns a SparseTensor.
*@par Inputs:
*Four inputs, including:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a \n
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32. \n
* A length-"K" vector containing the reduction axes.
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False". \n
*If true, retains reduced dimensions with length 1.
*@par Outputs:
* @li y_indices: A Tensor of type int64.
* @li y_values: A Tensor. Has the same type as "x_values".
* @li y_shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSumSparse.
*/
REG_OP(SparseReduceSumSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSumSparse)

/**
*@brief Splits a SparseTensor into "num_split" tensors along one dimension.
*@par Inputs:
*Four inputs, including:
* @li split_dim: A 0D Tensor of type int64.\n
*The dimension along which to split. Must be in the range "[0, rank(shape))".
* @li indices: A 2D Tensor of type int64.\n
* The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. Shape of the SparseTensor.
*@par Attributes:
* num_split: An int that is >= 1. The number of ways to split.
*@par Outputs:
* @li y_indices: A list of "num_split" Tensor objects of type int64.
* @li y_values: A list of "num_split" Tensor objects with the same type as "values".
* @li y_shape: A list of "num_split" Tensor objects of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSplit.
*/
REG_OP(SparseSplit)
    .INPUT(split_dim, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_indices, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, \
        DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .DYNAMIC_OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(num_split, Int, 1)
    .OP_END_FACTORY_REG(SparseSplit)

/**
*@brief Generates sparse cross from a list of sparse and dense tensors.
*@par Inputs:
*Four dynamic inputs, including:
* @li indices: A list of 2D Tensor objects of type int64.
* Indices of each input SparseTensor.
* @li values: A list of 1D Tensor objects of type int64 or string.
* Values of each SparseTensor.
* @li shapes: A list with the same length as "indices" of 1D Tensor objects of type int64.
* Shapes of each SparseTensor.
* @li dense_inputs: A list of 2D Tensor objects of type int64 or string.
* Columns represented by dense Tensor.
*@par Attributes:
* @li N: The number of input SparseTensors.
* @li hashed_output: A bool. If true, returns the hash of the cross instead of the string.
* @li num_buckets: An int that is >= 0. It is used if "hashed_output" is true. \n
*output = hashed_value % num_buckets if num_buckets > 0, else hashed_value.
* @li hash_key: An int. Specifies the hash key that will be used by the "FingerprintCat64"\n
*function to combine the cross fingerprints.
* @li out_type: A type, either int64 or string.
* @li internal_type: A type, either int64 or string.
*@par Outputs:
* @li output_indices: A Tensor of type int64.
* @li output_values: A Tensor of type "out_type".
* @li output_shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseCross.
*/
REG_OP(SparseCross)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values, TensorType({DT_INT64, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(dense_inputs, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_indices, TensorType({DT_INT64}))
    .OUTPUT(output_values, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_shape, TensorType({DT_INT64}))
    .ATTR(N, Int, 0)
    .REQUIRED_ATTR(hashed_output, Bool)
    .ATTR(num_buckets, Int, 0)
    .REQUIRED_ATTR(hash_key, Int)
    .REQUIRED_ATTR(out_type, Type)
    .REQUIRED_ATTR(internal_type, Type)
    .OP_END_FACTORY_REG(SparseCross)

/**
*@brief Adds an "N"-minibatch SparseTensor to a "SparseTensorsMap", returning "N" handles.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. \n
* The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor.
*@par Attributes:
* @li container: An optional string. Defaults to "". \n
*The container name for the "SparseTensorsMap" created by this op.
* @li shared_name: An optional string. Defaults to "". \n
*The shared name for the "SparseTensorsMap" created by this op.
*@par Outputs:
* handles: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddManySparseToTensorsMap.
*/
REG_OP(AddManySparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handles, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddManySparseToTensorsMap)

/**
*@brief Reads SparseTensors from a "SparseTensorsMap" and concatenates them.
*@par Inputs:
*One input:
* handles: A 1D Tensor of type int64. \n
* The "N" serialized SparseTensor objects.
*@par Attributes:
* @li dtype: A required type. The "dtype" of the SparseTensor objects stored in the "SparseTensorsMap".
* @li container: An optional string. Defaults to "". \n
*The container name for the "SparseTensorsMap" read by this op.
* @li shared_name: An optional string. Defaults to "". \n
*The shared name for the "SparseTensorsMap" read by this op.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator TakeManySparseFromTensorsMap.
*/
REG_OP(TakeManySparseFromTensorsMap)
    .INPUT(handles, TensorType({DT_INT64}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(TakeManySparseFromTensorsMap)

/**
*@brief Serializes a SparseTensor into a [3] Tensor object.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
*@par Attributes:
* out_type: An optional type. Defaults to "string".
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeSparse.
*/
REG_OP(SerializeSparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeSparse)

/**
*@brief Serializes an "N"-minibatch SparseTensor into an [N, 3] Tensor object.
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor.
*@par Attributes:
* out_type: An optional type. Defaults to "string".
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeManySparse.
*/
REG_OP(SerializeManySparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeManySparse)

/**
*@brief Deserializes SparseTensor objects.
*@par Inputs:
*One input:
* serialized_sparse: A Tensor. The serialized SparseTensor objects. \n
*The last dimension must have 3 columns.
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeSparse.
*/
REG_OP(DeserializeSparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeSparse)

/**
*@brief Deserializes and concatenates SparseTensors from a serialized minibatch.
*@par Inputs:
*One input:
* serialized_sparse: A 2D Tensor of type string. \n
*The "N" serialized SparseTensor objects. Must have 3 columns.
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects.
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeManySparse.
*/
REG_OP(DeserializeManySparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeManySparse)
} // namespace ge

#endif // GE_OP_SPARSE_OPS_H_
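
All of the operators above use the COO form of a SparseTensor: an [nnz, ndims] "indices" matrix, an "nnz"-element "values" vector, and a dense "shape" (or "dense_shape") vector. The short sketch below is a plain, standalone C++ illustration (independent of the GE headers; the Densify helper is a hypothetical name) of how SparseToDense interprets that triple: start from "default_value", then scatter each sparse value to the flattened position given by its index row, with a bounds check in the spirit of "validate_indices".

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Densify a COO SparseTensor: "indices" is an [nnz, ndims] matrix (one row per
// non-empty element), "values" holds one value per row, and "dense_shape" gives
// the extent of the dense output.
std::vector<float> Densify(const std::vector<std::vector<int64_t>> &indices,
                           const std::vector<float> &values,
                           const std::vector<int64_t> &dense_shape,
                           float default_value) {
  int64_t total = 1;
  for (int64_t d : dense_shape) total *= d;
  // Every position starts at default_value, mirroring the documented behaviour.
  std::vector<float> dense(static_cast<size_t>(total), default_value);
  for (size_t i = 0; i < indices.size(); ++i) {
    int64_t flat = 0;
    for (size_t d = 0; d < dense_shape.size(); ++d) {
      if (indices[i][d] < 0 || indices[i][d] >= dense_shape[d]) {
        throw std::out_of_range("index outside dense_shape");  // simple validation
      }
      flat = flat * dense_shape[d] + indices[i][d];  // row-major flattening
    }
    dense[static_cast<size_t>(flat)] = values[i];
  }
  return dense;
}

int main() {
  // SparseTensor with dense shape [2, 3]: value 1.0 at (0, 1), value 2.0 at (1, 2).
  const std::vector<std::vector<int64_t>> indices = {{0, 1}, {1, 2}};
  const std::vector<float> values = {1.0f, 2.0f};
  const std::vector<int64_t> dense_shape = {2, 3};
  for (float v : Densify(indices, values, dense_shape, 0.0f)) std::cout << v << ' ';
  std::cout << '\n';  // prints: 0 1 0 0 0 2
  return 0;
}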

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
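
For context on how the registrations in sparse_ops.h meet the GE API mentioned above, here is a minimal, non-authoritative sketch of wiring a single SparseToDense node into a graph. It assumes the set_input_*/set_attr_* setters that operator_reg.h generates for each REG_OP, the ge::op::Data placeholder (assumed to live in array_ops.h), and Graph::SetInputs/SetOutputs from graph/graph.h; exact header paths, namespaces, and build settings may differ, so treat this as an outline rather than a verified build target.

#include <vector>

#include "graph/graph.h"
#include "sparse_ops.h"   // the registrations shown above (path is illustrative)
#include "array_ops.h"    // assumed location of the Data placeholder op

int main() {
  // Placeholders for the four SparseToDense inputs; set_attr_index orders them
  // as graph inputs.
  auto indices = ge::op::Data("indices").set_attr_index(0);
  auto output_shape = ge::op::Data("output_shape").set_attr_index(1);
  auto values = ge::op::Data("values").set_attr_index(2);
  auto default_value = ge::op::Data("default_value").set_attr_index(3);

  // Instantiate the operator registered by REG_OP(SparseToDense) and connect it
  // (setter names follow the usual generated pattern and are an assumption here).
  auto to_dense = ge::op::SparseToDense("sparse_to_dense")
                      .set_input_indices(indices)
                      .set_input_output_shape(output_shape)
                      .set_input_values(values)
                      .set_input_default_value(default_value)
                      .set_attr_validate_indices(true);

  // Assemble the graph that GE would then optimize and compile for the device.
  std::vector<ge::Operator> inputs{indices, output_shape, values, default_value};
  std::vector<ge::Operator> outputs{to_dense};
  ge::Graph graph("sparse_to_dense_graph");
  graph.SetInputs(inputs).SetOutputs(outputs);
  return 0;
}

From this point the graph would normally be handed to GE Core (for example through a session or the offline ATC toolchain) for the optimization and compilation steps described above; that part is omitted here.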