
dragon_backend.py

#! /usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

import numpy as np
import dragon as D
from dragon.core.eager import context
from dragon.core.ops import init_ops
from dragon.core.ops import vision_ops

_dtypeDict = [
    'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'
]
# TODO NotImplemented
DType = None
float16 = 'float16'
float32 = 'float32'
float64 = 'float64'
int8 = 'int8'
int16 = 'int16'
int32 = 'int32'
int64 = 'int64'
uint8 = 'uint8'
uint16 = 'uint16'
uint32 = 'uint32'
uint64 = 'uint64'

# isinstance input output
# TODO NotImplemented
# TensorLike = None


def _getter(init_fn, **kwargs):
    """Return a named eager tensor created by `init_fn`."""
    with context.eager_mode():
        value = init_fn(**kwargs)
    value._name = kwargs.get('name', value.id)
    return value


def set_context(**kwargs):
    raise Exception("Using the Dragon backend, you don't need to set a context.")


def get_tensor_shape(x):
    return x.shape
# initializers
def zeros(shape, dtype='float32'):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : list or tuple
        A list of integers, a tuple of integers, or a 1-D Tensor of type int32.
    dtype : str
        The DType of an element in the resulting Tensor.

    Returns
    -------
        A Tensor with all elements set to zero.
    """
    return _getter(
        init_ops.fill,
        value=0,
        shape=shape,
        dtype=dtype,
    )


def ones(shape, dtype='float32'):
    """
    Creates a tensor with all elements set to one.

    Parameters
    ----------
    shape : list or tuple
        A list of integers, a tuple of integers, or a 1-D Tensor of type int32.
    dtype : str
        The DType of an element in the resulting Tensor.

    Returns
    -------
        A Tensor with all elements set to one.
    """
    return _getter(
        init_ops.fill,
        value=1,
        shape=shape,
        dtype=dtype,
    )


def constant(value, shape, dtype='float32'):
    """
    Creates a constant tensor from a tensor-like object.

    Parameters
    ----------
    value : scalar or list
        A constant value (or list) of output type dtype.
    shape : tuple
        Optional dimensions of the resulting tensor.
    dtype : str
        The type of the elements of the resulting tensor.

    Returns
    -------
        A constant Tensor.
    """
    # shape = shape[::-1]
    return _getter(
        init_ops.fill,
        value=value,
        shape=shape,
        dtype=dtype,
    )
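# Illustrative usage sketch for the initializers above (assuming a working
# Dragon installation with eager mode available), e.g.:
#
#     z = zeros((2, 3))                  # 2x3 float32 tensor of zeros
#     o = ones((2, 3), dtype='float64')  # 2x3 float64 tensor of ones
#     c = constant(7, shape=(2, 2))      # 2x2 tensor filled with 7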
def random_uniform(shape, minval=0, maxval=None, dtype='float32', seed=None):
    """
    Outputs random values from a uniform distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval : int
        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
    maxval : int
        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
    dtype : str
        The type of the output: float16, float32, float64, int32, or int64.
    seed : int
        Used in combination with dragon.random.set_seed to create a reproducible sequence of tensors across multiple calls.

    Returns
    -------
        A tensor of the specified shape filled with random uniform values.
    """
    return _getter(init_ops.random_uniform, low=minval, high=maxval, shape=shape, dtype=dtype)


def random_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None):
    """
    Outputs random values from a normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution.
    stddev : float
        The standard deviation of the normal distribution.
    dtype : str
        The type of the output.
    seed : int
        A Python integer. Used to create a random seed for the distribution.

    Returns
    -------
        A tensor of the specified shape filled with random normal values.
    """
    return _getter(
        init_ops.random_normal,
        mean=mean,
        std=stddev,
        shape=shape,
        dtype=dtype,
    )


def truncated_normal(shape, mean=0.0, stddev=1.0, dtype='float32', seed=None):
    """
    Outputs random values from a truncated normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution.
    stddev : float
        The standard deviation of the normal distribution.
    dtype : str
        The type of the output.
    seed : int
        A Python integer. Used to create a random seed for the distribution.

    Returns
    -------
        A tensor of the specified shape filled with random truncated normal values.
    """
    return _getter(
        init_ops.truncated_normal,
        mean=mean,
        std=stddev,
        shape=shape,
        dtype=dtype,
    )
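# Illustrative usage sketch for the random initializers above (shapes and
# distribution parameters are examples only):
#
#     u = random_uniform((3, 3), minval=0, maxval=1)     # uniform in [0, 1)
#     n = random_normal((3, 3), mean=0.0, stddev=0.02)   # normal(0, 0.02)
#     t = truncated_normal((3, 3), stddev=0.02)          # truncated normal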
def he_normal(shape, dtype, seed=None):
    """
    He normal initializer.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    dtype : str
        The type of the output.
    seed : int
        A Python integer. Used to seed the random generator.

    Returns
    -------
        A tensor of the specified shape filled with He normal values.
    """
    # shape = shape[::-1]
    raise NotImplementedError("He_Normal is not implemented")


def Variable(initial_value, name, trainable=None):
    """
    Creates a new variable with value initial_value.

    Parameters
    ----------
    initial_value : tensor
        A Tensor, or a Python object convertible to a Tensor.
    name : str
        Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically.

    Returns
    -------
        Variable
    """
    return D.Tensor(name=name, shape=initial_value)
class MatMul(object):

    def __init__(self):
        pass

    def __call__(self, a, b):
        inputs = [a, b]
        return D.math.matmul(inputs)


def matmul(a, b):
    """
    Multiplies matrix a by matrix b, producing a * b.

    Parameters
    ----------
    a : tensor
        A tensor of type float16, float32, float64, int32, complex64 or complex128, with rank > 1.
    b : tensor
        A tensor with the same type and rank as a.

    Returns
    -------
        A Tensor of the same type as a and b.
    """
    inputs = [a, b]
    return D.math.matmul(inputs)


def add(value, bias):
    """
    Returns value + bias element-wise.

    Parameters
    ----------
    value : tensor
        Must be one of the following types: bfloat16, half, float32, float64,
        uint8, int8, int16, int32, int64, complex64, complex128, string.
    bias : tensor
        Must have the same type as value.

    Returns
    -------
        A Tensor. Has the same type as value.
    """
    inputs = [value, bias]
    return D.math.add(inputs)
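# Illustrative usage sketch for matmul/add above (shapes are examples only):
#
#     x = random_normal((4, 8))
#     w = random_normal((8, 16))
#     y = matmul(x, w)               # (4, 16)
#     y = add(y, ones((4, 16)))      # element-wise add of a same-shape tensor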
def dtypes(dt):
    """
    Data dtypes.

    Parameters
    ----------
    dt : str
        One of 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
        'int32', 'int64', 'float16', 'float32', 'float64', 'DType'.

    Returns
    -------
        Data dtype.
    """
    if dt not in _dtypeDict:
        raise Exception("Unsupported dtype: {}".format(dt))
    return dt


def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : tensor
        Must have the same type as x.

    Returns
    -------
        A Tensor. Has the same type as x.
    """
    inputs = [x, y]
    return D.math.minimum(inputs)
class FlattenReshape(object):

    def __init__(self):
        pass

    def __call__(self, inputs):
        dim = 1
        for d in get_tensor_shape(inputs)[1:]:
            dim *= d
        return D.reshape(inputs, [-1, dim])


class Reshape(object):

    def __init__(self, shape):
        self.shape = shape

    def __call__(self, tensor):
        return D.reshape(tensor, shape=self.shape)


def reshape(tensor, shape):
    """
    Reshapes a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    shape : tensor
        Defines the shape of the output tensor.

    Returns
    -------
        A Tensor. Has the same type as tensor.
    """
    return D.reshape(tensor, shape=shape)


class Concat(object):

    def __init__(self, axis):
        super(Concat, self).__init__()
        self.axis = axis

    def __call__(self, values):
        return D.concat(values=values, axis=self.axis)


def concat(values, axis):
    """
    Concatenates tensors along one dimension.

    Parameters
    ----------
    values : list
        A list of Tensor objects or a single Tensor.
    axis : int
        0-D int32 Tensor. Dimension along which to concatenate.

    Returns
    -------
        A Tensor resulting from concatenation of the input tensors.
    """
    return D.concat(values, axis=axis)
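# Illustrative usage sketch for reshape/concat above:
#
#     x = ones((2, 3, 4))
#     flat = reshape(x, (2, 12))        # merge the last two axes
#     both = concat([flat, flat], 0)    # shape (4, 12)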
def convert_to_tensor(value, dtype=None):
    """
    Converts the given value to a Tensor.

    Parameters
    ----------
    value : object
        An object whose type has a registered Tensor conversion function.
    dtype : str, optional
        Optional element type for the returned tensor. If missing, the type is inferred from the type of value.

    Returns
    -------
        A Tensor based on value.
    """
    return D.Tensor.convert_to(value, dtype)


def sqrt(x):
    """
    Computes the square root of x element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.

    Returns
    -------
        A Tensor. Has the same type as x.
    """
    return D.math.sqrt(x)
class ReduceSum(object):

    def __init__(self, axis):
        pass

    def construct(self, input):
        pass


class ReduceMean(object):

    def __init__(self, axis):
        if axis == [1, 2]:
            self.data_format = 'NHWC'
        elif axis == [2, 3]:
            self.data_format = 'NCHW'
        else:
            raise Exception("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")

    def __call__(self, inputs):
        return vision_ops.pool2d(
            inputs,
            kernel_shape=1,
            strides=1,
            pads=0,
            mode='AVG',
            global_pooling=True,
            data_format=self.data_format,
        )


def reduce_mean(input_tensor, axis=None):
    """
    Computes the mean of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
        The reduced tensor.
    """
    return D.mean(input_tensor, axes=axis)
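# Illustrative usage sketch for reduce_mean/ReduceMean above (ReduceMean only
# accepts axis [1, 2] for NHWC or [2, 3] for NCHW inputs):
#
#     x = random_normal((8, 32, 32, 16))    # NHWC feature map
#     m = reduce_mean(x, axis=1)            # mean over the height axis
#     gap = ReduceMean(axis=[1, 2])(x)      # global average pooling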
class ReduceMax(object):

    def __init__(self, axis):
        if axis == [1, 2]:
            self.data_format = 'NHWC'
        elif axis == [2, 3]:
            self.data_format = 'NCHW'
        else:
            raise Exception("`data_format` should have one of the following values: [`channels_last`, `channels_first`]")

    def __call__(self, inputs):
        return vision_ops.pool2d(
            inputs, kernel_shape=1, strides=1, pads=0, mode='MAX', global_pooling=True, data_format=self.data_format
        )


def reduce_max(input_tensor, axis=None):
    """
    Computes the maximum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
        The reduced tensor.
    """
    return D.max(input_tensor, axis)


def reduce_min(input_tensor, axis=None):
    """
    Computes the minimum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
        The reduced tensor.
    """
    return D.min(input_tensor, axis)
class Pad(object):

    def __init__(self, paddings, mode="REFLECT"):
        if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
            raise Exception("Unsupported mode: {}".format(mode))
        if mode == 'SYMMETRIC':
            mode = 'EDGE'
        self.paddings = paddings
        self.mode = mode

    def __call__(self, x):
        outputs = D.pad(x, pads=self.paddings, mode=self.mode, value=0)
        return outputs


def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
    """
    Pads a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    paddings : tuple
        A tuple of type int32.
    mode : str
        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
    constant_values : int
        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.

    Returns
    -------
        A Tensor. Has the same type as tensor.
    """
    if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
        raise Exception("Unsupported mode: {}".format(mode))
    if mode == 'SYMMETRIC':
        mode = 'EDGE'
    outputs = D.pad(tensor, pads=paddings, mode=mode, value=constant_values)
    return outputs
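# Illustrative usage sketch for pad above, assuming TensorFlow-style
# per-dimension (before, after) pad pairs, e.g. for a 2-D tensor:
#
#     x = ones((3, 3))
#     y = pad(x, paddings=((1, 1), (2, 2)), mode='CONSTANT', constant_values=0)
#     # one row of zeros above/below, two columns of zeros left/right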
class Unstack(object):

    def __init__(self, axis, num=None):
        self.axis = axis
        self.num = num

    def __call__(self, values):
        raise NotImplementedError


class Stack(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, values):
        return D.stack(values, axis=self.axis)


def stack(values, axis=0):
    """
    Stacks a list of rank-R tensors into one rank-(R+1) tensor.

    Parameters
    ----------
    values : list
        A list of Tensor objects with the same shape and type.
    axis : int
        The axis to stack along. Defaults to the first dimension.
        Negative values wrap around, so the valid range is [-(R+1), R+1).

    Returns
    -------
        A stacked Tensor with the same type as values.
    """
    return D.stack(values, axis=axis)


class Meshgrid(object):

    def __init__(self, indexing='xy'):
        super(Meshgrid, self).__init__()
        self.index = indexing

    def __call__(self, inputs):
        pass


def meshgrid(x, y):
    """
    Broadcasts parameters for evaluation on an N-D grid.

    Parameters
    ----------
    x : tensor
        A Tensor with rank 1.
    y : tensor
        A Tensor with rank 1.

    Returns
    -------
        A list of N Tensors with rank N.
    """
    pass
def range(start, limit=None, delta=1, dtype=None):
    """
    Creates a sequence of numbers.

    Parameters
    ----------
    start : tensor
        A 0-D Tensor (scalar). Acts as the first entry in the range if limit is not None;
        otherwise, acts as the range limit and the first entry defaults to 0.
    limit : tensor
        A 0-D Tensor (scalar). Upper limit of the sequence, exclusive. If None,
        defaults to the value of start while the first entry of the range defaults to 0.
    delta : tensor
        A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
    dtype : type
        The type of the elements of the resulting tensor.

    Returns
    -------
        A 1-D Tensor of type dtype.
    """
    if dtype is None:
        dtype = 'int32'
    if limit is None:
        outputs = D.arange(start=0, stop=start, step=delta, dtype=dtype)
    else:
        outputs = D.arange(start, stop=limit, step=delta, dtype=dtype)
    return outputs
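# Illustrative usage sketch for range above:
#
#     a = range(5)                              # 0, 1, 2, 3, 4
#     b = range(2, 10, 2)                       # 2, 4, 6, 8
#     c = range(0, 1, 0.25, dtype='float32')    # 0.0, 0.25, 0.5, 0.75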
class ExpandDims(object):

    def __init__(self, axis):
        pass

    def construct(self, input):
        pass
def expand_dims(input, axis):
    """
    Inserts a dimension of 1 into a tensor's shape.

    Parameters
    ----------
    input : tensor
        A Tensor.
    axis : int
        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
        Must be in the range [-rank(input) - 1, rank(input)].

    Returns
    -------
        A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
    """
    return D.expand_dims(input, axis=axis)


class Tile(object):

    def __init__(self):
        pass

    def __call__(self, input, multiples):
        return D.tile(input, multiples)


def tile(input, multiples):
    """
    Constructs a tensor by tiling a given tensor.

    Parameters
    ----------
    input : tensor
        A Tensor. 1-D or higher.
    multiples : tensor
        Must be one of the following types: int32, int64. 1-D.
        Length must be the same as the number of dimensions in input.

    Returns
    -------
        A Tensor. Has the same type as input.
    """
    return D.tile(input, multiples)
class Cast(object):

    def __init__(self, dtype):
        pass

    def __call__(self, input):
        pass


def cast(x, dtype):
    """
    Casts a tensor to a new type.

    Parameters
    ----------
    x : tensor
        A Tensor or SparseTensor or IndexedSlices of numeric type.
        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
    dtype : str
        The destination type. The list of supported dtypes is the same as for x.

    Returns
    -------
        A Tensor or SparseTensor or IndexedSlices with the same shape as x and the type dtype.
    """
    return D.cast(x, dtype=dtype)
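# Illustrative usage sketch for expand_dims/tile/cast above (shapes and dtypes
# are examples only):
#
#     x = ones((3,))
#     x = expand_dims(x, 0)          # shape (1, 3)
#     x = tile(x, (4, 1))            # shape (4, 3)
#     x = cast(x, 'int32')           # convert float32 -> int32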
class Transpose(object):

    def __init__(self, perm, conjugate=False):
        self.perm = perm
        if conjugate:
            raise Exception("The conjugate parameter is not supported")

    def __call__(self, a):
        return D.transpose(a, self.perm)


def transpose(a, perm=None, conjugate=False):
    """
    Transposes a.

    Parameters
    ----------
    a : tensor
        A Tensor.
    perm : list of int
        A permutation of the dimensions of a.
    conjugate : bool
        Setting it to True is mathematically equivalent to conj(transpose(a)).

    Returns
    -------
        A transposed Tensor.
    """
    return D.transpose(a, perm=perm)
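# Illustrative usage sketch for transpose above:
#
#     x = ones((2, 3, 4))
#     y = transpose(x, perm=[0, 2, 1])   # shape (2, 4, 3)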
def gather_nd(params, indices, batch_dims=0):
    """
    Gathers slices from params into a Tensor with a shape specified by indices.

    Parameters
    ----------
    params : tensor
        The tensor from which to gather values.
    indices : tensor
        Must be one of the following types: int32, int64. Index tensor.
    batch_dims : int
        An integer or a scalar Tensor. The number of batch dimensions.

    Returns
    -------
        A Tensor. Has the same type as params.
    """
    pass


def clip_by_value(t, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Parameters
    ----------
    t : tensor
        A Tensor or IndexedSlices.
    clip_value_min : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by.
    clip_value_max : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by.

    Returns
    -------
        A clipped Tensor or IndexedSlices.
    """
    pass


def split(value, num_or_size_splits, axis=0, num=None):
    """
    Splits a tensor into sub tensors.

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : list
        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
        Python list containing the sizes of each output tensor along split_dim.
    axis : int
        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
    num : int
        Used to specify the number of outputs when it cannot be inferred from the shape of size_splits.

    Returns
    -------
        Tensor objects resulting from splitting value.
    """
    pass
def floor(x):
    return D.math.floor(x)


def gather(params, indices):
    raise NotImplementedError


def linspace(start, stop, num):
    return D.linspace(start, stop, num)


def slice(inputs, starts, sizes):
    return D.slice(inputs, starts, sizes)


def add_n(inputs):
    raise NotImplementedError
class OneHot(object):

    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype='float32'):
        self.depth = depth
        self.dtype = dtype

    def __call__(self, indices):
        outputs = np.zeros(shape=(indices.shape[0], self.depth))
        for i in np.arange(indices.shape[0]):
            outputs[int(i)][int(indices[int(i)].get_value())] = 1
        outputs = D.constant(outputs, dtype=self.dtype)
        return outputs
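# Illustrative usage sketch for OneHot above (indices is assumed to be a 1-D
# integer tensor; axis, on_value and off_value are ignored by this implementation):
#
#     labels = convert_to_tensor(np.array([0, 2, 1]), 'int32')
#     onehot = OneHot(depth=3)(labels)   # rows are one-hot vectors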
class L2Normalize(object):

    def __init__(self, axis=None, epsilon=1e-12):
        super(L2Normalize, self).__init__()
        pass

    def __call__(self, input, *args, **kwargs):
        pass


class EmbeddingLookup(object):

    def __init__(self, max_norm=None):
        self.max_norm = max_norm

    def __call__(self, params, ids, *args, **kwargs):
        pass


class NCELoss(object):

    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
        super(NCELoss, self).__init__()

    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
        pass


class Not_equal(object):

    def __init__(self):
        pass

    def __call__(self, x, y):
        pass


class Count_nonzero(object):

    def __init__(self, keepdims=None, dtype='int64'):
        pass

    def __call__(self, *args, **kwargs):
        pass
class Resize:

    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
        if method not in ['nearest', 'linear', 'bilinear']:
            raise Exception('Current resize does not support this method.')
        if method == 'bilinear':
            method = 'linear'
        self.method = method
        self.antialias = antialias
        self.scale = scale
        if data_format != 'channels_last':
            raise Exception("UpSampling2d resize_images only supports channels_last")

    def __call__(self, inputs):
        output_size = (int(inputs.shape[1] * self.scale[0]), int(inputs.shape[2] * self.scale[1]))
        outputs = D.vision.resize(inputs, sizes=output_size, mode=self.method, align_corners=self.antialias)
        return outputs


def resize(inputs, output_size, method, antialias):
    if method not in ['nearest', 'linear', 'bilinear']:
        raise Exception('Current resize does not support this method.')
    if method == 'bilinear':
        method = 'linear'
    return D.vision.resize(inputs, sizes=output_size, mode=method, align_corners=antialias)
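# Illustrative usage sketch for Resize above (channels_last input, scale given
# per spatial axis; shapes are examples only):
#
#     x = random_normal((1, 16, 16, 3))
#     up = Resize(scale=(2, 2), method='bilinear')(x)   # spatial size 32x32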
class ZeroPadding1D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class ZeroPadding2D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class ZeroPadding3D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class Sign(object):

    def __init__(self):
        pass

    def __call__(self, x):
        return D.math.sign(x)


def ceil(x):
    raise NotImplementedError


def multiply(x, y):
    raise NotImplementedError


def divide(x, y):
    raise NotImplementedError


def identity(x):
    raise NotImplementedError
class BatchToSpace(object):

    def __init__(self, block_size, crops):
        super(BatchToSpace, self).__init__()
        pass

    def __call__(self, input_x):
        raise NotImplementedError


class DepthToSpace(object):

    def __init__(self, block_size, data_format='NHWC'):
        pass

    def __call__(self, input):
        raise NotImplementedError

TensorLayer 3.0 is a deep learning library that supports multiple deep learning frameworks as its computation backends. Planned backends include TensorFlow, PyTorch, MindSpore, and Paddle.