
paddle_backend.py 24 kB

#! /usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

import paddle as pd
import paddle.nn as nn
import numpy as np

_dtypeDict = ["float16", "float32", "float64", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
# TODO NotImplemented
DType = None
float16 = "float16"
float32 = "float32"
float64 = "float64"
int8 = "int8"
int16 = "int16"
int32 = "int32"
int64 = "int64"
uint8 = "uint8"
uint16 = "uint16"
uint32 = "uint32"
uint64 = "uint64"


def _getter(init_fn, **kwargs):
    """Return a named eager tensor."""
    raise NotImplementedError


def set_context(**kwargs):
    raise Exception("The Paddle backend does not need a context to be set.")


def get_tensor_shape(x):
    return pd.shape(x)


# initializers
def zeros(shape, dtype="float32"):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor.

    Returns
    -------
    A Tensor with all elements set to zero.
    """
    return pd.zeros(shape=shape, dtype=dtype)


def ones(shape, dtype="float32"):
    """
    Creates a tensor with all elements set to one.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor.

    Returns
    -------
    A Tensor with all elements set to one.
    """
    return pd.ones(shape=shape, dtype=dtype)


def constant(value, shape, dtype="float32"):
    """
    Creates a constant tensor from a tensor-like object.

    Parameters
    ----------
    value : list
        A constant value (or list) of output type dtype.
    shape : tuple
        Optional dimensions of the resulting tensor.
    dtype : tensor
        The type of the elements of the resulting tensor.

    Returns
    -------
    A Constant Tensor.
    """
    # nn.initializer.Constant only builds an initializer object; pd.full returns the filled tensor directly.
    return pd.full(shape=shape, fill_value=value, dtype=dtype)


def random_uniform(shape, minval=0, maxval=None, dtype="float32", seed=None):
    """
    Outputs random values from a uniform distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval : int
        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
    maxval : int
        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
    dtype : tensor
        The type of the output: float16, float32, float64, int32, or int64.
    seed : int
        Used to create a reproducible sequence of tensors across multiple calls.

    Returns
    -------
    A tensor of the specified shape filled with random uniform values.
    """
    raise NotImplementedError


def random_normal(shape, mean=0.0, stddev=1.0, dtype="float32", seed=None):
    """
    Outputs random values from a normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution.
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution.

    Returns
    -------
    A tensor of the specified shape filled with random normal values.
    """
    raise NotImplementedError


def truncated_normal(shape, mean=0.0, stddev=1.0, dtype="float32", seed=None):
    """
    Outputs random values from a truncated normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution.
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution.

    Returns
    -------
    A tensor of the specified shape filled with random truncated normal values.
    """
    raise NotImplementedError
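

# The random initializer stubs above are placeholders. A minimal sketch of how
# random_uniform and random_normal could be backed by Paddle (paddle.uniform,
# paddle.normal and paddle.seed exist in Paddle 2.x; the underscore names are
# illustrative and not part of the original file):
def _uniform_sketch(shape, minval=0, maxval=None, dtype="float32", seed=None):
    if seed is not None:
        pd.seed(seed)  # Paddle exposes a global seed; per-call seeding is an approximation
    maxval = 1.0 if maxval is None else maxval
    return pd.uniform(shape=shape, dtype=dtype, min=minval, max=maxval)


def _normal_sketch(shape, mean=0.0, stddev=1.0, dtype="float32", seed=None):
    if seed is not None:
        pd.seed(seed)
    # paddle.normal returns float32, so cast to the requested dtype
    return pd.cast(pd.normal(mean=mean, std=stddev, shape=shape), dtype)
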

def he_normal(shape, dtype, seed=None):
    """
    He normal initializer.

    Parameters
    ----------
    seed : A Python integer.
        Used to seed the random generator.
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    dtype : tensor
        The type of the output.

    Returns
    -------
    A tensor of the specified shape filled with He normal values.
    """
    # shape = shape[::-1]
    raise NotImplementedError


def Variable(initial_value, name, trainable=None):
    """
    Creates a new variable with value initial_value.

    Parameters
    ----------
    initial_value : tensor
        A Tensor, or Python object convertible to a Tensor.
    name : str
        Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically.

    Returns
    -------
    Variable
    """
    raise NotImplementedError
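

# Variable() is still a stub. One way it could be backed by Paddle, assuming
# paddle.create_parameter and paddle.nn.initializer.Assign (sketch only; the
# name _variable_sketch is illustrative, and dtype handling is simplified):
def _variable_sketch(initial_value, name, trainable=True):
    value = pd.to_tensor(initial_value)
    var = pd.create_parameter(
        shape=value.shape,
        dtype="float32",  # simplification: real code would map value.dtype to its string name
        default_initializer=nn.initializer.Assign(value),
    )
    var.stop_gradient = not trainable  # non-trainable tensors are excluded from autograd
    return var
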

class MatMul(object):

    def __init__(self):
        pass

    def __call__(self, a, b):
        return pd.matmul(x=a, y=b)


def matmul(a, b):
    """
    Multiplies matrix a by matrix b, producing a * b.

    Parameters
    ----------
    a : tensor
        type float16, float32, float64, int32, complex64, complex128 and rank > 1.
    b : tensor
        with same type and rank as a.

    Returns
    -------
    A Tensor of the same type as a and b.
    """
    raise NotImplementedError


def add(value, bias):
    """
    Returns value + bias element-wise.

    Parameters
    ----------
    value : tensor
        Must be one of the following types: bfloat16, half, float32, float64,
        uint8, int8, int16, int32, int64, complex64, complex128, string.
    bias : tensor
        Must have the same type as value.

    Returns
    -------
    A Tensor. Has the same type as value.
    """
    raise NotImplementedError
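

# matmul() and add() are unimplemented even though Paddle has direct equivalents.
# A minimal sketch (paddle.matmul and paddle.add both exist; the underscore names
# are illustrative):
def _matmul_sketch(a, b):
    return pd.matmul(a, b)


def _add_sketch(value, bias):
    return pd.add(value, bias)
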

def dtypes(dt):
    """
    Data dtypes.

    Parameters
    ----------
    dt : string
        It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
        'int32', 'int64', 'float16', 'float32', 'float64', 'DType'.

    Returns
    -------
    Data dtypes
    """
    raise NotImplementedError


class Maximum(object):

    def __init__(self):
        pass

    def __call__(self, x, y):
        raise NotImplementedError


class Minimum(object):

    def __init__(self):
        pass

    def __call__(self, x, y):
        raise NotImplementedError


def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : tensor
        Must have the same type as x.

    Returns
    -------
    A Tensor. Has the same type as x.
    """
    raise NotImplementedError
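

# Maximum, Minimum and minimum() could be wired to paddle.maximum / paddle.minimum,
# which exist in Paddle 2.x. Sketch with illustrative names:
def _maximum_sketch(x, y):
    return pd.maximum(x, y)


def _minimum_sketch(x, y):
    return pd.minimum(x, y)
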

class FlattenReshape(object):

    def __init__(self):
        pass

    def __call__(self, inputs):
        return pd.flatten(x=inputs, start_axis=1, stop_axis=-1)


class Reshape(object):

    def __init__(self, shape):
        self.shape = shape

    def __call__(self, tensor):
        return pd.reshape(tensor, shape=self.shape)


def reshape(tensor, shape):
    """
    Reshapes a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    shape : tensor
        Defines the shape of the output tensor.

    Returns
    -------
    A Tensor. Has the same type as tensor.
    """
    return pd.reshape(tensor, shape)


class Concat(object):

    def __init__(self, axis):
        super(Concat, self).__init__()
        self.axis = axis

    def __call__(self, values):
        return pd.concat(values, axis=self.axis)


def concat(values, axis):
    """
    Concatenates tensors along one dimension.

    Parameters
    ----------
    values : list
        A list of Tensor objects or a single Tensor.
    axis : int
        0-D int32 Tensor. Dimension along which to concatenate.

    Returns
    -------
    A Tensor resulting from concatenation of the input tensors.
    """
    return pd.concat(values, axis)


def convert_to_tensor(value, dtype=float32):
    """
    Converts the given value to a Tensor.

    Parameters
    ----------
    value : object
        An object whose type has a registered Tensor conversion function.
    dtype : optional
        Optional element type for the returned tensor. If missing, the type is inferred from the type of value.

    Returns
    -------
    A Tensor based on value.
    """
    return pd.to_tensor(value, dtype=dtype)


def convert_to_numpy(value):
    return value.numpy()


def sqrt(x):
    """
    Computes the square root of x element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.

    Returns
    -------
    A Tensor. Has the same type as x.
    """
    return pd.sqrt(x)


class ReduceSum(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, input):
        # originally named `construct`; renamed so the instance is callable like the other wrappers
        return pd.sum(input, axis=self.axis)


class ReduceMean(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, inputs):
        return pd.mean(inputs, axis=self.axis)


def reduce_mean(input_tensor, axis=None):
    """
    Computes the mean of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
    The reduced tensor.
    """
    return pd.mean(input_tensor, axis)


class ReduceMax(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, inputs):
        return pd.max(inputs, axis=self.axis)


def reduce_max(input_tensor, axis=None):
    """
    Computes the maximum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
    The reduced tensor.
    """
    return pd.max(input_tensor, axis)


def reduce_min(input_tensor, axis=None):
    """
    Computes the minimum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).

    Returns
    -------
    The reduced tensor.
    """
    return pd.min(input_tensor, axis)

class Pad(object):

    def __init__(self, paddings, mode="REFLECT", constant_values=0):
        if mode not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']:
            raise Exception("Unsupported mode: {}".format(mode))
        if mode == 'SYMMETRIC':
            raise NotImplementedError
        self.paddings = paddings
        self.mode = mode.lower()
        self.constant_values = constant_values

    def __call__(self, x):
        if len(x.shape) == 3:
            data_format = 'NLC'
            self.paddings = self.correct_paddings(len(x.shape), self.paddings, data_format)
        elif len(x.shape) == 4:
            data_format = 'NHWC'
            self.paddings = self.correct_paddings(len(x.shape), self.paddings, data_format)
        elif len(x.shape) == 5:
            data_format = 'NDHWC'
            self.paddings = self.correct_paddings(len(x.shape), self.paddings, data_format)
        else:
            raise NotImplementedError('Please check the input shape.')
        return pd.nn.functional.pad(x, self.paddings, self.mode, value=self.constant_values, data_format=data_format)

    def correct_paddings(self, in_shape, paddings, data_format):
        if in_shape == 3 and data_format == 'NLC':
            correct_output = [paddings[1][0], paddings[1][1]]
        elif in_shape == 4 and data_format == 'NHWC':
            correct_output = [paddings[2][0], paddings[2][1], paddings[1][0], paddings[1][1]]
        elif in_shape == 5 and data_format == 'NDHWC':
            correct_output = [
                paddings[3][0], paddings[3][1], paddings[2][0], paddings[2][1], paddings[1][0], paddings[1][1]
            ]
        else:
            raise NotImplementedError('Does not support channels first')
        return correct_output


def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
    """
    Pads a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    paddings : tuple
        A tuple of type int32.
    mode : str
        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
    constant_values : int
        In "CONSTANT" mode, the scalar pad value to use. Must be the same type as tensor.

    Returns
    -------
    A Tensor. Has the same type as tensor.
    """
    return Pad(paddings, mode, constant_values)(tensor)

class Unstack(object):

    def __init__(self, axis, num=None):
        self.axis = axis
        self.num = num

    def __call__(self, values):
        return pd.unstack(values, self.axis, self.num)


class Stack(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, values):
        return pd.stack(values, self.axis)


def stack(values, axis=0):
    """
    Stacks a list of rank-R tensors into one rank-(R+1) tensor.

    Parameters
    ----------
    values : list or tuple
        A list of Tensor objects with the same shape and type.
    axis : int
        An int. The axis to stack along. Defaults to the first dimension.
        Negative values wrap around, so the valid range is [-(R+1), R+1).

    Returns
    -------
    A stacked Tensor with the same type as values.
    """
    return pd.stack(values, axis=axis)


class Meshgrid(object):

    def __init__(self, indexing='xy'):
        super(Meshgrid, self).__init__()
        self.index = indexing

    def __call__(self, inputs):
        return pd.meshgrid(inputs)


def meshgrid(*args, **kwargs):
    """
    Broadcasts parameters for evaluation on an N-D grid.

    Parameters
    ----------
    x : tensor
        Tensors with rank 1.
    y : tensor
        Tensors with rank 1.

    Returns
    -------
    A list of N Tensors with rank N.
    """
    return pd.meshgrid(*args, **kwargs)


def range(start, limit=None, delta=1, dtype=None):
    """
    Creates a sequence of numbers.

    Parameters
    ----------
    start : tensor
        A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None;
        otherwise, acts as range limit and first entry defaults to 0.
    limit : tensor
        A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None,
        defaults to the value of start while the first entry of the range defaults to 0.
    delta : tensor
        A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
    dtype : type
        The type of the elements of the resulting tensor.

    Returns
    -------
    A 1-D Tensor of type dtype.
    """
    # pass limit and dtype through; the original call dropped both
    return pd.arange(start, end=limit, step=delta, dtype=dtype)


class ExpandDims(object):

    def __init__(self, axis):
        self.axis = axis

    def __call__(self, input):
        # originally named `construct`; renamed so the instance is callable like the other wrappers
        input = convert_to_numpy(input)
        output = np.expand_dims(input, axis=self.axis)
        output = convert_to_tensor(output)
        return output


def expand_dims(input, axis):
    """
    Inserts a dimension of 1 into a tensor's shape.

    Parameters
    ----------
    input : tensor
        A Tensor.
    axis : int
        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
        Must be in the range [-rank(input) - 1, rank(input)].

    Returns
    -------
    A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
    """
    input = convert_to_numpy(input)
    output = np.expand_dims(input, axis=axis)
    output = convert_to_tensor(output)
    return output

class Tile(object):

    def __init__(self):
        pass

    def __call__(self, input, multiples):
        return pd.tile(input, multiples)


def tile(input, multiples):
    """
    Constructs a tensor by tiling a given tensor.

    Parameters
    ----------
    input : tensor
        A Tensor. 1-D or higher.
    multiples : tensor
        Must be one of the following types: int32, int64. 1-D.
        Length must be the same as the number of dimensions in input.

    Returns
    -------
    A Tensor. Has the same type as input.
    """
    return pd.tile(input, multiples)


class Cast(object):

    def __init__(self, dtype):
        self.dtype = dtype

    def __call__(self, input):
        return pd.cast(input, self.dtype)


def cast(x, dtype):
    """
    Casts a tensor to a new type.

    Parameters
    ----------
    x : tensor
        A Tensor or SparseTensor or IndexedSlices of numeric type.
        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
    dtype : dtype
        The destination type. The list of supported dtypes is the same as for x.

    Returns
    -------
    A Tensor or SparseTensor or IndexedSlices with the same shape as x and the type given by dtype.
    """
    return pd.cast(x, dtype)


class Transpose(object):

    def __init__(self, perm, conjugate=False):
        self.perm = perm
        if conjugate:
            raise NotImplementedError("The conjugate parameter is not supported.")

    def __call__(self, a):
        return pd.transpose(a, self.perm)


def transpose(a, perm=None, conjugate=False):
    """
    Transposes a.

    Parameters
    ----------
    a : tensor
        A Tensor.
    perm : int
        A permutation of the dimensions of a.
    conjugate : bool
        Setting it to True is mathematically equivalent to conj(transpose(input)).

    Returns
    -------
    A transposed Tensor.
    """
    return pd.transpose(a, perm)

def gather_nd(params, indices, batch_dims=0):
    """
    Gathers slices from params into a Tensor with the shape specified by indices.

    Parameters
    ----------
    params : tensor
        The tensor from which to gather values.
    indices : tensor
        Must be one of the following types: int32, int64. Index tensor.
    batch_dims : int
        An integer or a scalar 'Tensor'. The number of batch dimensions.

    Returns
    -------
    A Tensor. Has the same type as params.
    """
    return pd.gather_nd(params, indices)


def clip_by_value(t, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Parameters
    ----------
    t : tensor
        A Tensor or IndexedSlices.
    clip_value_min : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by.
    clip_value_max : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by.

    Returns
    -------
    A clipped Tensor or IndexedSlices.
    """
    return pd.clip(t, clip_value_min, clip_value_max)


def split(value, num_or_size_splits, axis=0, num=None):
    """
    Splits a tensor into sub tensors.

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : list or tuple
        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
        Python list containing the sizes of each output tensor along split_dim.
    axis : int
        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
    num : int
        Used to specify the number of outputs when it cannot be inferred from the shape of size_splits.

    Returns
    -------
    Tensor objects resulting from splitting value.
    """
    # the original call discarded its result; the sub tensors must be returned
    return pd.split(value, num_or_size_splits, axis)


class Floor(object):

    def __call__(self, x):
        return pd.floor(x)


def floor(x):
    return pd.floor(x)


def gather(params, indices):
    return pd.gather(params, indices)


def linspace(start, stop, num):
    return pd.linspace(start, stop, num)


def slice(inputs, starts, sizes):
    # paddle.slice takes explicit axes and end indices, so derive them from starts and sizes
    axes = [int(i) for i in np.arange(len(starts))]
    ends = [s + z for s, z in zip(starts, sizes)]
    return pd.slice(inputs, axes=axes, starts=starts, ends=ends)


def add_n(inputs):
    return pd.add_n(inputs)

class OneHot(object):

    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype="float32"):
        self.depth = depth
        self.dtype = dtype

    def __call__(self, indices):
        output = pd.nn.functional.one_hot(indices, self.depth)
        return output


class L2Normalize(object):

    def __init__(self, axis=None, epsilon=1e-12):
        super(L2Normalize, self).__init__()
        self.axis = axis
        self.epsilon = epsilon

    def __call__(self, input):
        return pd.nn.functional.normalize(x=input, p=2, axis=self.axis, epsilon=self.epsilon)


class EmbeddingLookup(object):

    def __init__(self, max_norm=None):
        self.max_norm = max_norm

    def __call__(self, params, ids):
        pass


class NCELoss(object):

    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
        super(NCELoss, self).__init__()

    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
        pass


class NotEqual(object):

    def __init__(self):
        pass

    def __call__(self, x, y):
        pass


class CountNonzero(object):

    def __init__(self, keepdims=None, dtype="int64"):
        pass

    def __call__(self, *args, **kwargs):
        pass


class Resize:

    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
        if method not in ['nearest', 'linear', 'bilinear']:
            raise Exception('Current resize does not support this method.')
        if method == 'bilinear':
            method = 'linear'
        self.method = method
        self.antialias = antialias
        self.scale = scale
        if data_format != 'channels_last':
            raise Exception("UpSampling2d resize_images only supports channels_last")

    def __call__(self, inputs):
        raise NotImplementedError


def resize(inputs, output_size, method, antialias):
    raise NotImplementedError
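

# Resize and resize() are stubs. For 4-D channels-last input they could be backed
# by paddle.nn.functional.interpolate; a minimal sketch under that assumption
# (_resize_sketch is an illustrative name; method is 'nearest' or 'bilinear',
# antialias is accepted only for API parity):
def _resize_sketch(inputs, output_size, method='bilinear', antialias=False):
    x = pd.transpose(inputs, [0, 3, 1, 2])  # NHWC -> NCHW, the layout interpolate expects
    x = pd.nn.functional.interpolate(x, size=output_size, mode=method)
    return pd.transpose(x, [0, 2, 3, 1])  # back to NHWC
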

class ZeroPadding1D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class ZeroPadding2D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class ZeroPadding3D(object):

    def __init__(self):
        pass

    def __call__(self, padding):
        raise NotImplementedError


class Sign(object):

    def __init__(self):
        pass

    def __call__(self, x):
        raise NotImplementedError


class Ceil(object):

    def __call__(self, *args, **kwargs):
        raise NotImplementedError


def ceil(x):
    raise NotImplementedError


def multiply(x, y):
    raise NotImplementedError


def divide(x, y):
    raise NotImplementedError


def identity(x):
    raise NotImplementedError
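

# Several of the stubs above map one-to-one onto existing Paddle ops (paddle.sign
# would cover Sign in the same way). A sketch of the obvious bindings; the
# underscore names are illustrative, not part of the original file:
def _ceil_sketch(x):
    return pd.ceil(x)


def _multiply_sketch(x, y):
    return pd.multiply(x, y)


def _divide_sketch(x, y):
    return pd.divide(x, y)


def _identity_sketch(x):
    return pd.assign(x)  # assign copies the tensor, acting as an identity op
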

class BatchToSpace(object):

    def __init__(self, block_size, crops):
        super(BatchToSpace, self).__init__()
        pass

    def __call__(self, input_x):
        raise NotImplementedError


class DepthToSpace(object):

    def __init__(self, block_size, data_format='NHWC'):
        pass

    def __call__(self, input):
        raise NotImplementedError

TensorLayer3.0 is a deep learning library that supports multiple deep learning frameworks as computational backends. Compatibility with TensorFlow, PyTorch, MindSpore, and Paddle is planned.
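
As a quick illustration of how a backend module like this is meant to be used, the sketch below builds a couple of tensors and runs them through the wrappers that are already implemented (importing the file directly as paddle_backend is an assumption; the real package path inside TensorLayer is not shown on this page):

import numpy as np
import paddle_backend as B  # direct import of the module above; package path is an assumption

x = B.convert_to_tensor(np.arange(12).reshape((3, 4)), dtype="float32")
y = B.ones((4, 2))
m = B.MatMul()(x, y)                          # (3, 4) x (4, 2) -> (3, 2)
print(B.get_tensor_shape(m))                  # [3, 2]
print(B.convert_to_numpy(B.reduce_mean(m)))   # mean over all elements as a NumPy value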