
mindspore_backend.py 32 kB

#! /usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

from .mindspore_nn import nchw_to_nhwc, nhwc_to_nchw
from mindspore._c_expression.typing import Type
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import (
    initializer, Constant, Normal, TruncatedNormal, Initializer, _assignment, _calculate_in_and_out, One, Zero
)
from mindspore.common.tensor import Tensor
from mindspore._c_expression import Tensor as Tensor_
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
import mindspore.context as context
from mindspore.nn import Cell

import numpy as np
from scipy.stats import truncnorm
import random

_dtypeDict = {
    'DType': Type,
    'float16': mstype.float16,
    'float32': mstype.float32,
    'float64': mstype.float64,
    'int8': mstype.int8,
    'int16': mstype.int16,
    'int32': mstype.int32,
    'int64': mstype.int64,
    'uint8': mstype.uint8,
    'uint16': mstype.uint16,
    'uint32': mstype.uint32,
    'uint64': mstype.uint64
}

DType = Type
float16 = mstype.float16
float32 = mstype.float32
float64 = mstype.float64
int8 = mstype.int8
int16 = mstype.int16
int32 = mstype.int32
int64 = mstype.int64
uint8 = mstype.uint8
uint16 = mstype.uint16
uint32 = mstype.uint32
uint64 = mstype.uint64

# isinstance input output
# TensorLike = Tensor_


def set_context(**kwargs):
    return context.set_context(**kwargs)


def get_tensor_shape(x):
    return list(P.Shape()(x))
# initializers
def zeros(shape, dtype=mstype.float32):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor

    Returns
    -------
        A Tensor with all elements set to zero.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Zero()
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
def ones(shape, dtype=mstype.float32):
    """
    Creates a tensor with all elements set to one.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor

    Returns
    -------
        A Tensor with all elements set to one.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = One()
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
def constant(value, dtype=mstype.float32, shape=None):
    """
    Creates a constant tensor from a tensor-like object.

    Parameters
    ----------
    value : list
        A constant value (or list) of output type dtype.
    dtype : tensor
        The type of the elements of the resulting tensor.
    shape : tuple
        Optional dimensions of resulting tensor.

    Returns
    -------
        A Constant Tensor.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    Constant(value)(arr=arr)
    return Tensor(arr, dtype=dtype)
class Uniform(Initializer):
    """
    Initialize a uniform array, and obtain values U(-scale, scale) from the uniform distribution
    to fill the input tensor.

    Args:
        minval : int
            The lower bound on the range of random values to generate (inclusive). Defaults to 0.
        maxval : int
            The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
        seed : int
            Used to create a reproducible sequence of values across multiple calls.

    Returns:
        Array, uniform array.
    """

    def __init__(self, minval=0, maxval=None, seed=None):
        super(Uniform, self).__init__(minval=minval, maxval=maxval, seed=seed)
        self.minval = minval
        self.maxval = maxval
        self.seed = seed

    def _initialize(self, arr):
        # Seed NumPy's generator; seeding the stdlib `random` module would not affect np.random.
        np.random.seed(self.seed)
        tmp = np.random.uniform(self.minval, self.maxval, arr.shape)
        _assignment(arr, tmp)
def random_uniform(shape, minval=0, maxval=None, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a uniform distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval : int
        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
    maxval : int
        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
    dtype : tensor
        The type of the output: float16, float32, float64, int32, or int64.
    seed : int
        Used to create a reproducible sequence of values across multiple calls.

    Returns
    -------
        A tensor of the specified shape filled with random uniform values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Uniform(minval=minval, maxval=maxval, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
class Normal(Initializer):
    """
    Initialize a normal array, and obtain values N(mean, stddev) from the normal distribution
    to fill the input tensor.

    Parameters
    ----------
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns:
        Array, normal array.
    """

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(Normal, self).__init__(mean=mean, stddev=stddev)
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def _initialize(self, arr):
        # Seed NumPy's generator; seeding the stdlib `random` module would not affect np.random.
        np.random.seed(self.seed)
        tmp = np.random.normal(self.mean, self.stddev, arr.shape)
        _assignment(arr, tmp)
class RandomNormal(Cell):

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(RandomNormal, self).__init__()
        self.normal = Normal(mean=mean, stddev=stddev, seed=seed)

    def construct(self, shape):
        arr = np.ndarray(shape)
        outputs = self.normal(arr)
        return outputs
def random_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns
    -------
        A tensor of the specified shape filled with random normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Normal(mean=mean, stddev=stddev, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
class TruncatedNormal(Initializer):
    """
    Initialize a truncated normal distribution which is a bounded normal distribution within N(low, high).

    Args:
        mean (float): The mean of the distribution. Default: 0.0.
        stddev (float): The standard deviation of the distribution. Default: 0.01.
        seed (int): Used to create a random seed for the distribution.

    Returns:
        Array, truncated normal array.
    """

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(TruncatedNormal, self).__init__(mean=mean, stddev=stddev, seed=seed)
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def _initialize(self, arr):
        tmp = truncnorm.rvs(-2, 2, loc=self.mean, scale=self.stddev, size=arr.shape, random_state=None)
        _assignment(arr, tmp)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a truncated normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns
    -------
        A tensor of the specified shape filled with random truncated normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = TruncatedNormal(mean=mean, stddev=stddev, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
class HeNormal(Initializer):
    r"""
    he_normal: It draws samples from a truncated normal distribution centered on 0 with
    stddev = sqrt(2 / fan_in) where fan_in is the number of input units in the weight tensor.

    Args:
        arr (Array): The array to be assigned.

    Returns:
        Array, assigned array.
    """

    def __init__(self, seed=None):
        super(HeNormal, self).__init__(seed=seed)
        self.seed = seed

    def _initialize(self, arr):
        n_in, _ = _calculate_in_and_out(arr)
        boundary = np.sqrt(2.0 / n_in)
        # Seed NumPy's generator; seeding the stdlib `random` module would not affect np.random.
        np.random.seed(self.seed)
        # Draw from N(0, sqrt(2 / fan_in)); np.random.normal takes (mean, stddev, shape).
        data = np.random.normal(0, boundary, arr.shape)
        _assignment(arr, data)
def he_normal(shape, dtype, seed=None):
    """
    He normal initializer.

    Parameters
    ----------
    seed : A Python integer.
        Used to seed the random generator.
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    dtype : tensor
        The type of the output.

    Returns
    -------
        A tensor of the specified shape filled with he normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = HeNormal(seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
def Variable(initial_value, name, trainable=True):
    """
    Creates a new variable with value initial_value.

    Parameters
    ----------
    initial_value : tensor
        A Tensor, or Python object convertible to a Tensor
    name : str
        Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically.

    Returns
    -------
        Variable
    """
    var = Parameter(initial_value, name=name, requires_grad=trainable)
    return var
class MatMul(Cell):

    def __init__(self):
        super(MatMul, self).__init__()
        self.matmul = P.MatMul()

    def construct(self, a, b):
        return self.matmul(a, b)


def matmul(a, b):
    """
    Multiplies matrix a by matrix b, producing a * b.

    Parameters
    ----------
    a : tensor
        type float16, float32, float64, int32, complex64, complex128 and rank > 1.
    b : tensor
        with same type and rank as a.

    Returns
    -------
        A Tensor of the same type as a and b
    """
    matmul_obj = P.MatMul()
    outputs = matmul_obj(a, b)
    return outputs
def add(value, bias):
    """
    Returns x + y element-wise.

    Parameters
    ----------
    value : tensor.
        Must be one of the following types: bfloat16, half, float32, float64,
        uint8, int8, int16, int32, int64, complex64, complex128, string.
    bias : tensor
        Must have the same type as a
    name : str
        A name for the operation

    Returns
    -------
        A Tensor. Has the same type as a.
    """
    add_obj = P.TensorAdd()
    outputs = add_obj(value, bias)
    return outputs
def dtypes(dt):
    """
    Data dtypes.

    Parameters
    ----------
    dt : string
        It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
        'int32', 'int64', 'float16', 'float32', 'float64', 'DType'.

    Returns
    -------
        Data dtypes
    """
    if dt not in _dtypeDict.keys():
        raise Exception("Unsupported dtype: {}".format(dt))
    return _dtypeDict[dt]
class Maximum(Cell):

    def __init__(self):
        super(Maximum, self).__init__()
        self.maximum = P.Maximum()

    def construct(self, x, y):
        return self.maximum(x, y)


class Minimum(Cell):

    def __init__(self):
        super(Minimum, self).__init__()
        self.minimum = P.Minimum()

    def construct(self, x, y):
        return self.minimum(x, y)


def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor.
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : A Tensor.
        Must have the same type as x.
    name : str
        A name for the operation (optional).

    Returns
    -------
        A Tensor. Has the same type as x
    """
    minimum_obj = P.Minimum()
    outputs = minimum_obj(x, y)
    return outputs
class FlattenReshape(Cell):

    def __init__(self):
        super(FlattenReshape, self).__init__()
        self.shape = P.Shape()
        self.reshape = P.Reshape()

    def construct(self, inputs):
        dim = 1
        for d in self.shape(inputs)[1:]:
            dim *= d
        return self.reshape(inputs, (-1, dim))


class Reshape(Cell):

    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.reshape = P.Reshape()
        self.shape = tuple(shape)

    def construct(self, tensor):
        return self.reshape(tensor, self.shape)


def reshape(tensor, shape):
    """
    Reshapes a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    shape : tensor
        Defines the shape of the output tensor.

    Returns
    -------
        A Tensor. Has the same type as tensor
    """
    reshape_obj = P.Reshape()
    outputs = reshape_obj(tensor, tuple(shape))
    return outputs
class Concat(Cell):

    def __init__(self, axis):
        super(Concat, self).__init__()
        self.concat = P.Concat(axis)

    def construct(self, values):
        return self.concat(values)


def concat(values, axis):
    """
    Concatenates tensors along one dimension.

    Parameters
    ----------
    values : list
        A list of Tensor objects or a single Tensor
    axis : int
        0-D int32 Tensor. Dimension along which to concatenate

    Returns
    -------
        A Tensor resulting from concatenation of the input tensors.
    """
    # TODO testing axis
    concat_obj = P.Concat(axis)
    outputs = concat_obj(values)
    return outputs
def convert_to_tensor(value, dtype=None):
    """
    Converts the given value to a Tensor.

    Parameters
    ----------
    value : object
        An object whose type has a registered Tensor conversion function.
    dtype : optional
        Optional element type for the returned tensor. If missing, the type is inferred from the type of value.

    Returns
    -------
        A Tensor based on value.
    """
    # TODO testing value
    return Tensor(value, dtype=dtype)


def sqrt(x):
    """
    Computes square root of x element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.

    Returns
    -------
        A Tensor. Has the same type as x.
    """
    sqrt_obj = P.Sqrt()
    outputs = sqrt_obj(x)
    return outputs
class ReduceSum(Cell):

    def __init__(self, axis):
        super(ReduceSum, self).__init__()
        self.axis = axis
        self.reduce_sum = P.ReduceSum(keep_dims=True)

    def construct(self, input):
        return self.reduce_sum(input, self.axis)


class ReduceMean(Cell):

    def __init__(self, axis):
        super(ReduceMean, self).__init__()
        self.axis = axis
        self.reducemean = P.ReduceMean(keep_dims=False)

    def construct(self, inputs):
        output = self.reducemean(inputs, self.axis)
        return output


def reduce_mean(input_tensor, axis=None):
    """
    Computes the mean of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
        The reduced tensor.
    """
    Rmean_obj = P.ReduceMean(keep_dims=False)
    outputs = Rmean_obj(input_tensor, axis)
    return outputs


class ReduceMax(Cell):

    def __init__(self, axis):
        super(ReduceMax, self).__init__()
        self.axis = axis
        self.reducemax = P.ReduceMax(keep_dims=False)

    def construct(self, inputs):
        output = self.reducemax(inputs, self.axis)
        return output


def reduce_max(input_tensor, axis=None):
    """
    Computes the maximum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
        The reduced tensor.
    """
    Rmax_obj = P.ReduceMax(keep_dims=False)
    outputs = Rmax_obj(input_tensor, axis)
    return outputs


def reduce_min(input_tensor, axis=None):
    """
    Computes the minimum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
        The reduced tensor.
    """
    Rmin_obj = P.ReduceMin(keep_dims=False)
    outputs = Rmin_obj(input_tensor, axis)
    return outputs
class Pad(Cell):

    def __init__(self, paddings, mode="REFLECT"):
        super(Pad, self).__init__()
        if mode not in ["REFLECT", "SYMMETRIC"]:
            raise Exception("Unsupported mode: {}".format(mode))
        self.pad = P.MirrorPad(mode=mode)
        self.paddings = Tensor(paddings)

    def construct(self, x):
        return self.pad(x, self.paddings)


def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
    """
    Pads a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    paddings : tuple
        A tuple of type int32.
    mode : str
        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    constant_values : int
        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.

    Returns
    -------
        A Tensor. Has the same type as tensor.
    """
    raise NotImplementedError
class Unstack(Cell):

    def __init__(self, axis, num=None):
        super(Unstack, self).__init__()
        if num is not None:
            # Raising a plain string is invalid in Python 3; use a proper exception type.
            raise ValueError("The num parameter does not need to be set.")
        self.unstack = P.Unpack(axis=axis)

    def construct(self, values):
        return self.unstack(values)


class Stack(Cell):

    def __init__(self, axis=0):
        super(Stack, self).__init__()
        self.stack = P.Pack(axis=axis)

    def construct(self, values):
        return self.stack(values)


def stack(values, axis=0):
    """
    Stacks a list of rank-R tensors into one rank-(R+1) tensor.

    Parameters
    ----------
    values : list
        A list of Tensor objects with the same shape and type.
    axis : int
        An int. The axis to stack along. Defaults to the first dimension.
        Negative values wrap around, so the valid range is [-(R+1), R+1).

    Returns
    -------
        A stacked Tensor with the same type as values.
    """
    _stack = P.Pack(axis=axis)
    return _stack(values)
class Meshgrid(Cell):

    def __init__(self, indexing='xy'):
        super(Meshgrid, self).__init__()
        self._meshgrid = P.Meshgrid(indexing=indexing)

    def construct(self, *args):
        inputs = tuple(*args)
        return self._meshgrid(inputs)


def meshgrid(*args, **kwargs):
    """
    Broadcasts parameters for evaluation on an N-D grid.

    Parameters
    ----------
    x : tensor
        Tensors with rank 1.
    y : tensor
        Tensors with rank 1.

    Returns
    -------
        A list of N Tensors with rank N.
    """
    _meshgrid = P.Meshgrid(**kwargs)
    return _meshgrid(*args)


def range(start, limit=None, delta=1, dtype=None):
    """
    Creates a sequence of numbers.

    Parameters
    ----------
    start : tensor
        A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None;
        otherwise, acts as range limit and first entry defaults to 0.
    limit : tensor
        A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None,
        defaults to the value of start while the first entry of the range defaults to 0.
    delta : tensor
        A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
    dtype : type
        The type of the elements of the resulting tensor.

    Returns
    -------
        An 1-D Tensor of type dtype.
    """
    pass
class ExpandDims(Cell):

    def __init__(self, axis):
        super(ExpandDims, self).__init__()
        self.axis = axis
        self.expand_dims = P.ExpandDims()

    def construct(self, input):
        output = self.expand_dims(input, self.axis)
        return output


def expand_dims(input, axis):
    """
    Inserts a dimension of 1 into a tensor's shape.

    Parameters
    ----------
    input : tensor
        A Tensor.
    axis : int
        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
        Must be in the range [-rank(input) - 1, rank(input)].

    Returns
    -------
        A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
    """
    expand_obj = P.ExpandDims()
    outputs = expand_obj(input, axis)
    return outputs


class Tile(Cell):

    def __init__(self):
        super(Tile, self).__init__()
        self.tile = P.Tile()

    def construct(self, input, multiples):
        return self.tile(input, tuple(multiples))


def tile(input, multiples):
    """
    Constructs a tensor by tiling a given tensor.

    Parameters
    ----------
    input : tensor
        A Tensor. 1-D or higher.
    multiples : tensor
        Must be one of the following types: int32, int64. 1-D.
        Length must be the same as the number of dimensions in input

    Returns
    -------
        A Tensor. Has the same type as input.
    """
    tile_obj = P.Tile()
    outputs = tile_obj(input, multiples)
    return outputs
class Cast(Cell):

    def __init__(self, dtype):
        super(Cast, self).__init__()
        self.dtype = dtype
        self.cast = P.Cast()

    def construct(self, input):
        return self.cast(input, dtype=self.dtype)


def cast(x, dtype):
    """
    Casts a tensor to a new type.

    Parameters
    ----------
    x : tensor
        A Tensor or SparseTensor or IndexedSlices of numeric type.
        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
    dtype : dtype
        The destination type. The list of supported dtypes is the same as x

    Returns
    -------
        A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
    """
    cast_obj = P.Cast()
    outputs = cast_obj(x, dtype)
    return outputs


class Transpose(Cell):

    def __init__(self, perm, conjugate=False):
        super(Transpose, self).__init__()
        self.perm = tuple(perm)
        self.conjugate = conjugate
        self.transpose = P.Transpose()
        if self.conjugate:
            raise NotImplementedError("conjugate not implemented")

    def construct(self, a):
        return self.transpose(a, self.perm)
def transpose(a, perm=None, conjugate=False):
    """
    Transposes a.

    Parameters
    ----------
    a : tensor
        A Tensor.
    perm : int
        A permutation of the dimensions of a.
    conjugate : bool
        Setting it to True is mathematically equivalent to ms.math.conj(ms.transpose(input)).

    Returns
    -------
        A transposed Tensor.
    """
    # TODO conjugate
    trans_obj = P.Transpose()
    outputs = trans_obj(a, perm)
    return outputs
def gather_nd(params, indices, batch_dims=0):
    """
    Gather slices from params into a Tensor with shape specified by indices.

    Parameters
    ----------
    params : tensor
        The tensor from which to gather values.
    indices : tensor
        Must be one of the following types: int32, int64. Index tensor.
    batch_dims : int
        An integer or a scalar 'Tensor'. The number of batch dimensions.

    Returns
    -------
        A Tensor. Has the same type as params.
    """
    pass


def clip_by_value(t, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Parameters
    ----------
    t : tensor
        A Tensor or IndexedSlices
    clip_value_min : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
    clip_value_max : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by

    Returns
    -------
        A clipped Tensor or IndexedSlices.
    """
    min_value = Tensor(clip_value_min, mstype.float32)
    max_value = Tensor(clip_value_max, mstype.float32)
    output = C.clip_by_value(t, min_value, max_value)
    return output


def split(value, num_or_size_splits, axis=0, num=None):
    """
    Splits a tensor into sub tensors.

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : list
        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
        Python list containing the sizes of each output tensor along split_dim.
    axis : int
        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
    num : int
        used to specify the number of outputs when it cannot be inferred from the shape of size_splits.

    Returns
    -------
        Tensor objects resulting from splitting value.
    """
    pass
def floor(x):
    raise NotImplementedError


def gather(params, indices):
    raise NotImplementedError


def linspace(start, stop, num):
    raise NotImplementedError


def slice(inputs, starts, sizes):
    raise NotImplementedError


def add_n(inputs):
    raise NotImplementedError
class OneHot(Cell):

    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
        super(OneHot, self).__init__()
        self.onehot = P.OneHot(axis)
        self.depth = depth
        self.dtype = dtype
        self.on_value = F.cast(on_value, self.dtype)
        self.off_value = F.cast(off_value, self.dtype)

    def construct(self, indices):
        return self.onehot(indices, self.depth, self.on_value, self.off_value)


class L2Normalize(Cell):

    def __init__(self, axis=None, epsilon=1e-12):
        super(L2Normalize, self).__init__()
        pass

    def __call__(self, input, *args, **kwargs):
        pass


class EmbeddingLookup(Cell):

    def __init__(self, max_norm=None):
        # Cell subclasses must initialise the base class before assigning attributes.
        super(EmbeddingLookup, self).__init__()
        self.max_norm = max_norm

    def __call__(self, params, ids, *args, **kwargs):
        pass


class NCELoss(object):

    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
        super(NCELoss, self).__init__()

    def __call__(self, weights, biases, labels, inputs, num_sampled, num_classes):
        pass


class Not_equal(object):

    def __init__(self):
        pass

    def __call__(self, x, y):
        pass


class Count_nonzero(object):

    def __init__(self, keepdims=None, dtype=int64):
        pass

    def __call__(self, *args, **kwargs):
        pass
class Resize(Cell):

    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
        super(Resize, self).__init__()
        self.data_format = data_format
        if method not in ['nearest', 'bilinear']:
            raise ValueError('The method must be "nearest" or "bilinear".')
        self.method = method
        if ksize is None:
            raise ValueError('The "bilinear" and "nearest" methods require ksize. The dimension of ksize must be 2 (H, W).')
        out_size = (int(ksize[0] * scale[0]), int(ksize[1] * scale[1]))
        if self.method == 'nearest':
            self.resize = P.ResizeNearestNeighbor(size=out_size, align_corners=antialias)
        elif self.method == 'bilinear':
            self.resize = P.ResizeBilinear(size=out_size)

    def construct(self, inputs):
        if self.data_format == 'channels_last':
            inputs = nhwc_to_nchw(inputs)
        outputs = self.resize(inputs)
        if self.data_format == 'channels_last':
            outputs = nchw_to_nhwc(outputs)
        return outputs


def resize(inputs, output_size, method, antialias):
    raise NotImplementedError
class ZeroPadding1D(Cell):

    def __init__(self, padding):
        super(ZeroPadding1D, self).__init__()
        if np.size(padding) == 2:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)


class ZeroPadding2D(Cell):

    def __init__(self, padding):
        super(ZeroPadding2D, self).__init__()
        if np.size(padding) == 4:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)


class ZeroPadding3D(Cell):

    def __init__(self, padding):
        super(ZeroPadding3D, self).__init__()
        if np.size(padding) == 6:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)
class Sign(Cell):

    def __init__(self):
        super(Sign, self).__init__()
        self.sign = P.Sign()

    def construct(self, x):
        return self.sign(x)


def ceil(x):
    _ceil = P.Ceil()
    return _ceil(x)


def multiply(x, y):
    raise NotImplementedError


def divide(x, y):
    raise NotImplementedError


def identity(x):
    raise NotImplementedError


class BatchToSpace(Cell):

    def __init__(self, block_size, crops):
        super(BatchToSpace, self).__init__()
        self.batch_to_space = P.BatchToSpace(block_size=block_size, crops=crops)

    def __call__(self, input_x):
        return self.batch_to_space(input_x)


class DepthToSpace(Cell):

    def __init__(self, block_size, data_format='NHWC'):
        super(DepthToSpace, self).__init__()
        self.data_format = data_format
        self.depth_to_space = P.DepthToSpace(block_size=block_size)

    def __call__(self, input):
        if self.data_format == 'NHWC':
            input = nhwc_to_nchw(input)
        output = self.depth_to_space(input)
        if self.data_format == 'NHWC':
            output = nchw_to_nhwc(output)
        return output

TensorLayer3.0 is a deep learning library that supports multiple deep learning frameworks as computational backends. Planned backends: TensorFlow, PyTorch, MindSpore, and Paddle.
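To make the intended call pattern concrete, here is a minimal usage sketch (not part of the repository). It assumes a working MindSpore installation and that the file above is importable as `mindspore_backend`; the real package path inside TensorLayer3.0 may differ. The point is that the function names and signatures mirror the TensorFlow-style backend interface, so layer code written against it can switch frameworks without change.

import numpy as np

import mindspore_backend as ops   # hypothetical import path; adjust to the actual package layout

# Route execution through MindSpore's context (kwargs are passed straight to context.set_context).
ops.set_context(device_target="CPU")

# Initializers and converters return MindSpore Tensors of the requested shape/dtype.
x = ops.convert_to_tensor(np.ones((2, 3), dtype=np.float32))
w = ops.random_normal((3, 4), mean=0.0, stddev=0.1)

y = ops.matmul(x, w)               # (2, 4), same API shape as the TensorFlow backend
y = ops.add(y, ops.zeros((2, 4)))  # element-wise add
y = ops.reshape(y, (4, 2))         # same data, new static shape

print(ops.get_tensor_shape(y))     # [4, 2]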