
mindspore_backend.py 33 kB

#! /usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

from .mindspore_nn import nchw_to_nhwc, nhwc_to_nchw
from mindspore._c_expression.typing import Type
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import (
    initializer, Constant, Normal, TruncatedNormal, Initializer, _assignment, _calculate_in_and_out, One, Zero
)
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
import mindspore.context as context
from mindspore.nn import Cell
from mindspore.ops import count_nonzero
import mindspore.numpy as msnp
import numpy as np
from scipy.stats import truncnorm
import random

_dtypeDict = {
    'DType': Type,
    'float16': mstype.float16,
    'float32': mstype.float32,
    'float64': mstype.float64,
    'int8': mstype.int8,
    'int16': mstype.int16,
    'int32': mstype.int32,
    'int64': mstype.int64,
    'uint8': mstype.uint8,
    'uint16': mstype.uint16,
    'uint32': mstype.uint32,
    'uint64': mstype.uint64
}

DType = Type
float16 = mstype.float16
float32 = mstype.float32
float64 = mstype.float64
int8 = mstype.int8
int16 = mstype.int16
int32 = mstype.int32
int64 = mstype.int64
uint8 = mstype.uint8
uint16 = mstype.uint16
uint32 = mstype.uint32
uint64 = mstype.uint64

# isinstance input output
# TensorLike = Tensor_


def set_context(**kwargs):
    return context.set_context(**kwargs)


def get_tensor_shape(x):
    return list(P.Shape()(x))
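
# Example (illustrative, not part of the original file): set_context simply
# forwards to mindspore.context.set_context, e.g.
#   set_context(mode=context.PYNATIVE_MODE)
#   get_tensor_shape(Tensor(np.ones((2, 3))))   # -> [2, 3]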


# initializers


def zeros(shape, dtype=mstype.float32):
    """
    Creates a tensor with all elements set to zero.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor

    Returns
    -------
    A Tensor with all elements set to zero.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Zero()
    init_obj(arr)
    return Tensor(arr, dtype=dtype)


def ones(shape, dtype=mstype.float32):
    """
    Creates a tensor with all elements set to one.

    Parameters
    ----------
    shape : A list of integers
        a tuple of integers, or a 1-D Tensor of type int32.
    dtype : tensor
        The DType of an element in the resulting Tensor

    Returns
    -------
    A Tensor with all elements set to one.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = One()
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
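
# Example (illustrative): both helpers allocate a NumPy buffer, fill it with the
# corresponding MindSpore initializer and wrap it as a Tensor, e.g.
#   zeros((2, 3))              # 2x3 float32 tensor of 0.0
#   ones((4,), dtype=int32)    # length-4 int32 tensor of 1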


def constant(value, dtype=mstype.float32, shape=None):
    """
    Creates a constant tensor from a tensor-like object.

    Parameters
    ----------
    value : list
        A constant value (or list) of output type dtype.
    dtype : tensor
        The type of the elements of the resulting tensor.
    shape : tuple
        Optional dimensions of resulting tensor.

    Returns
    -------
    A Constant Tensor.
    """
    # shape = shape[::-1]
    if shape is None:
        # shape is optional; fall back to the shape of `value` itself
        shape = np.asarray(value).shape
    arr = np.ndarray(shape)
    Constant(value)(arr=arr)
    return Tensor(arr, dtype=dtype)


class Uniform(Initializer):
    """
    Initialize a uniform array, and obtain values U(minval, maxval) from the uniform distribution
    to fill the input tensor.

    Args:
        minval : int
            The lower bound on the range of random values to generate (inclusive). Defaults to 0.
        maxval : int
            The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
        seed : int
            Used in combination with tf.random.set_seed to create a reproducible sequence of tensors across multiple calls.

    Returns:
        Array, uniform array.
    """

    def __init__(self, minval=0, maxval=None, seed=None):
        super(Uniform, self).__init__(minval=minval, maxval=maxval, seed=seed)
        self.minval = minval
        self.maxval = maxval
        self.seed = seed

    def _initialize(self, arr):
        random.seed(self.seed)
        # maxval is documented to default to 1 when not given
        maxval = 1 if self.maxval is None else self.maxval
        tmp = np.random.uniform(self.minval, maxval, arr.shape)
        _assignment(arr, tmp)


def random_uniform(shape, minval=0, maxval=None, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a uniform distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval : int
        The lower bound on the range of random values to generate (inclusive). Defaults to 0.
    maxval : int
        The upper bound on the range of random values to generate (exclusive). Defaults to 1 if dtype is floating point.
    dtype : tensor
        The type of the output: float16, float32, float64, int32, or int64.
    seed : int
        Used in combination with tf.random.set_seed to create a reproducible sequence of tensors across multiple calls.

    Returns
    -------
    A tensor of the specified shape filled with random uniform values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Uniform(minval=minval, maxval=maxval, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)


class Normal(Initializer):
    """
    Initialize a normal array, and obtain values N(mean, stddev) from the normal distribution
    to fill the input tensor.

    Parameters
    ----------
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns:
        Array, normal array.
    """

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(Normal, self).__init__(mean=mean, stddev=stddev)
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def _initialize(self, arr):
        random.seed(self.seed)
        tmp = np.random.normal(self.mean, self.stddev, arr.shape)
        _assignment(arr, tmp)


class RandomNormal(Cell):

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(RandomNormal, self).__init__()
        self.normal = Normal(mean=mean, stddev=stddev, seed=seed)

    def construct(self, shape):
        arr = np.ndarray(shape)
        # the initializer fills `arr` in place, so wrap the filled buffer
        self.normal(arr)
        return Tensor(arr)


def random_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns
    -------
    A tensor of the specified shape filled with random normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = Normal(mean=mean, stddev=stddev, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)
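
# Example (illustrative): the random_* helpers mirror the tf.random_* signatures
# used elsewhere in TensorLayer, e.g.
#   w = random_normal((3, 3), mean=0.0, stddev=0.02, seed=42)
#   u = random_uniform((3, 3), minval=-1, maxval=1, seed=42)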


class TruncatedNormal(Initializer):
    """
    Initialize a truncated normal distribution which is a bounded normal distribution within N(low, high).

    Args:
        mean (float): The mean of the distribution. Default: 0.0.
        stddev (float): The standard deviation of the distribution. Default: 0.01.
        seed (int): Seed used to draw the random values.

    Returns:
        Array, truncated normal array.
    """

    def __init__(self, mean=0.0, stddev=0.01, seed=None):
        super(TruncatedNormal, self).__init__(mean=mean, stddev=stddev, seed=seed)
        self.mean = mean
        self.stddev = stddev
        self.seed = seed

    def _initialize(self, arr):
        tmp = truncnorm.rvs(-2, 2, loc=self.mean, scale=self.stddev, size=arr.shape, random_state=self.seed)
        _assignment(arr, tmp)


def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=mstype.float32, seed=None):
    """
    Outputs random values from a truncated normal distribution.

    Parameters
    ----------
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    mean : float
        The mean of the normal distribution
    stddev : float
        The standard deviation of the normal distribution.
    dtype : tensor
        The type of the output.
    seed : A Python integer
        Used to create a random seed for the distribution

    Returns
    -------
    A tensor of the specified shape filled with random truncated normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = TruncatedNormal(mean=mean, stddev=stddev, seed=seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)


class HeNormal(Initializer):
    r"""
    he_normal: It draws samples from a truncated normal distribution centered on 0 with
    stddev = sqrt(2 / fan_in) where fan_in is the number of input units in the weight tensor.

    Args:
        arr (Array): The array to be assigned.

    Returns:
        Array, assigned array.
    """

    def __init__(self, seed=None):
        super(HeNormal, self).__init__(seed=seed)
        self.seed = seed

    def _initialize(self, arr):
        n_in, _ = _calculate_in_and_out(arr)
        boundary = np.sqrt(2.0 / n_in)
        random.seed(self.seed)
        # centered on 0 with stddev sqrt(2 / fan_in), as described above
        data = np.random.normal(0, boundary, arr.shape)
        _assignment(arr, data)


def he_normal(shape, dtype, seed=None):
    """
    He normal initializer.

    Parameters
    ----------
    seed : A Python integer.
        Used to seed the random generator.
    shape : tuple
        A 1-D integer Tensor or Python array. The shape of the output tensor.
    dtype : tensor
        The type of the output.

    Returns
    -------
    A tensor of the specified shape filled with he normal values.
    """
    # shape = shape[::-1]
    arr = np.ndarray(shape)
    init_obj = HeNormal(seed)
    init_obj(arr)
    return Tensor(arr, dtype=dtype)


def Variable(initial_value, name, trainable=True):
    """
    Creates a new variable with value initial_value.

    Parameters
    ----------
    initial_value : tensor
        A Tensor, or Python object convertible to a Tensor
    name : str
        Optional name for the variable. Defaults to 'Variable' and gets uniquified automatically.

    Returns
    -------
    Variable
    """
    var = Parameter(initial_value, name=name, requires_grad=trainable)
    return var


class MatMul(Cell):

    def __init__(self):
        super(MatMul, self).__init__()
        self.matmul = P.MatMul()

    def construct(self, a, b):
        return self.matmul(a, b)


def matmul(a, b):
    """
    Multiplies matrix a by matrix b, producing a * b.

    Parameters
    ----------
    a : tensor
        type float16, float32, float64, int32, complex64, complex128 and rank > 1.
    b : tensor
        with same type and rank as a.

    Returns
    -------
    A Tensor of the same type as a and b
    """
    matmul_obj = P.MatMul()
    outputs = matmul_obj(a, b)
    return outputs


def add(value, bias):
    """
    Returns x + y element-wise.

    Parameters
    ----------
    value : tensor.
        Must be one of the following types: bfloat16, half, float32, float64,
        uint8, int8, int16, int32, int64, complex64, complex128, string.
    bias : tensor
        Must have the same type as a
    name : str
        A name for the operation

    Returns
    -------
    A Tensor. Has the same type as a.
    """
    add_obj = P.TensorAdd()
    outputs = add_obj(value, bias)
    return outputs


def dtypes(dt):
    """
    Data dtypes.

    Parameters
    ----------
    dt : string
        It could be 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
        'int32', 'int64', 'float16', 'float32', 'float64', 'DType'.

    Returns
    -------
    Data dtypes
    """
    if dt not in _dtypeDict.keys():
        raise Exception("Unsupported dtype: {}".format(dt))
    return _dtypeDict[dt]
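
# Example (illustrative):
#   dtypes('float32') is mstype.float32   # -> True
#   dtypes('bool')                        # -> raises "Unsupported dtype: bool"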


class Maximum(Cell):

    def __init__(self):
        super(Maximum, self).__init__()
        self.maximum = P.Maximum()

    def construct(self, x, y):
        return self.maximum(x, y)


class Minimum(Cell):

    def __init__(self):
        super(Minimum, self).__init__()
        self.minimum = P.Minimum()

    def construct(self, x, y):
        return self.minimum(x, y)


def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor.
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : A Tensor.
        Must have the same type as x.
    name : str
        A name for the operation (optional).

    Returns
    -------
    A Tensor. Has the same type as x
    """
    minimum_obj = P.Minimum()
    outputs = minimum_obj(x, y)
    return outputs


class FlattenReshape(Cell):

    def __init__(self):
        super(FlattenReshape, self).__init__()
        self.shape = P.Shape()
        self.reshape = P.Reshape()

    def construct(self, inputs):
        dim = 1
        for d in self.shape(inputs)[1:]:
            dim *= d
        return self.reshape(inputs, (-1, dim))


class Reshape(Cell):

    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.reshape = P.Reshape()
        self.shape = tuple(shape)

    def construct(self, tensor):
        return self.reshape(tensor, self.shape)


def reshape(tensor, shape):
    """
    Reshapes a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    shape : tensor
        Defines the shape of the output tensor.

    Returns
    -------
    A Tensor. Has the same type as tensor
    """
    reshape_obj = P.Reshape()
    outputs = reshape_obj(tensor, tuple(shape))
    return outputs


class Concat(Cell):

    def __init__(self, axis):
        super(Concat, self).__init__()
        self.concat = P.Concat(axis)

    def construct(self, values):
        return self.concat(values)


def concat(values, axis):
    """
    Concatenates tensors along one dimension.

    Parameters
    ----------
    values : list
        A list of Tensor objects or a single Tensor
    axis : int
        0-D int32 Tensor. Dimension along which to concatenate

    Returns
    -------
    A Tensor resulting from concatenation of the input tensors.
    """
    # TODO testing axis
    concat_obj = P.Concat(axis)
    outputs = concat_obj(values)
    return outputs
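
# Example (illustrative): tensors are concatenated along `axis`; all other
# dimensions must match, e.g.
#   concat((ones((2, 3)), zeros((2, 3))), axis=0)   # shape (4, 3)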


def convert_to_tensor(value, dtype=None):
    """
    Converts the given value to a Tensor.

    Parameters
    ----------
    value : object
        An object whose type has a registered Tensor conversion function.
    dtype : optional
        Optional element type for the returned tensor. If missing, the type is inferred from the type of value.

    Returns
    -------
    A Tensor based on value.
    """
    # TODO testing value
    return Tensor(value, dtype=dtype)


def convert_to_numpy(value):
    return value.asnumpy()


def sqrt(x):
    """
    Computes square root of x element-wise.

    Parameters
    ----------
    x : tensor
        Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.

    Returns
    -------
    A Tensor. Has the same type as x.
    """
    sqrt_obj = P.Sqrt()
    outputs = sqrt_obj(x)
    return outputs


class ReduceSum(Cell):

    def __init__(self, axis):
        super(ReduceSum, self).__init__()
        self.axis = axis
        self.reduce_sum = P.ReduceSum(keep_dims=False)

    def construct(self, input):
        return self.reduce_sum(input, self.axis)


class ReduceMean(Cell):

    def __init__(self, axis):
        super(ReduceMean, self).__init__()
        self.axis = axis
        self.reducemean = P.ReduceMean(keep_dims=False)

    def construct(self, inputs):
        output = self.reducemean(inputs, self.axis)
        return output


def reduce_mean(input_tensor, axis=None):
    """
    Computes the mean of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
    The reduced tensor.
    """
    Rmean_obj = P.ReduceMean(keep_dims=False)
    outputs = Rmean_obj(input_tensor, axis)
    return outputs


class ReduceMax(Cell):

    def __init__(self, axis):
        super(ReduceMax, self).__init__()
        self.axis = axis
        self.reducemax = P.ReduceMax(keep_dims=False)

    def construct(self, inputs):
        output = self.reducemax(inputs, self.axis)
        return output


def reduce_max(input_tensor, axis=None):
    """
    Computes the maximum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
    The reduced tensor.
    """
    Rmax_obj = P.ReduceMax(keep_dims=False)
    outputs = Rmax_obj(input_tensor, axis)
    return outputs


def reduce_min(input_tensor, axis=None):
    """
    Computes the minimum of elements across dimensions of a tensor.

    Parameters
    ----------
    input_tensor : tensor
        The tensor to reduce. Should have real numeric type.
    axis : int
        The dimensions to reduce. If None (the default), reduces all dimensions.
        Must be in the range [-rank(input_tensor), rank(input_tensor)).
    name : str
        A name for the operation (optional).

    Returns
    -------
    The reduced tensor.
    """
    Rmin_obj = P.ReduceMin(keep_dims=False)
    outputs = Rmin_obj(input_tensor, axis)
    return outputs
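
# Example (illustrative): the Reduce* wrappers drop the reduced axes
# (keep_dims=False), e.g.
#   x = convert_to_tensor(np.arange(6.0).reshape(2, 3))
#   reduce_max(x, axis=1)    # -> [2., 5.]
#   reduce_mean(x, axis=0)   # -> [1.5, 2.5, 3.5]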


class Pad(Cell):

    def __init__(self, paddings, mode="REFLECT"):
        super(Pad, self).__init__()
        if mode not in ["REFLECT", "SYMMETRIC"]:
            raise Exception("Unsupported mode: {}".format(mode))
        self.pad = P.MirrorPad(mode=mode)
        self.paddings = Tensor(paddings)

    def construct(self, x):
        return self.pad(x, self.paddings)


def pad(tensor, paddings, mode='CONSTANT', constant_values=0):
    """
    Pads a tensor.

    Parameters
    ----------
    tensor : tensor
        A Tensor.
    paddings : tuple
        A tuple of type int32.
    mode : str
        One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    constant_values : int
        In "CONSTANT" mode, the scalar pad value to use. Must be same type as tensor.

    Returns
    -------
    A Tensor. Has the same type as tensor.
    """
    raise NotImplementedError


class Unstack(Cell):

    def __init__(self, axis, num=None):
        super(Unstack, self).__init__()
        if num is not None:
            raise ValueError("The num parameter does not need to be set.")
        self.unstack = P.Unpack(axis=axis)

    def construct(self, values):
        return self.unstack(values)


class Stack(Cell):

    def __init__(self, axis=0):
        super(Stack, self).__init__()
        self.stack = P.Pack(axis=axis)

    def construct(self, values):
        return self.stack(values)


def stack(values, axis=0):
    """
    Stacks a list of rank-R tensors into one rank-(R+1) tensor.

    Parameters
    ----------
    values : list
        A list of Tensor objects with the same shape and type.
    axis : int
        An int. The axis to stack along. Defaults to the first dimension.
        Negative values wrap around, so the valid range is [-(R+1), R+1).

    Returns
    -------
    A stacked Tensor with the same type as values.
    """
    _stack = P.Pack(axis=axis)
    return _stack(values)


class Meshgrid(Cell):

    def __init__(self, indexing='xy'):
        super(Meshgrid, self).__init__()
        self._meshgrid = P.Meshgrid(indexing=indexing)

    def construct(self, *args):
        inputs = tuple(*args)
        return self._meshgrid(inputs)


def meshgrid(*args, **kwargs):
    """
    Broadcasts parameters for evaluation on an N-D grid.

    Parameters
    ----------
    x : tensor
        Tensors with rank 1.
    y : tensor
        Tensors with rank 1.

    Returns
    -------
    A list of N Tensors with rank N.
    """
    _meshgrid = P.Meshgrid(**kwargs)
    return _meshgrid(*args)


def range(start, limit=None, delta=1, dtype=None):
    """
    Creates a sequence of numbers.

    Parameters
    ----------
    start : tensor
        A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None;
        otherwise, acts as range limit and first entry defaults to 0.
    limit : tensor
        A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None,
        defaults to the value of start while the first entry of the range defaults to 0.
    delta : tensor
        A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
    dtype : type
        The type of the elements of the resulting tensor.

    Returns
    -------
    An 1-D Tensor of type dtype.
    """
    pass


class ExpandDims(Cell):

    def __init__(self, axis):
        super(ExpandDims, self).__init__()
        self.axis = axis
        self.expand_dims = P.ExpandDims()

    def construct(self, input):
        output = self.expand_dims(input, self.axis)
        return output


def expand_dims(input, axis):
    """
    Inserts a dimension of 1 into a tensor's shape.

    Parameters
    ----------
    input : tensor
        A Tensor.
    axis : int
        0-D (scalar). Specifies the dimension index at which to expand the shape of input.
        Must be in the range [-rank(input) - 1, rank(input)].

    Returns
    -------
    A Tensor with the same data as input, but its shape has an additional dimension of size 1 added.
    """
    expand_obj = P.ExpandDims()
    outputs = expand_obj(input, axis)
    return outputs


class Tile(Cell):

    def __init__(self):
        super(Tile, self).__init__()
        self.tile = P.Tile()

    def construct(self, input, multiples):
        return self.tile(input, tuple(multiples))


def tile(input, multiples):
    """
    Constructs a tensor by tiling a given tensor.

    Parameters
    ----------
    input : tensor
        A Tensor. 1-D or higher.
    multiples : tensor
        Must be one of the following types: int32, int64. 1-D.
        Length must be the same as the number of dimensions in input

    Returns
    -------
    A Tensor. Has the same type as input.
    """
    tile_obj = P.Tile()
    outputs = tile_obj(input, multiples)
    return outputs
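
# Example (illustrative): expand_dims/tile compose like their TensorFlow
# counterparts, e.g.
#   x = ones((2, 3))
#   expand_dims(x, 0)        # shape (1, 2, 3)
#   tile(x, (2, 1))          # shape (4, 3)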


class Cast(Cell):

    def __init__(self, dtype):
        super(Cast, self).__init__()
        self.dtype = dtype
        self.cast = P.Cast()

    def construct(self, input):
        return self.cast(input, self.dtype)


def cast(x, dtype):
    """
    Casts a tensor to a new type.

    Parameters
    ----------
    x : tensor
        A Tensor or SparseTensor or IndexedSlices of numeric type.
        It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64.
    dtype : dtype
        The destination type. The list of supported dtypes is the same as x

    Returns
    -------
    A Tensor or SparseTensor or IndexedSlices with same shape as x and same type as dtype.
    """
    cast_obj = P.Cast()
    outputs = cast_obj(x, dtype)
    return outputs
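
# Example (illustrative):
#   cast(ones((2, 2)), int32)   # float32 -> int32 copy of the tensor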


class Transpose(Cell):

    def __init__(self, perm, conjugate=False):
        super(Transpose, self).__init__()
        self.perm = tuple(perm)
        self.conjugate = conjugate
        self.transpose = P.Transpose()
        if self.conjugate:
            raise NotImplementedError("conjugate not implemented")

    def construct(self, a):
        return self.transpose(a, self.perm)


def transpose(a, perm=None, conjugate=False):
    """
    Transposes a.

    Parameters
    ----------
    a : tensor
        A Tensor.
    perm : int
        A permutation of the dimensions of a.
    conjugate : bool
        Setting it to True is mathematically equivalent to ms.math.conj(ms.transpose(input)).

    Returns
    -------
    A transposed Tensor.
    """
    # TODO conjugate
    trans_obj = P.Transpose()
    outputs = trans_obj(a, perm)
    return outputs


def gather_nd(params, indices, batch_dims=0):
    """
    Gather slices from params into a Tensor with shape specified by indices.

    Parameters
    ----------
    params : tensor
        The tensor from which to gather values.
    indices : tensor
        Must be one of the following types: int32, int64. Index tensor.
    batch_dims : int
        An integer or a scalar 'Tensor'. The number of batch dimensions.

    Returns
    -------
    A Tensor. Has the same type as params.
    """
    pass


def clip_by_value(t, clip_value_min, clip_value_max):
    """
    Clips tensor values to a specified min and max.

    Parameters
    ----------
    t : tensor
        A Tensor or IndexedSlices
    clip_value_min : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The minimum value to clip by
    clip_value_max : tensor
        A 0-D (scalar) Tensor, or a Tensor with the same shape as t. The maximum value to clip by

    Returns
    -------
    A clipped Tensor or IndexedSlices.
    """
    min_value = Tensor(clip_value_min, mstype.float32)
    max_value = Tensor(clip_value_max, mstype.float32)
    output = C.clip_by_value(t, min_value, max_value)
    return output
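
# Example (illustrative): values outside [clip_value_min, clip_value_max] are
# saturated, e.g.
#   clip_by_value(convert_to_tensor([-2.0, 0.5, 3.0]), 0.0, 1.0)   # -> [0.0, 0.5, 1.0]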


def split(value, num_or_size_splits, axis=0, num=None):
    """
    Splits a tensor into sub tensors.

    Parameters
    ----------
    value : tensor
        The Tensor to split.
    num_or_size_splits : list
        Either an integer indicating the number of splits along split_dim or a 1-D integer Tensor or
        Python list containing the sizes of each output tensor along split_dim.
    axis : int
        The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.
    num : int
        used to specify the number of outputs when it cannot be inferred from the shape of size_splits.

    Returns
    -------
    Tensor objects resulting from splitting value.
    """
    pass


class Floor(Cell):

    def __call__(self, *args, **kwargs):
        raise NotImplementedError


def floor(x):
    raise NotImplementedError


def gather(params, indices):
    raise NotImplementedError


def linspace(start, stop, num):
    raise NotImplementedError


def slice(inputs, starts, sizes):
    raise NotImplementedError


def add_n(inputs):
    raise NotImplementedError


class OneHot(Cell):

    def __init__(self, axis=-1, depth=1, on_value=1.0, off_value=0.0, dtype=mstype.float32):
        super(OneHot, self).__init__()
        self.onehot = P.OneHot(axis)
        self.depth = depth
        self.dtype = dtype
        self.on_value = F.cast(on_value, self.dtype)
        self.off_value = F.cast(off_value, self.dtype)

    def construct(self, indices):
        return self.onehot(indices, self.depth, self.on_value, self.off_value)


class L2Normalize(Cell):

    def __init__(self, axis=None, epsilon=1e-12):
        super(L2Normalize, self).__init__()
        pass

    def construct(self, input, *args, **kwargs):
        pass


class EmbeddingLookup(Cell):

    def __init__(self, max_norm=0):
        super(EmbeddingLookup, self).__init__()
        self.max_norm = max_norm
        self.embedding_lookup = P.EmbeddingLookup()

    def construct(self, params, ids, *args, **kwargs):
        return self.embedding_lookup(params, ids, self.max_norm)


class NCELoss(Cell):

    def __init__(self, num_true=1, sampled_values=None, remove_accidental_hits=False):
        super(NCELoss, self).__init__()
        pass

    def construct(self, weights, biases, labels, inputs, num_sampled, num_classes):
        raise NotImplementedError


class NotEqual(Cell):

    def __init__(self):
        super(NotEqual, self).__init__()
        self.not_equal = P.NotEqual()

    def construct(self, x, y):
        outputs = self.not_equal(x, y)
        return outputs


class CountNonzero(object):

    def __init__(self, keepdims=None, dtype=int64):
        self.keepdims = keepdims
        self.dtype = dtype

    def __call__(self, input, axis=None):
        input = self.convert_dtype(input)
        return count_nonzero(x=input, axis=axis, keep_dims=self.keepdims, dtype=self.dtype)

    def bool_convert_to_tensor(self, x):
        x = x.asnumpy()
        shapes = x.shape
        b = np.ones(shapes)
        if len(shapes) == 1:
            for i in range(shapes[0]):
                if x[i] == True:
                    b[i] = 1
                else:
                    b[i] = 0
        if len(shapes) == 2:
            for i in range(shapes[0]):
                for j in range(shapes[1]):
                    if x[i][j] == True:
                        b[i][j] = 1
                    else:
                        b[i][j] = 0
        return Tensor(b, dtype=float32)

    def convert_dtype(self, input):
        if len(input.shape) == 1 and type(input[0]) is bool:
            output = self.bool_convert_to_tensor(input)
        elif len(input.shape) == 2 and type(input[0][0]) is bool:
            output = self.bool_convert_to_tensor(input)
        else:
            output = input
        return output


class Resize(Cell):

    def __init__(self, scale, method, antialias=False, data_format='channels_last', ksize=None):
        super(Resize, self).__init__()
        self.data_format = data_format
        if method not in ['nearest', 'bilinear']:
            raise ValueError('The method must be "nearest" or "bilinear".')
        self.method = method
        if ksize is None:
            raise ValueError('The "bilinear" and "nearest" method must enter ksize. The dimension of size must be 2 (H, W).')
        out_size = (int(ksize[0] * scale[0]), int(ksize[1] * scale[1]))
        if self.method == 'nearest':
            self.resize = P.ResizeNearestNeighbor(size=out_size, align_corners=antialias)
        elif self.method == 'bilinear':
            self.resize = P.ResizeBilinear(size=out_size)

    def construct(self, inputs):
        if self.data_format == 'channels_last':
            inputs = nhwc_to_nchw(inputs)
        outputs = self.resize(inputs)
        if self.data_format == 'channels_last':
            outputs = nchw_to_nhwc(outputs)
        return outputs


def resize(inputs, output_size, method, antialias):
    raise NotImplementedError


class ZeroPadding1D(Cell):

    def __init__(self, padding):
        super(ZeroPadding1D, self).__init__()
        if np.size(padding) == 2:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)


class ZeroPadding2D(Cell):

    def __init__(self, padding):
        super(ZeroPadding2D, self).__init__()
        if np.size(padding) == 4:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)


class ZeroPadding3D(Cell):

    def __init__(self, padding):
        super(ZeroPadding3D, self).__init__()
        if np.size(padding) == 6:
            self.pad = P.Pad(paddings=padding)
        else:
            raise ValueError("The shape of parameter paddings is (N, 2). N is the rank of input data.")

    def construct(self, inputs):
        return self.pad(inputs)


class Sign(Cell):

    def __init__(self):
        super(Sign, self).__init__()
        self.sign = P.Sign()

    def construct(self, x):
        return self.sign(x)


class Ceil(Cell):

    def __init__(self):
        super(Ceil, self).__init__()
        self.ceil = P.Ceil()

    def construct(self, x):
        return self.ceil(x)


def ceil(x):
    _ceil = P.Ceil()
    return _ceil(x)


def multiply(x, y):
    raise NotImplementedError


def divide(x, y):
    return msnp.divide(x, y)


def identity(x):
    raise NotImplementedError


class BatchToSpace(Cell):

    def __init__(self, block_size, crops):
        super(BatchToSpace, self).__init__()
        self.batch_to_space = P.BatchToSpace(block_size=block_size, crops=crops)

    def __call__(self, input_x):
        return self.batch_to_space(input_x)


class DepthToSpace(Cell):

    def __init__(self, block_size, data_format='NHWC'):
        super(DepthToSpace, self).__init__()
        self.data_format = data_format
        self.depth_to_space = P.DepthToSpace(block_size=block_size)

    def __call__(self, input):
        if self.data_format == 'NHWC':
            input = nhwc_to_nchw(input)
        output = self.depth_to_space(input)
        if self.data_format == 'NHWC':
            output = nchw_to_nhwc(output)
        return output

TensorLayer3.0 is a deep learning library that supports multiple deep learning frameworks as its computing backends. Compatibility with TensorFlow, PyTorch, MindSpore, and Paddle is planned.
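
A minimal usage sketch of the backend helpers defined above (the module import path `mindspore_backend` and all values below are illustrative assumptions, not taken from the repository):

    import numpy as np
    import mindspore_backend as B

    B.set_context(device_target='CPU')                    # forwarded to mindspore.context.set_context
    x = B.convert_to_tensor(np.ones((1, 3), dtype=np.float32))
    w = B.random_normal((3, 4), stddev=0.02, seed=1)
    y = B.add(B.matmul(x, w), B.zeros((4,)))
    print(B.get_tensor_shape(y))                          # -> [1, 4]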