
test_array_ops.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test array ops """
import functools
import numpy as np
import pytest

from mindspore._c_expression import signature_dtype as sig_dtype
from mindspore._c_expression import signature_kind as sig_kind
from mindspore._c_expression import signature_rw as sig_rw
import mindspore as ms
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops import prim_attr_register
from mindspore.ops.primitive import PrimitiveWithInfer
import mindspore.context as context

from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config
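

# This file mixes standalone pytest functions (directly below) with Cell and
# primitive "blocks" that are collected into case lists at the bottom and run
# through the imported compile-forward and verify-exception pipelines.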
def test_expand_dims():
    input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
    expand_dims = P.ExpandDims()
    output = expand_dims(input_tensor, 0)
    assert output.asnumpy().shape == (1, 2, 2)


def test_cast():
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_x = Tensor(input_np)
    td = ms.int32
    cast = P.Cast()
    result = cast(input_x, td)
    expect = input_np.astype(np.int32)
    assert np.all(result.asnumpy() == expect)


@non_graph_engine
def test_reshape():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()
    output = reshape(input_tensor, shp)
    assert output.asnumpy().shape == (3, 2)


def test_transpose():
    input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
    perm = (0, 2, 1)
    expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]])

    transpose = P.Transpose()
    output = transpose(input_tensor, perm)
    assert np.all(output.asnumpy() == expect)


def test_squeeze():
    input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
    squeeze = P.Squeeze(2)
    output = squeeze(input_tensor)
    assert output.asnumpy().shape == (3, 2)


def test_invert_permutation():
    invert_permutation = P.InvertPermutation()
    x = (3, 4, 0, 2, 1)
    output = invert_permutation(x)
    expect = (2, 4, 3, 0, 1)
    assert np.all(output == expect)


def test_select():
    select = P.Select()
    cond = Tensor(np.array([[True, False, False], [False, True, True]]))
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]))
    output = select(cond, x, y)
    expect = np.array([[1, 8, 9], [10, 5, 6]])
    assert np.all(output.asnumpy() == expect)
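

# Argmin's output_type attribute is restricted to integer index types: the
# cases below check that int32/int64 are accepted and that float, uint8 and
# bool output types are rejected with a TypeError.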
def test_argmin_invalid_output_type():
    P.Argmin(-1, mstype.int64)
    P.Argmin(-1, mstype.int32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float64)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.uint8)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.bool_)
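

# CustomOP declares a dtype-only __mindspore_signature__: positional arguments
# that share the same sig_dtype tag (T, T1, T2, ...) are grouped for implicit
# conversion to a common dtype, which CustNet1 below exercises with mixed
# Tensor, int and float inputs.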
class CustomOP(PrimitiveWithInfer):
    __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1,
                               sig_dtype.T1, sig_dtype.T2, sig_dtype.T2,
                               sig_dtype.T2, sig_dtype.T3, sig_dtype.T4)

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3, p4, p5, p6, p7, p8, p9):
        raise NotImplementedError
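

# CustomOP2 uses the full signature form: each entry is
# (name, read/write flag, parameter kind, default, dtype group). RW_WRITE marks
# 'p1' as a parameter the operator may write back to, while 'p2' and 'p3' are
# read-only and share the same dtype group T.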
class CustomOP2(PrimitiveWithInfer):
    __mindspore_signature__ = (
        ('p1', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
        ('p2', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
        ('p3', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3):
        raise NotImplementedError


class CustNet1(Cell):
    def __init__(self):
        super(CustNet1, self).__init__()
        self.op = CustomOP()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3
        self.float1 = 5.1

    def construct(self):
        x = self.op(self.t1, self.t1, self.int1,
                    self.float1, self.int1, self.float1,
                    self.t2, self.t1, self.int1)
        return x


class CustNet2(Cell):
    def __init__(self):
        super(CustNet2, self).__init__()
        self.op = CustomOP2()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3

    def construct(self):
        return self.op(self.t1, self.t2, self.int1)
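

# Note that CustNet3 rebinds self.t2 from a Tensor to the Python int 1, so
# ReduceSum receives a scalar axis rather than a second Tensor.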
class CustNet3(Cell):
    def __init__(self):
        super(CustNet3, self).__init__()
        self.op = P.ReduceSum()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.t2 = 1

    def construct(self):
        return self.op(self.t1, self.t2)


class MathBinaryNet1(Cell):
    def __init__(self):
        super(MathBinaryNet1, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.max = P.Maximum()
        self.number = 3

    def construct(self, x):
        return self.add(x, self.number) + self.mul(x, self.number) + self.max(x, self.number)


class MathBinaryNet2(Cell):
    def __init__(self):
        super(MathBinaryNet2, self).__init__()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.logic_or = P.LogicalOr()
        self.logic_and = P.LogicalAnd()
        self.number = 3
        self.flag = True

    def construct(self, x):
        ret_less_equal = self.logic_and(self.less_equal(x, self.number), self.flag)
        ret_greater = self.logic_or(self.greater(x, self.number), self.flag)
        return self.logic_or(ret_less_equal, ret_greater)
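

# The block-rearrangement nets below all use block size 2 with zero crops or
# paddings; for example, BatchToSpace turns the (4, 1, 1, 1) input from the
# case list into a (1, 1, 2, 2) output, and SpaceToBatch performs the inverse
# rearrangement.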
class BatchToSpaceNet(Cell):
    def __init__(self):
        super(BatchToSpaceNet, self).__init__()
        block_size = 2
        crops = [[0, 0], [0, 0]]
        self.batch_to_space = P.BatchToSpace(block_size, crops)

    def construct(self, x):
        return self.batch_to_space(x)


class SpaceToBatchNet(Cell):
    def __init__(self):
        super(SpaceToBatchNet, self).__init__()
        block_size = 2
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch = P.SpaceToBatch(block_size, paddings)

    def construct(self, x):
        return self.space_to_batch(x)


class PackNet(Cell):
    def __init__(self):
        super(PackNet, self).__init__()
        self.pack = P.Pack()

    def construct(self, x):
        return self.pack((x, x))


class UnpackNet(Cell):
    def __init__(self):
        super(UnpackNet, self).__init__()
        self.unpack = P.Unpack()

    def construct(self, x):
        return self.unpack(x)


class SpaceToDepthNet(Cell):
    def __init__(self):
        super(SpaceToDepthNet, self).__init__()
        block_size = 2
        self.space_to_depth = P.SpaceToDepth(block_size)

    def construct(self, x):
        return self.space_to_depth(x)


class DepthToSpaceNet(Cell):
    def __init__(self):
        super(DepthToSpaceNet, self).__init__()
        block_size = 2
        self.depth_to_space = P.DepthToSpace(block_size)

    def construct(self, x):
        return self.depth_to_space(x)


class BatchToSpaceNDNet(Cell):
    def __init__(self):
        super(BatchToSpaceNDNet, self).__init__()
        block_shape = [2, 2]
        crops = [[0, 0], [0, 0]]
        self.batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)

    def construct(self, x):
        return self.batch_to_space_nd(x)


class SpaceToBatchNDNet(Cell):
    def __init__(self):
        super(SpaceToBatchNDNet, self).__init__()
        block_shape = [2, 2]
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)

    def construct(self, x):
        return self.space_to_batch_nd(x)


class RangeNet(Cell):
    def __init__(self):
        super(RangeNet, self).__init__()
        self.range_ops = inner.Range(1.0, 8.0, 2.0)

    def construct(self, x):
        return self.range_ops(x)
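

# Each case is a (name, config) pair: 'block' is the Cell or primitive under
# test and 'desc_inputs' holds the tensors fed to it by the compile-forward
# pipeline; an empty list means the block's construct() takes no inputs.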
test_case_array_ops = [
    ('CustNet1', {
        'block': CustNet1(),
        'desc_inputs': []}),
    ('CustNet2', {
        'block': CustNet2(),
        'desc_inputs': []}),
    ('CustNet3', {
        'block': CustNet3(),
        'desc_inputs': []}),
    ('MathBinaryNet1', {
        'block': MathBinaryNet1(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('MathBinaryNet2', {
        'block': MathBinaryNet2(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('BatchToSpaceNet', {
        'block': BatchToSpaceNet(),
        'desc_inputs': [Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))]}),
    ('SpaceToBatchNet', {
        'block': SpaceToBatchNet(),
        'desc_inputs': [Tensor(np.array([[[[1, 2], [3, 4]]]]).astype(np.float16))]}),
    ('PackNet', {
        'block': PackNet(),
        'desc_inputs': [Tensor(np.array([[[1, 2], [3, 4]]]).astype(np.float16))]}),
    ('UnpackNet', {
        'block': UnpackNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
    ('SpaceToDepthNet', {
        'block': SpaceToDepthNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
    ('DepthToSpaceNet', {
        'block': DepthToSpaceNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
    ('SpaceToBatchNDNet', {
        'block': SpaceToBatchNDNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
    ('BatchToSpaceNDNet', {
        'block': BatchToSpaceNDNet(),
        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
    ('RangeNet', {
        'block': RangeNet(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]}),
]

test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)


# use -k to select certain test cases, e.g.
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case
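

# Error cases: each 'block' pairs a constructor lambda with the exception the
# verify-exception pipeline expects it to raise, e.g. a float axis for Squeeze
# or a non-bool keep_dims for ReduceSum should raise TypeError.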
raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set