test_math_ops.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """ test math ops """
  16. import functools
  17. import numpy as np
  18. import mindspore as ms
  19. import mindspore.nn as nn
  20. from mindspore.common.api import _executor
  21. from mindspore.common import dtype as mstype
  22. from mindspore.ops import prim_attr_register, PrimitiveWithInfer
  23. from mindspore import Tensor
  24. from mindspore.ops import composite as C
  25. from mindspore.ops import operations as P
  26. from mindspore.ops import functional as F
  27. import mindspore.context as context
  28. from ..ut_filter import non_graph_engine
  29. from ....mindspore_test_framework.mindspore_test import mindspore_test
  30. from ....mindspore_test_framework.pipeline.forward.compile_forward \
  31. import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
  32. from ....mindspore_test_framework.pipeline.forward.verify_exception \
  33. import pipeline_for_verify_exception_for_case_by_case_config
  34. import pytest
  35. # pylint: disable=W0613
  36. # pylint: disable=W0231
  37. # W0613: unused-argument
  38. # W0231: super-init-not-called


def test_multiply():
    """ test_multiply """
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    input_y = Tensor(np.array([[0.1, 0.3, -3.6], [0.4, 0.5, -3.2]]))
    mul = P.Mul()
    result = mul(input_x, input_y)
    expect = np.array([[-0.01, 0.09, -12.96], [0.16, 0.25, 10.24]])
    diff = result.asnumpy() - expect
    error = np.ones(shape=[2, 3]) * 1.0e-6
    assert np.all(diff < error)
    assert np.all(-diff < error)
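
# Note: the two assertions above together check |result - expect| < 1e-6
# elementwise; an equivalent single check is np.all(np.abs(diff) < 1.0e-6).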


def test_sub():
    """ test_sub """
    input_x = Tensor(np.ones(shape=[3]))
    input_y = Tensor(np.zeros(shape=[3]))
    sub = P.Sub()
    result = sub(input_x, input_y)
    expect = np.ones(shape=[3])
    assert np.all(result.asnumpy() == expect)


def test_square():
    """ test_square """
    input_tensor = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    square = P.Square()
    result = square(input_tensor)
    expect = np.array([[1, 4, 9], [16, 25, 36]])
    assert np.all(result.asnumpy() == expect)


def test_sqrt():
    """ test_sqrt """
    input_tensor = Tensor(np.array([[4, 4], [9, 9]]))
    sqrt = P.Sqrt()
    expect = np.array([[2, 2], [3, 3]])
    result = sqrt(input_tensor)
    assert np.all(result.asnumpy() == expect)


class PowNet(nn.Cell):
    def __init__(self):
        super(PowNet, self).__init__()
        self.pow = P.Pow()

    def construct(self, x, y):
        return self.pow(x, y)


def test_pow():
    """ test_pow """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    power = Tensor(np.array(3.0, np.int64))
    power2 = Tensor(np.array(True, np.bool_))
    testpow = P.Pow()
    expect = np.array([[8, 8], [27, 27]])
    result = testpow(input_tensor, power)
    assert np.all(result.asnumpy() == expect)
    net = PowNet()
    # a bool exponent is rejected, whether passed as a Python bool
    # or as a bool tensor
    with pytest.raises(TypeError):
        net(input_tensor, True)
    with pytest.raises(TypeError):
        net(input_tensor, power2)


def test_exp():
    """ test_exp """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    testexp = P.Exp()
    result = testexp(input_tensor)
    expect = np.exp(np.array([[2, 2], [3, 3]]))
    assert np.all(result.asnumpy() == expect)


def test_realdiv():
    """ test_realdiv """
    x = Tensor(2048.0)
    y = Tensor(128.0)
    div = P.RealDiv()
    result = div(x, y)
    x = x.asnumpy()
    y = y.asnumpy()
    expect = x / y
    assert np.all(result.asnumpy() == expect)


def test_eye():
    """ test_eye """
    x = np.arange(3)
    expect = np.ones_like(x)
    expect = np.diag(expect)
    eye = P.Eye()
    eye_output = eye(3, 3, ms.float32)
    assert np.all(eye_output.asnumpy() == expect)


class VirtualLossGrad(PrimitiveWithInfer):
    """ VirtualLossGrad definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLossGrad"""

    def __call__(self, x, out, dout):
        raise NotImplementedError

    def infer_shape(self, x_shape, out_shape, dout_shape):
        return x_shape

    def infer_dtype(self, x_dtype, out_dtype, dout_dtype):
        return x_dtype


class VirtualLoss(PrimitiveWithInfer):
    """ VirtualLoss definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLoss"""

    def __call__(self, x):
        raise NotImplementedError

    def get_bprop(self):
        loss_grad = VirtualLossGrad()

        def bprop(x, out, dout):
            dx = loss_grad(x, out, dout)
            return (dx,)
        return bprop

    def infer_shape(self, x_shape):
        return [1]

    def infer_dtype(self, x_dtype):
        return x_dtype
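
# The two primitives above follow the PrimitiveWithInfer pattern: infer_shape
# and infer_dtype run at graph-compile time to propagate shape and dtype,
# __call__ raises because the primitive is never executed eagerly, and
# get_bprop supplies the custom gradient used when the graph is differentiated.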


class NetWithLoss(nn.Cell):
    """ NetWithLoss definition """

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrap(nn.Cell):
    """ GradWrap definition """

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return C.grad(self.network)(x, y, b)
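
# GradWrap-style cells invoke C.grad on the wrapped network inside construct,
# so compiling the wrapper builds the backward graph of the network, taking
# the gradient with respect to its first input (C.grad's default behavior).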


class MatMulNet(nn.Cell):
    """ MatMulNet definition """

    def __init__(self):
        super(MatMulNet, self).__init__()
        self.matmul = P.MatMul()
        self.biasAdd = P.BiasAdd()

    def construct(self, x, y, b):
        return self.biasAdd(self.matmul(x, y), b)


class NetWithLossSub(nn.Cell):
    """ NetWithLossSub definition """

    def __init__(self, network):
        super(NetWithLossSub, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class GradWrapSub(nn.Cell):
    """ GradWrapSub definition """

    def __init__(self, network):
        super(GradWrapSub, self).__init__()
        self.network = network

    def construct(self, x, y):
        return C.grad(self.network)(x, y)


class SubNet(nn.Cell):
    """ SubNet definition """

    def __init__(self):
        super(SubNet, self).__init__()
        self.sub = P.Sub()

    def construct(self, x, y):
        return self.sub(x, y)
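
# NpuFloatNet below exercises the NPU float-status overflow-detection pattern:
# NPUAllocFloatStatus allocates a status buffer, NPUClearFloatStatus resets it
# before the computation, and NPUGetFloatStatus records any overflow raised by
# the computation into that buffer. Reducing the buffer yields a flag that
# Select uses to either keep the result or replace it with zeros on overflow.
# add_flags(has_effect=True) marks the cell as side-effecting so these status
# ops are not reordered or optimized away.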


class NpuFloatNet(nn.Cell):
    """ NpuFloatNet definition """

    def __init__(self):
        super(NpuFloatNet, self).__init__()
        self.mul = P.Mul()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.fill = P.Fill()
        self.shape_op = P.Shape()
        self.select = P.Select()
        self.less = P.Less()
        self.cast = P.Cast()
        self.dtype = P.DType()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.sub = P.Sub()
        self.neg = P.Neg()
        self.add_flags(has_effect=True)

    def construct(self, x):
        init = self.alloc_status()
        self.clear_status(init)
        res = self.sub(x, self.neg(x))
        self.get_status(init)
        flag_sum = self.reduce_sum(init, (0,))
        base = self.cast(self.fill(self.dtype(res), self.shape_op(res), 0.0), self.dtype(flag_sum))
        cond = self.less(base, flag_sum)
        out = self.select(cond, self.cast(base, self.dtype(res)), res)
        return out


class DiagNet(nn.Cell):
    """ DiagNet definition """

    def __init__(self):
        super(DiagNet, self).__init__()
        self.fill = P.Fill()
        self.diag = P.Diag()

    def construct(self, x):
        return x - self.diag(self.fill(mstype.float32, (3,), 1.0))


class NetWithLossCumSum(nn.Cell):
    """ NetWithLossCumSum definition """

    def __init__(self, network):
        super(NetWithLossCumSum, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, input_):
        predict = self.network(input_)
        return self.loss(predict)


class GradWrapCumSum(nn.Cell):
    """ GradWrapCumSum definition """

    def __init__(self, network):
        super(GradWrapCumSum, self).__init__()
        self.network = network

    def construct(self, input_):
        return C.grad(self.network)(input_)


class NetCumSum(nn.Cell):
    """ NetCumSum definition """

    def __init__(self):
        super(NetCumSum, self).__init__()
        self.cumsum = P.CumSum()
        self.axis = 1

    def construct(self, input_):
        return self.cumsum(input_, self.axis)


class SignNet(nn.Cell):
    def __init__(self):
        super(SignNet, self).__init__()
        self.sign = P.Sign()

    def construct(self, x):
        return self.sign(x)


class AssignAdd(nn.Cell):
    def __init__(self):
        super().__init__()
        self.op = P.AssignAdd()
        self.inputdata = Parameter(initializer(1, [1], ms.float32), name="global_step")

    def construct(self, input_):
        self.inputdata = input_
        return self.op(self.inputdata, input_)


class FloorNet(nn.Cell):
    def __init__(self):
        super(FloorNet, self).__init__()
        self.floor = P.Floor()

    def construct(self, x):
        return self.floor(x)


class Log1pNet(nn.Cell):
    def __init__(self):
        super(Log1pNet, self).__init__()
        self.log1p = P.Log1p()

    def construct(self, x):
        return self.log1p(x)


test_case_math_ops = [
    ('MatMulGrad', {
        'block': GradWrap(NetWithLoss(MatMulNet())),
        'desc_inputs': [Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3]).astype(np.int32))],
        'desc_bprop': [Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3]).astype(np.int32))],
        'skip': ['backward']}),
    ('CumSumGrad', {
        'block': GradWrapCumSum(NetWithLossCumSum(NetCumSum())),
        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'skip': ['backward']}),
    ('Diag', {
        'block': DiagNet(),
        'desc_inputs': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('SubBroadcast', {
        'block': GradWrapSub(NetWithLossSub(SubNet())),
        'desc_inputs': [Tensor(np.ones([5, 3])), Tensor(np.ones([8, 5, 3]))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('NpuFloat_NotOverflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('NpuFloat_Overflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('Sign', {
        'block': SignNet(),
        'desc_inputs': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'skip': ['backward']}),
    ('Floor', {
        'block': FloorNet(),
        'desc_inputs': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'skip': ['backward']}),
    ('Log1p', {
        'block': Log1pNet(),
        'desc_inputs': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1.0, 2.0, 4.0]], np.float32))],
        'skip': ['backward']}),
]
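
# Each entry above follows the mindspore_test case-config schema: 'block' is
# the cell or callable under test, 'desc_inputs' are the forward inputs,
# 'desc_bprop' are the sensitivities fed to the backward pass, and 'skip'
# lists the pipeline stages (here 'backward') to omit.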

test_case_lists = [test_case_math_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)


# use -k to select certain testcases, e.g.
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


raise_set = [
    ('StridedSlice_1_Error', {
        'block': (lambda x: P.StridedSlice(begin_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_2_Error', {
        'block': (lambda x: P.StridedSlice(end_mask="1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_3_Error', {
        'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('StridedSlice_4_Error', {
        'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': TypeError}),
        'desc_inputs': [0]}),
    ('AssignAdd_Error', {
        'block': (P.AssignAdd(), {'exception': TypeError}),
        'desc_inputs': [[1]]}),
]
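
# In the exception-verification pipeline each 'block' is paired with the
# exception type it is expected to raise: constructing StridedSlice with a
# non-integer mask argument should fail with TypeError, and invoking the
# P.AssignAdd primitive through the pipeline is likewise expected to raise
# TypeError.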


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set