csum_lsx.S — OpenBLAS LoongArch64 LSX complex-sum (CSUM/ZSUM) kernel, 6.7 kB.
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"

/*
 * CSUM/ZSUM kernel (LoongArch64, LSX).
 *
 * Computes  s = sum_{i=0..N-1} ( X[i].re + X[i].im )  -- the plain
 * (non-absolute) sum of every real and imaginary part of a complex vector.
 *
 * In:   N    ($r4)  number of complex elements
 *       X    ($r5)  base address of the vector
 *       INCX ($r6)  stride between elements, in complex elements
 * Out:  $f0          the sum (single or double precision per DOUBLE)
 *
 * SIZE, ZBASE_SHIFT, PROLOGUE/EPILOGUE and the scalar LD/ADD macros come
 * from "common.h"; the file is assembled once with DOUBLE defined (zsum)
 * and once without (csum).
 */
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17        // loop counter
#define TEMP $r18     // byte size of one complex element (1 << ZBASE_SHIFT)
#define t1 $r15       // t1..t4: integer scratch for the strided gather loads
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define a1 $f12       // a1..a4: scalar FP temporaries for the tail loops
#define a2 $f13
#define a3 $f14
#define a4 $f15
#define s1 $f16       // scalar accumulator; low lane of res1 (see below)
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define res1 $vr16    // vector accumulator; LSX $vr16 overlaps FPR $f16 (s1),
                      // so the reduced vector sum and the scalar tail sum
                      // share the same register
#define res2 $vr17
#define res3 $vr18
    PROLOGUE
    vxor.v res1, res1, res1           // res1 = 0 (this also zeroes s1)
    vxor.v res2, res2, res2
    bge $r0, N, .L999                 // N <= 0: return 0
    bge $r0, INCX, .L999              // INCX <= 0: return 0
    li.d TEMP, 1
    slli.d TEMP, TEMP, ZBASE_SHIFT    // TEMP = bytes per complex element
    slli.d INCX, INCX, ZBASE_SHIFT    // INCX: elements -> bytes
    srai.d I, N, 3                    // I = N / 8; vector loops eat 8 complex/iter
    bne INCX, TEMP, .L20              // non-unit stride -> gather path
    bge $r0, I, .L13                  // fewer than 8 elements -> scalar tail only
    .align 3

/* Contiguous path: accumulate 8 complex elements (16 scalars) per iteration. */
.L11:
#ifdef DOUBLE
    vld VX0, X, 0 * SIZE              // four pairs of 128-bit loads,
    vld VX1, X, 2 * SIZE              // 2 doubles each = 8 complex total
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2
    vld VX2, X, 4 * SIZE
    vld VX3, X, 6 * SIZE
    vfadd.d res2, VX2, VX3
    vfadd.d res1, res1, res2
    vld VX0, X, 8 * SIZE
    vld VX1, X, 10 * SIZE
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2
    vld VX2, X, 12 * SIZE
    vld VX3, X, 14 * SIZE
    vfadd.d res2, VX2, VX3
    vfadd.d res1, res1, res2
#else
    vld VX0, X, 0 * SIZE              // four 128-bit loads, 4 floats each
    vld VX1, X, 4 * SIZE              // = 16 floats = 8 complex
    vfadd.s res2, VX0, VX1
    vld VX2, X, 8 * SIZE
    vld VX3, X, 12 * SIZE
    vfadd.s res3, VX2, VX3
    vfadd.s res2, res3, res2
    vfadd.s res1, res1, res2
#endif
    addi.d I, I, -1
    addi.d X, X, 16 * SIZE            // advance past 16 scalars (8 complex)
    blt $r0, I, .L11
    .align 3

/* Horizontal reduction: fold all lanes of res1 into lane 0 (== s1). */
.L12:
#ifdef DOUBLE
    vreplvei.d VX1, res1, 1           // broadcast high lane
    vfadd.d res1, VX1, res1           // lane 0 = lane 0 + lane 1
#else
    vreplvei.w VX1, res1, 1
    vreplvei.w VX2, res1, 2
    vreplvei.w VX3, res1, 3
    vfadd.s res1, VX1, res1           // lane 0 accumulates lanes 1..3
    vfadd.s res1, VX2, res1
    vfadd.s res1, VX3, res1
#endif
    .align 3

/* Contiguous scalar tail: remaining N % 8 elements. */
.L13:
    andi I, N, 7
    bge $r0, I, .L999
    .align 3
.L14:
    LD a1, X, 0 * SIZE                // a1 = re
    LD a2, X, 1 * SIZE                // a2 = im
    ADD a1, a1, a2
    ADD s1, a1, s1                    // s1 already holds the vector partial sum
    addi.d I, I, -1
    addi.d X, X, 2 * SIZE             // next complex element (unit stride)
    blt $r0, I, .L14
    b .L999
    .align 3

/* Strided path (INCX != 1): gather with scalar loads, add as vectors. */
.L20:
    bge $r0, I, .L23                  // fewer than 8 elements -> scalar tail only
    .align 3
.L21:
#ifdef DOUBLE
    ld.d t1, X, 0 * SIZE              // each vinsgr2vr pair packs one complex
    ld.d t2, X, 1 * SIZE              // (re, im) into a 2-lane vector
    add.d X, X, INCX
    vinsgr2vr.d VX0, t1, 0
    vinsgr2vr.d VX0, t2, 1
    ld.d t1, X, 0 * SIZE
    ld.d t2, X, 1 * SIZE
    vinsgr2vr.d VX1, t1, 0
    vinsgr2vr.d VX1, t2, 1
    add.d X, X, INCX
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2
    ld.d t3, X, 0 * SIZE
    ld.d t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.d VX0, t3, 0
    vinsgr2vr.d VX0, t4, 1
    ld.d t3, X, 0 * SIZE
    ld.d t4, X, 1 * SIZE
    vinsgr2vr.d VX1, t3, 0
    vinsgr2vr.d VX1, t4, 1
    add.d X, X, INCX
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2
    ld.d t1, X, 0 * SIZE
    ld.d t2, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.d VX0, t1, 0
    vinsgr2vr.d VX0, t2, 1
    ld.d t1, X, 0 * SIZE
    ld.d t2, X, 1 * SIZE
    vinsgr2vr.d VX1, t1, 0
    vinsgr2vr.d VX1, t2, 1
    add.d X, X, INCX
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2
    ld.d t3, X, 0 * SIZE
    ld.d t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.d VX0, t3, 0
    vinsgr2vr.d VX0, t4, 1
    ld.d t3, X, 0 * SIZE
    ld.d t4, X, 1 * SIZE
    vinsgr2vr.d VX1, t3, 0
    vinsgr2vr.d VX1, t4, 1
    add.d X, X, INCX
    vfadd.d res2, VX0, VX1
    vfadd.d res1, res1, res2          // 8 complex accumulated this iteration
#else
    ld.w t1, X, 0 * SIZE              // two strided complex elements pack
    ld.w t2, X, 1 * SIZE              // into one 4-lane float vector
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    ld.w t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.w VX0, t1, 0
    vinsgr2vr.w VX0, t2, 1
    vinsgr2vr.w VX0, t3, 2
    vinsgr2vr.w VX0, t4, 3
    ld.w t1, X, 0 * SIZE
    ld.w t2, X, 1 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    ld.w t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.w VX1, t1, 0
    vinsgr2vr.w VX1, t2, 1
    vinsgr2vr.w VX1, t3, 2
    vinsgr2vr.w VX1, t4, 3
    vfadd.s res2, VX0, VX1
    ld.w t1, X, 0 * SIZE
    ld.w t2, X, 1 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    ld.w t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.w VX2, t1, 0
    vinsgr2vr.w VX2, t2, 1
    vinsgr2vr.w VX2, t3, 2
    vinsgr2vr.w VX2, t4, 3
    ld.w t1, X, 0 * SIZE
    ld.w t2, X, 1 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    ld.w t4, X, 1 * SIZE
    add.d X, X, INCX
    vinsgr2vr.w VX3, t1, 0
    vinsgr2vr.w VX3, t2, 1
    vinsgr2vr.w VX3, t3, 2
    vinsgr2vr.w VX3, t4, 3
    vfadd.s res3, VX2, VX3
    vfadd.s res2, res3, res2
    vfadd.s res1, res1, res2          // 8 complex accumulated this iteration
#endif
    addi.d I, I, -1
    blt $r0, I, .L21
    .align 3

/* Horizontal reduction, identical to .L12. */
.L22:
#ifdef DOUBLE
    vreplvei.d VX1, res1, 1
    vfadd.d res1, VX1, res1
#else
    vreplvei.w VX1, res1, 1
    vreplvei.w VX2, res1, 2
    vreplvei.w VX3, res1, 3
    vfadd.s res1, VX1, res1
    vfadd.s res1, VX2, res1
    vfadd.s res1, VX3, res1
#endif
    .align 3

/* Strided scalar tail: remaining N % 8 elements. */
.L23:
    andi I, N, 7
    bge $r0, I, .L999
    .align 3
.L24:
    LD a1, X, 0 * SIZE                // re
    LD a2, X, 1 * SIZE                // im
    ADD a1, a1, a2
    ADD s1, a1, s1
    addi.d I, I, -1
    add.d X, X, INCX                  // advance one stride (bytes)
    blt $r0, I, .L24
    .align 3

.L999:
    /* s1/$f16 holds the final sum (lane 0 of res1 after the reduction, plus
     * any scalar tail). Move it to the ABI floating-point return reg $f0.
     * NOTE(review): fmov.s is used even for the DOUBLE build; a double result
     * would conventionally use fmov.d -- confirm against the LoongArch spec
     * that fmov.s carries the full 64-bit payload here. */
    fmov.s $f0, $f16
    jirl $r0, $r1, 0x0                // return (jump to $r1 / ra)
    .align 3
    EPILOGUE