scal_lsx.S
/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
   3. Neither the name of the OpenBLAS project nor the names of
      its contributors may be used to endorse or promote products
      derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
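
/* SCAL kernel for LoongArch LSX: scales a (possibly strided) vector in
 * place.  In scalar terms it computes, roughly:
 *
 *     for (i = 0; i < N; i++)
 *         x[i * INCX] *= ALPHA;
 *
 * Special cases: ALPHA == 0 stores zeros without reading X, ALPHA == 1
 * returns immediately, and a non-positive N or INCX is a no-op.  The
 * main loops below are unrolled by eight elements.
 */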
#define ASSEMBLER

#include "common.h"

#define N      $r4
#define ALPHA  $f0
#define X      $r7
#define INCX   $r8
#define I      $r12
#define TEMP   $r13
#define t1     $r14
#define t2     $r18
#define t3     $r15
#define t4     $r17
#define XX     $r16
#define VX0    $vr12
#define VX1    $vr13
#define VT0    $vr14
#define VT1    $vr15
#define VALPHA $vr19
#define a1     $f8
#define a2     $f23
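
/* Dispatch: .L20 handles ALPHA == 0, .L30 the contiguous case
 * (INCX == 1), and .L10 the general strided case.  The leftover
 * N % 8 elements are handled one at a time by the tail loops
 * (.L33 for scaling, .L23/.L27 for zeroing). */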
    PROLOGUE

    bge     $r0, N, .L999
    bge     $r0, INCX, .L999
    li.d    TEMP, 1
    movgr2fr.d a1, $r0
    FFINT   a1, a1                    // a1 = 0.0
    movgr2fr.d a2, TEMP
    FFINT   a2, a2                    // a2 = 1.0
    slli.d  TEMP, TEMP, BASE_SHIFT    // TEMP = element size in bytes
    slli.d  INCX, INCX, BASE_SHIFT    // INCX in bytes
    CMPEQ   $fcc0, ALPHA, a1
    bcnez   $fcc0, .L20               // ALPHA == 0
    CMPEQ   $fcc0, ALPHA, a2
    bcnez   $fcc0, .L999              // ALPHA == 1, nothing to do
    srai.d  I, N, 3                   // I = N / 8
    beq     INCX, TEMP, .L30          // ALPHA != 0,1 and INCX == 1
    MTG     TEMP, ALPHA
#ifdef DOUBLE
    vreplgr2vr.d VALPHA, TEMP         // broadcast ALPHA to all lanes
#else
    vreplgr2vr.w VALPHA, TEMP
#endif
    move    XX, X                     // XX trails X for the stores
    .align 3
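
/* Strided path (ALPHA != 0,1 and INCX != 1): eight elements per
 * iteration.  Loads advance X while the element-wise stores through
 * XX trail behind, with the multiplies interleaved in between. */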
.L10: // ALPHA != 0,1 and INCX != 1
    bge     $r0, I, .L32
    .align 3

.L11:
#ifdef DOUBLE
    ld.d    t1, X, 0 * SIZE
    add.d   X, X, INCX
    ld.d    t2, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.d VX0, t1, 0
    vinsgr2vr.d VX0, t2, 1
    vfmul.d VT0, VX0, VALPHA
    ld.d    t3, X, 0 * SIZE
    add.d   X, X, INCX
    ld.d    t4, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.d VX1, t3, 0
    vinsgr2vr.d VX1, t4, 1
    vstelm.d VT0, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.d VT0, XX, 0, 1
    add.d   XX, XX, INCX
    vfmul.d VT1, VX1, VALPHA
    ld.d    t1, X, 0 * SIZE
    add.d   X, X, INCX
    ld.d    t2, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.d VX0, t1, 0
    vinsgr2vr.d VX0, t2, 1
    vstelm.d VT1, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.d VT1, XX, 0, 1
    add.d   XX, XX, INCX
    vfmul.d VT0, VX0, VALPHA
    ld.d    t3, X, 0 * SIZE
    add.d   X, X, INCX
    ld.d    t4, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.d VX1, t3, 0
    vinsgr2vr.d VX1, t4, 1
    vstelm.d VT0, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.d VT0, XX, 0, 1
    add.d   XX, XX, INCX
    vfmul.d VT1, VX1, VALPHA
    vstelm.d VT1, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.d VT1, XX, 0, 1
#else
    ld.w    t1, X, 0 * SIZE
    add.d   X, X, INCX
    ld.w    t2, X, 0 * SIZE
    add.d   X, X, INCX
    ld.w    t3, X, 0 * SIZE
    add.d   X, X, INCX
    ld.w    t4, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.w VX0, t1, 0
    vinsgr2vr.w VX0, t2, 1
    vinsgr2vr.w VX0, t3, 2
    vinsgr2vr.w VX0, t4, 3
    ld.w    t1, X, 0 * SIZE
    add.d   X, X, INCX
    ld.w    t2, X, 0 * SIZE
    add.d   X, X, INCX
    vfmul.s VT0, VX0, VALPHA
    ld.w    t3, X, 0 * SIZE
    add.d   X, X, INCX
    ld.w    t4, X, 0 * SIZE
    add.d   X, X, INCX
    vinsgr2vr.w VX1, t1, 0
    vinsgr2vr.w VX1, t2, 1
    vinsgr2vr.w VX1, t3, 2
    vinsgr2vr.w VX1, t4, 3
    vstelm.w VT0, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.w VT0, XX, 0, 1
    add.d   XX, XX, INCX
    vstelm.w VT0, XX, 0, 2
    add.d   XX, XX, INCX
    vstelm.w VT0, XX, 0, 3
    add.d   XX, XX, INCX
    vfmul.s VT1, VX1, VALPHA
    vstelm.w VT1, XX, 0, 0
    add.d   XX, XX, INCX
    vstelm.w VT1, XX, 0, 1
    add.d   XX, XX, INCX
    vstelm.w VT1, XX, 0, 2
    add.d   XX, XX, INCX
    vstelm.w VT1, XX, 0, 3
#endif
    add.d   XX, XX, INCX
    addi.d  I, I, -1
    blt     $r0, I, .L11
    b       .L32
    .align 3
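
/* ALPHA == 0: store zeros, no loads needed.  .L21 is the strided loop,
 * .L24/.L25 the contiguous loop using full-vector stores of zero. */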
.L20:
    srai.d  I, N, 3
    beq     INCX, TEMP, .L24
    bge     $r0, I, .L22
    .align 3

.L21:
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    ST      a1, X, 0
    add.d   X, X, INCX
    addi.d  I, I, -1
    blt     $r0, I, .L21
    .align 3

.L22:
    andi    I, N, 7
    bge     $r0, I, .L999
    .align 3

.L23:
    ST      a1, X, 0 * SIZE
    addi.d  I, I, -1
    add.d   X, X, INCX
    blt     $r0, I, .L23
    jirl    $r0, $r1, 0
    .align 3

.L24:
    bge     $r0, I, .L26 /* N < 8, INCX == 1 */
    .align 3

.L25:
    vxor.v  VX0, VX0, VX0
    vst     VX0, X, 0 * SIZE
#ifdef DOUBLE
    vst     VX0, X, 2 * SIZE
    vst     VX0, X, 4 * SIZE
    vst     VX0, X, 6 * SIZE
#else
    vst     VX0, X, 4 * SIZE
#endif
    addi.d  I, I, -1
    addi.d  X, X, 8 * SIZE
    blt     $r0, I, .L25
    .align 3

.L26:
    andi    I, N, 7
    bge     $r0, I, .L999
    .align 3

.L27:
    ST      a1, X, 0 * SIZE
    addi.d  I, I, -1
    addi.d  X, X, SIZE
    blt     $r0, I, .L27
    jirl    $r0, $r1, 0
    .align 3
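
/* Contiguous path (ALPHA != 0,1 and INCX == 1): full-vector loads,
 * multiplies, and stores, eight elements per iteration. */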
.L30:
    bge     $r0, I, .L32 /* N < 8, INCX == 1 */
    MTG     TEMP, ALPHA
#ifdef DOUBLE
    vreplgr2vr.d VALPHA, TEMP
#else
    vreplgr2vr.w VALPHA, TEMP
#endif
    .align 3

.L31:
    vld     VX0, X, 0 * SIZE
#ifdef DOUBLE
    vld     VX1, X, 2 * SIZE
    vfmul.d VT0, VX0, VALPHA
    vfmul.d VT1, VX1, VALPHA
    vld     VX0, X, 4 * SIZE
    vst     VT0, X, 0 * SIZE
    vst     VT1, X, 2 * SIZE
    vfmul.d VT0, VX0, VALPHA
    vld     VX1, X, 6 * SIZE
    vst     VT0, X, 4 * SIZE
    vfmul.d VT1, VX1, VALPHA
    vst     VT1, X, 6 * SIZE
    addi.d  I, I, -1
#else
    vld     VX1, X, 4 * SIZE
    vfmul.s VT0, VX0, VALPHA
    vfmul.s VT1, VX1, VALPHA
    addi.d  I, I, -1
    vst     VT0, X, 0 * SIZE
    vst     VT1, X, 4 * SIZE
#endif
    addi.d  X, X, 8 * SIZE
    blt     $r0, I, .L31
    .align 3
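
/* Scalar tail: the remaining N % 8 elements, one at a time. */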
.L32:
    andi    I, N, 7
    bge     $r0, I, .L999
    .align 3

.L33:
    LD      a1, X, 0 * SIZE
    addi.d  I, I, -1
    MUL     a1, ALPHA, a1
    ST      a1, X, 0 * SIZE
    add.d   X, X, INCX
    blt     $r0, I, .L33
    jirl    $r0, $r1, 0
    .align 3

.L999:
    jirl    $r0, $r1, 0x0

    EPILOGUE