
symv_U_vector.c 13 kB

/***************************************************************************
Copyright (c) 2020, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"

#if !defined(DOUBLE)
/* single-precision (e32, LMUL=4) intrinsic aliases */
#define VSETVL(n) RISCV_RVV(vsetvl_e32m4)(n)
#define VSETVL_MAX RISCV_RVV(vsetvlmax_e32m1)()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT RISCV_RVV(vle32_v_f32m4)
#define VLSEV_FLOAT RISCV_RVV(vlse32_v_f32m4)
#define VSEV_FLOAT RISCV_RVV(vse32_v_f32m4)
#define VSSEV_FLOAT RISCV_RVV(vsse32_v_f32m4)
#ifdef RISCV_0p10_INTRINSICS
#define VFREDSUM_FLOAT(va, vb, gvl) vfredusum_vs_f32m4_f32m1(v_res, va, vb, gvl)
#else
#define VFREDSUM_FLOAT RISCV_RVV(vfredusum_vs_f32m4_f32m1)
#endif
#define VFMACCVV_FLOAT RISCV_RVV(vfmacc_vv_f32m4)
#define VFMACCVF_FLOAT RISCV_RVV(vfmacc_vf_f32m4)
#define VFMVVF_FLOAT RISCV_RVV(vfmv_v_f_f32m4)
#define VFMVVF_FLOAT_M1 RISCV_RVV(vfmv_v_f_f32m1)
#define VFDOTVV_FLOAT RISCV_RVV(vfdot_vv_f32m4)
#define VFMULVV_FLOAT RISCV_RVV(vfmul_vv_f32m4)
#else
/* double-precision (e64, LMUL=4) intrinsic aliases */
#define VSETVL(n) RISCV_RVV(vsetvl_e64m4)(n)
#define VSETVL_MAX RISCV_RVV(vsetvlmax_e64m1)()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT RISCV_RVV(vle64_v_f64m4)
#define VLSEV_FLOAT RISCV_RVV(vlse64_v_f64m4)
#define VSEV_FLOAT RISCV_RVV(vse64_v_f64m4)
#define VSSEV_FLOAT RISCV_RVV(vsse64_v_f64m4)
#ifdef RISCV_0p10_INTRINSICS
#define VFREDSUM_FLOAT(va, vb, gvl) vfredusum_vs_f64m4_f64m1(v_res, va, vb, gvl)
#else
#define VFREDSUM_FLOAT RISCV_RVV(vfredusum_vs_f64m4_f64m1)
#endif
#define VFMACCVV_FLOAT RISCV_RVV(vfmacc_vv_f64m4)
#define VFMACCVF_FLOAT RISCV_RVV(vfmacc_vf_f64m4)
#define VFMVVF_FLOAT RISCV_RVV(vfmv_v_f_f64m4)
#define VFMVVF_FLOAT_M1 RISCV_RVV(vfmv_v_f_f64m1)
#define VFDOTVV_FLOAT RISCV_RVV(vfdot_vv_f64m4)
#define VFMULVV_FLOAT RISCV_RVV(vfmul_vv_f64m4)
#endif
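
/*
 * SYMV kernel, upper-triangular storage: for each of the last `offset`
 * columns j (j = m-offset .. m-1) of the column-major matrix `a`, add that
 * column's contribution to y:
 *     y[i] += alpha * x[j] * a[j*lda + i]             for i = 0 .. j-1
 *     y[j] += alpha * x[j] * a[j*lda + j]
 *             + alpha * sum_{i<j} a[j*lda + i] * x[i]  (symmetric part)
 * The four branches below differ only in whether x and/or y need strided
 * (vlse/vsse) instead of unit-stride (vle/vse) vector accesses.
 */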
int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
{
    BLASLONG i, j, k;
    BLASLONG ix, iy;
    BLASLONG jx, jy;
    FLOAT temp1;
    FLOAT temp2;
    FLOAT *a_ptr = a;
    unsigned int gvl = 0;
    FLOAT_V_T_M1 v_res, v_z0;
    gvl = VSETVL_MAX;
    v_res = VFMVVF_FLOAT_M1(0, gvl);
    v_z0 = VFMVVF_FLOAT_M1(0, gvl);

    FLOAT_V_T va, vx, vy, vr;
    BLASLONG stride_x, stride_y, inc_xv, inc_yv;
    BLASLONG m1 = m - offset;
    if(inc_x == 1 && inc_y == 1){
        /* unit strides: contiguous vector loads/stores for both x and y */
        a_ptr += m1 * lda;                  /* point at column m1 */
        for (j=m1; j<m; j++)
        {
            temp1 = alpha * x[j];
            temp2 = 0.0;
            if(j > 0){
                i = 0;
                gvl = VSETVL(j);
                vr = VFMVVF_FLOAT(0, gvl);
                /* full vector-length chunks of rows 0..j-1 */
                for(k = 0; k < j / gvl; k++){
                    vy = VLEV_FLOAT(&y[i], gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSEV_FLOAT(&y[i], vy, gvl);

                    vx = VLEV_FLOAT(&x[i], gvl);
                    vr = VFMACCVV_FLOAT(vr, vx, va, gvl);
                    i += gvl;
                }
                v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                temp2 = EXTRACT_FLOAT(v_res);
                /* remainder shorter than one vector */
                if(i < j){
                    gvl = VSETVL(j-i);
                    vy = VLEV_FLOAT(&y[i], gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSEV_FLOAT(&y[i], vy, gvl);

                    vx = VLEV_FLOAT(&x[i], gvl);
                    vr = VFMULVV_FLOAT(vx, va, gvl);
                    v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                    temp2 += EXTRACT_FLOAT(v_res);
                }
            }
            y[j] += temp1 * a_ptr[j] + alpha * temp2;
            a_ptr += lda;
        }
    }else if(inc_x == 1){
        /* x contiguous, y strided: use vlse/vsse for y */
        jy = m1 * inc_y;
        a_ptr += m1 * lda;
        stride_y = inc_y * sizeof(FLOAT);
        for (j=m1; j<m; j++)
        {
            temp1 = alpha * x[j];
            temp2 = 0.0;
            if(j > 0){
                iy = 0;
                i = 0;
                gvl = VSETVL(j);
                inc_yv = inc_y * gvl;
                vr = VFMVVF_FLOAT(0, gvl);
                for(k = 0; k < j / gvl; k++){
                    vy = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSSEV_FLOAT(&y[iy], stride_y, vy, gvl);

                    vx = VLEV_FLOAT(&x[i], gvl);
                    vr = VFMACCVV_FLOAT(vr, vx, va, gvl);
                    i += gvl;
                    iy += inc_yv;
                }
                v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                temp2 = EXTRACT_FLOAT(v_res);
                if(i < j){
                    gvl = VSETVL(j-i);
                    vy = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSSEV_FLOAT(&y[iy], stride_y, vy, gvl);

                    vx = VLEV_FLOAT(&x[i], gvl);
                    vr = VFMULVV_FLOAT(vx, va, gvl);
                    v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                    temp2 += EXTRACT_FLOAT(v_res);
                }
            }
            y[jy] += temp1 * a_ptr[j] + alpha * temp2;
            a_ptr += lda;
            jy += inc_y;
        }
    }else if(inc_y == 1){
        /* y contiguous, x strided: use vlse for x */
        jx = m1 * inc_x;
        a_ptr += m1 * lda;
        stride_x = inc_x * sizeof(FLOAT);
        for (j=m1; j<m; j++)
        {
            temp1 = alpha * x[jx];
            temp2 = 0.0;
            if(j > 0){
                ix = 0;
                i = 0;
                gvl = VSETVL(j);
                inc_xv = inc_x * gvl;
                vr = VFMVVF_FLOAT(0, gvl);
                for(k = 0; k < j / gvl; k++){
                    vy = VLEV_FLOAT(&y[i], gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSEV_FLOAT(&y[i], vy, gvl);

                    vx = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                    vr = VFMACCVV_FLOAT(vr, vx, va, gvl);
                    i += gvl;
                    ix += inc_xv;
                }
                v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                temp2 = EXTRACT_FLOAT(v_res);
                if(i < j){
                    gvl = VSETVL(j-i);
                    vy = VLEV_FLOAT(&y[i], gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSEV_FLOAT(&y[i], vy, gvl);

                    vx = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                    vr = VFMULVV_FLOAT(vx, va, gvl);
                    v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                    temp2 += EXTRACT_FLOAT(v_res);
                }
            }
            y[j] += temp1 * a_ptr[j] + alpha * temp2;
            a_ptr += lda;
            jx += inc_x;
        }
    }else{
        /* both x and y strided */
        jx = m1 * inc_x;
        jy = m1 * inc_y;
        a_ptr += m1 * lda;
        stride_x = inc_x * sizeof(FLOAT);
        stride_y = inc_y * sizeof(FLOAT);
        for (j=m1; j<m; j++)
        {
            temp1 = alpha * x[jx];
            temp2 = 0.0;
            if(j > 0){
                ix = 0;
                iy = 0;
                i = 0;
                gvl = VSETVL(j);
                inc_xv = inc_x * gvl;
                inc_yv = inc_y * gvl;
                vr = VFMVVF_FLOAT(0, gvl);
                for(k = 0; k < j / gvl; k++){
                    vy = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSSEV_FLOAT(&y[iy], stride_y, vy, gvl);

                    vx = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                    vr = VFMACCVV_FLOAT(vr, vx, va, gvl);
                    i += gvl;
                    ix += inc_xv;
                    iy += inc_yv;
                }
                v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                temp2 = EXTRACT_FLOAT(v_res);
                if(i < j){
                    gvl = VSETVL(j-i);
                    vy = VLSEV_FLOAT(&y[iy], stride_y, gvl);
                    va = VLEV_FLOAT(&a_ptr[i], gvl);
                    vy = VFMACCVF_FLOAT(vy, temp1, va, gvl);
                    VSSEV_FLOAT(&y[iy], stride_y, vy, gvl);

                    vx = VLSEV_FLOAT(&x[ix], stride_x, gvl);
                    vr = VFMULVV_FLOAT(vx, va, gvl);
                    v_res = VFREDSUM_FLOAT(vr, v_z0, gvl);
                    temp2 += EXTRACT_FLOAT(v_res);
                }
            }
            y[jy] += temp1 * a_ptr[j] + alpha * temp2;
            a_ptr += lda;
            jx += inc_x;
            jy += inc_y;
        }
    }

    return(0);
}
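
/*
 * Illustrative scalar sketch (not part of the kernel above, kept out of the
 * build with #if 0): the vector loops implement the same per-column update as
 * this plain-C version. It assumes the usual OpenBLAS typedefs FLOAT and
 * BLASLONG from "common.h"; the helper name symv_U_reference is hypothetical
 * and the unit-stride case is shown for brevity.
 */
#if 0
static void symv_U_reference(BLASLONG m, BLASLONG offset, FLOAT alpha,
                             FLOAT *a, BLASLONG lda, FLOAT *x, FLOAT *y)
{
    FLOAT *a_ptr = a + (m - offset) * lda;   /* start at column m-offset */
    for (BLASLONG j = m - offset; j < m; j++) {
        FLOAT temp1 = alpha * x[j];
        FLOAT temp2 = 0.0;
        for (BLASLONG i = 0; i < j; i++) {
            y[i]  += temp1 * a_ptr[i];       /* axpy into y[0..j-1]          */
            temp2 += a_ptr[i] * x[i];        /* dot product, symmetric part  */
        }
        y[j] += temp1 * a_ptr[j] + alpha * temp2;
        a_ptr += lda;
    }
}
#endif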