You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

sbgemv_t_bfdot.c 8.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230
  1. /***************************************************************************
  2. Copyright (c) 2025, The OpenBLAS Project
  3. All rights reserved.
  4. Redistribution and use in source and binary forms, with or without
  5. modification, are permitted provided that the following conditions are
  6. met:
  7. 1. Redistributions of source code must retain the above copyright
  8. notice, this list of conditions and the following disclaimer.
  9. 2. Redistributions in binary form must reproduce the above copyright
  10. notice, this list of conditions and the following disclaimer in
  11. the documentation and/or other materials provided with the
  12. distribution.
  13. 3. Neither the name of the OpenBLAS project nor the names of
  14. its contributors may be used to endorse or promote products
  15. derived from this software without specific prior written
  16. permission.
  17. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  18. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  21. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  22. DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  23. SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  24. CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  25. OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  26. USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  27. *****************************************************************************/
#include <arm_neon.h>
#include "common.h"
/* When built for BGEMM, the output vector y holds bfloat16 elements, so every
 * read/write of y must round-trip through the bf16<->f32 conversion
 * intrinsics.  Otherwise y is plain float and TO32/FROM32 compile away to
 * the identity. */
#ifdef BGEMM
#define INNER_FLOAT bfloat16_t
#define TO32(x) vcvtah_f32_bf16(x)
#define FROM32(x) vcvth_bf16_f32(x)
#else
#define INNER_FLOAT float
#define TO32(x) x
#define FROM32(x) x
#endif
/*
 * Transposed BF16 matrix-vector multiply:
 *     y := alpha * A^T * x + beta * y
 * computed with the Armv8.6-A BFDOT intrinsics (vbfdotq_f32 / vbfdot_f32),
 * which accumulate pairwise bf16 products into f32 lanes.
 *
 * A is m x n in bf16, stored with leading dimension lda, so a_ptr + i walks
 * one length-m column and a_ptr += lda advances to the next column; each
 * output element is dot(column_j, x).
 *
 * m, n    : matrix dimensions (quick return if either < 1)
 * alpha   : scale for A^T*x (under BGEMM, bf16 payload bit-packed in FLOAT)
 * a, lda  : bf16 matrix and its leading dimension
 * x, incx : bf16 input vector and stride (incx == 1 takes the SIMD path)
 * beta    : scale for the existing y (beta == 0 means y is never read)
 * y_in    : output vector; element type is INNER_FLOAT (bf16 under BGEMM)
 * incy    : stride of y
 * Returns 0 always (standard OpenBLAS kernel convention).
 */
int CNAME(BLASLONG m, BLASLONG n, FLOAT alpha, bfloat16 *a, BLASLONG lda, bfloat16 *x, BLASLONG incx, FLOAT beta, FLOAT *y_in, BLASLONG incy)
{
    /* Quick return: nothing to do for an empty matrix. */
    if (m < 1 || n < 1) return(0);
    BLASLONG i;
    BLASLONG ix,iy;
    BLASLONG j;
    bfloat16_t *a_ptr;
    bfloat16_t *x_ptr;
    float temp, temp0, temp1, temp2, temp3;
#ifdef BGEMM
    /* alpha/beta arrive with a bf16 payload bit-packed inside FLOAT:
     * recover the raw bf16 bits via memcpy (no value conversion), then
     * widen to f32 so all accumulation below is done in f32. */
    bfloat16_t alpha_bf16, beta_bf16;
    memcpy(&alpha_bf16, &alpha, sizeof(bfloat16_t));
    memcpy(&beta_bf16, &beta, sizeof(bfloat16_t));
    float alpha_f32 = vcvtah_f32_bf16(alpha_bf16);
    float beta_f32 = vcvtah_f32_bf16(beta_bf16);
#else
    float alpha_f32 = alpha;
    float beta_f32 = beta;
#endif
    INNER_FLOAT *y = (INNER_FLOAT *)y_in;
    INNER_FLOAT *y_ptr;
    iy = 0;
    a_ptr = (bfloat16_t*)(a);
    x_ptr = (bfloat16_t*)(x);
    if (incx == 1) {
        /* Fast path: x is contiguous, so wide vector loads of x are legal.
         * The n columns are split into four equal chunks of `width` columns;
         * each loop iteration computes one dot product from each chunk
         * (four independent accumulator chains for instruction-level
         * parallelism). */
        BLASLONG width = n / 4;
        bfloat16_t *a0_ptr = a_ptr + lda * width * 0;
        bfloat16_t *a1_ptr = a_ptr + lda * width * 1;
        bfloat16_t *a2_ptr = a_ptr + lda * width * 2;
        bfloat16_t *a3_ptr = a_ptr + lda * width * 3;
        INNER_FLOAT *y0_ptr = y + incy * width * 0;
        INNER_FLOAT *y1_ptr = y + incy * width * 1;
        INNER_FLOAT *y2_ptr = y + incy * width * 2;
        INNER_FLOAT *y3_ptr = y + incy * width * 3;
        for (j = 0; j < width; j++) {
            float32x4_t temp0_vec = vdupq_n_f32(0.0f);
            float32x4_t temp1_vec = vdupq_n_f32(0.0f);
            float32x4_t temp2_vec = vdupq_n_f32(0.0f);
            float32x4_t temp3_vec = vdupq_n_f32(0.0f);
            i = 0;
            /* Main loop: 8 bf16 elements per step; vbfdotq_f32 folds each
             * adjacent bf16 pair into one of the four f32 accumulator
             * lanes. */
            while (i + 7 < m) {
                bfloat16x8_t x_vec = vld1q_bf16(x_ptr + i);
                bfloat16x8_t a0_vec = vld1q_bf16(a0_ptr + i);
                bfloat16x8_t a1_vec = vld1q_bf16(a1_ptr + i);
                bfloat16x8_t a2_vec = vld1q_bf16(a2_ptr + i);
                bfloat16x8_t a3_vec = vld1q_bf16(a3_ptr + i);
                temp0_vec = vbfdotq_f32(temp0_vec, a0_vec, x_vec);
                temp1_vec = vbfdotq_f32(temp1_vec, a1_vec, x_vec);
                temp2_vec = vbfdotq_f32(temp2_vec, a2_vec, x_vec);
                temp3_vec = vbfdotq_f32(temp3_vec, a3_vec, x_vec);
                i += 8;
            }
            /* 4-element tail: use the 64-bit bfdot form and fold its two
             * f32 lanes into the LOW half of the 128-bit accumulator (the
             * high half is carried through unchanged); the final horizontal
             * vaddvq_f32 sums all four lanes anyway. */
            if (i + 3 < m) {
                float32x2_t t0 = vdup_n_f32(0.0f);
                float32x2_t t1 = vdup_n_f32(0.0f);
                float32x2_t t2 = vdup_n_f32(0.0f);
                float32x2_t t3 = vdup_n_f32(0.0f);
                bfloat16x4_t x_vec = vld1_bf16(x_ptr + i);
                bfloat16x4_t a0_vec = vld1_bf16(a0_ptr + i);
                bfloat16x4_t a1_vec = vld1_bf16(a1_ptr + i);
                bfloat16x4_t a2_vec = vld1_bf16(a2_ptr + i);
                bfloat16x4_t a3_vec = vld1_bf16(a3_ptr + i);
                t0 = vbfdot_f32(t0, a0_vec, x_vec);
                t1 = vbfdot_f32(t1, a1_vec, x_vec);
                t2 = vbfdot_f32(t2, a2_vec, x_vec);
                t3 = vbfdot_f32(t3, a3_vec, x_vec);
                float32x2_t temp0_vec_low = vget_low_f32(temp0_vec);
                float32x2_t temp1_vec_low = vget_low_f32(temp1_vec);
                float32x2_t temp2_vec_low = vget_low_f32(temp2_vec);
                float32x2_t temp3_vec_low = vget_low_f32(temp3_vec);
                temp0_vec = vcombine_f32(vadd_f32(t0, temp0_vec_low), vget_high_f32(temp0_vec));
                temp1_vec = vcombine_f32(vadd_f32(t1, temp1_vec_low), vget_high_f32(temp1_vec));
                temp2_vec = vcombine_f32(vadd_f32(t2, temp2_vec_low), vget_high_f32(temp2_vec));
                temp3_vec = vcombine_f32(vadd_f32(t3, temp3_vec_low), vget_high_f32(temp3_vec));
                i += 4;
            }
            /* Horizontal-reduce and apply alpha/beta.  beta == 0 is special-
             * cased so y is never READ in that case (BLAS convention: the
             * prior contents of y, possibly uninitialized, must not
             * contribute). */
            if (beta_f32 == 0.0f) {
                temp0 = alpha_f32 * vaddvq_f32(temp0_vec);
                temp1 = alpha_f32 * vaddvq_f32(temp1_vec);
                temp2 = alpha_f32 * vaddvq_f32(temp2_vec);
                temp3 = alpha_f32 * vaddvq_f32(temp3_vec);
            } else {
                temp0 = alpha_f32 * vaddvq_f32(temp0_vec) + beta_f32 * TO32(y0_ptr[iy]);
                temp1 = alpha_f32 * vaddvq_f32(temp1_vec) + beta_f32 * TO32(y1_ptr[iy]);
                temp2 = alpha_f32 * vaddvq_f32(temp2_vec) + beta_f32 * TO32(y2_ptr[iy]);
                temp3 = alpha_f32 * vaddvq_f32(temp3_vec) + beta_f32 * TO32(y3_ptr[iy]);
            }
            /* Scalar tail (m mod 4 leftover rows), added after alpha/beta
             * were already applied, so each term is scaled by alpha here. */
            for (; i < m; ++i) {
                temp0 = temp0 + alpha_f32 * vcvtah_f32_bf16(a0_ptr[i]) * vcvtah_f32_bf16(x_ptr[i]);
                temp1 = temp1 + alpha_f32 * vcvtah_f32_bf16(a1_ptr[i]) * vcvtah_f32_bf16(x_ptr[i]);
                temp2 = temp2 + alpha_f32 * vcvtah_f32_bf16(a2_ptr[i]) * vcvtah_f32_bf16(x_ptr[i]);
                temp3 = temp3 + alpha_f32 * vcvtah_f32_bf16(a3_ptr[i]) * vcvtah_f32_bf16(x_ptr[i]);
            }
            y0_ptr[iy] = FROM32(temp0);
            y1_ptr[iy] = FROM32(temp1);
            y2_ptr[iy] = FROM32(temp2);
            y3_ptr[iy] = FROM32(temp3);
            iy += incy;
            a0_ptr += lda;
            a1_ptr += lda;
            a2_ptr += lda;
            a3_ptr += lda;
        }
        /* Leftover columns (n mod 4), one at a time.  Note the index trick:
         * at this point iy == width*incy and a3_ptr/y3_ptr have advanced
         * through chunk 3, so a_ptr points at column 4*width and
         * y_ptr[iy] == y[incy*width*3 + iy] lands exactly on element
         * 4*width of y. */
        a_ptr = a3_ptr;
        y_ptr = y3_ptr;
        for (j = width * 4; j < n; j++) {
            float32x4_t temp0_vec = vdupq_n_f32(0.0f);
            i = 0;
            while (i + 7 < m) {
                bfloat16x8_t x_vec = vld1q_bf16(x_ptr + i);
                bfloat16x8_t a0_vec = vld1q_bf16(a_ptr + i);
                temp0_vec = vbfdotq_f32(temp0_vec, a0_vec, x_vec);
                i += 8;
            }
            if (i + 3 < m) {
                float32x2_t t0 = vdup_n_f32(0.0f);
                bfloat16x4_t x_vec = vld1_bf16(x_ptr + i);
                bfloat16x4_t a0_vec = vld1_bf16(a_ptr + i);
                t0 = vbfdot_f32(t0, a0_vec, x_vec);
                float32x2_t temp0_vec_low = vget_low_f32(temp0_vec);
                temp0_vec = vcombine_f32(vadd_f32(t0, temp0_vec_low), vget_high_f32(temp0_vec));
                i += 4;
            }
            if (beta_f32 == 0.0f) {
                temp = alpha_f32 * vaddvq_f32(temp0_vec);
            } else {
                temp = alpha_f32 * vaddvq_f32(temp0_vec) + beta_f32 * TO32(y_ptr[iy]);
            }
            for (; i < m; ++i) {
                temp += alpha_f32 * vcvtah_f32_bf16(a_ptr[i]) * vcvtah_f32_bf16(x_ptr[i]);
            }
            y_ptr[iy] = FROM32(temp);
            iy += incy;
            a_ptr += lda;
        }
        return(0);
    }
    /* Generic strided-x fallback (incx != 1): bfdot needs contiguous x
     * loads, so accumulate each column dot product with scalar f32 math. */
    for (j = 0; j < n; j++) {
        temp = 0.0;
        ix = 0;
        for (i = 0; i < m; i++) {
            temp += vcvtah_f32_bf16(a_ptr[i]) * vcvtah_f32_bf16(x_ptr[ix]);
            ix += incx;
        }
        /* Same beta == 0 convention as above: never read y when beta is 0. */
        if (beta_f32 == 0.0f) {
            y[iy] = FROM32(alpha_f32 * temp);
        }
        else {
            y[iy] = FROM32(alpha_f32 * temp + beta_f32 * TO32(y[iy]));
        }
        iy += incy;
        a_ptr += lda;
    }
    return (0);
}