
sgemm_small_kernel_tt_skylakex.c (17 kB)

/***************************************************************************
Copyright (c) 2021, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include <immintrin.h>
#include "common.h"
#include <stdio.h>
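/*
 * Per-element building blocks for the K loop, written as macros so the
 * register-tile shape can vary per block size:
 *   DECLARE_RESULT_512   - zero one 16-float accumulator for C tile (M, N)
 *   BROADCAST_LOAD_A_512 - splat a single element of A across all 16 lanes
 *   LOAD_B_512           - load 16 consecutive elements of B for one k
 *   MASK_LOAD_B_512      - the same, masked for a partial (< 16) N tail
 *   MATMUL_512           - fused multiply-add into the accumulator
 */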
#define DECLARE_RESULT_512(M, N) __m512 result##M##N = _mm512_setzero_ps()
#define BROADCAST_LOAD_A_512(M, N) __m512 Aval##M = _mm512_broadcastss_ps(_mm_load_ss(&A[k + lda * (i+M)]))
#define LOAD_B_512(M, N) __m512 Bval##N = _mm512_loadu_ps(&B[ldb * k + j + (N*16)])
#define MASK_LOAD_B_512(M, N) __m512 Bval##N = _mm512_maskz_loadu_ps(mask, &B[ldb * k + j + (N*16)])
#define MATMUL_512(M, N) result##M##N = _mm512_fmadd_ps(Aval##M, Bval##N, result##M##N)
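/*
 * The store macros come in two flavors. With B0 defined this is the
 * beta == 0 build and plain stores suffice; otherwise the existing C
 * values are folded in as beta * C, using vfmadd231ps with a memory
 * operand (inline asm) for the 256- and 128-bit store paths.
 */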
#if defined(B0)
#define STORE_8xy(v, N, x, y) _mm256_storeu_ps(&C[(j + N*16 + x + y*8)*ldc + i], v)
#define STORE_4xy(v, N, x, y) _mm_mask_storeu_ps(&C[(j + N*16 + x + y*4)*ldc + i], mask8, v)
#define SCATTER_STORE_512(M, N) result##M##N = _mm512_mul_ps(result##M##N, alpha_512); \
  _mm512_i32scatter_ps(&C[(j + N*16)*ldc + i + M], vindex_n, result##M##N, 4);
#define MASK_SCATTER_STORE_512(M, N) result##M##N = _mm512_mul_ps(result##M##N, alpha_512); \
  _mm512_mask_i32scatter_ps(&C[(j + N*16)*ldc + i + M], mask, vindex_n, result##M##N, 4);
#else
#define STORE_8xy(v, N, x, y) \
  asm("vfmadd231ps (%1), %2, %0": "+v"(v): "r"(&C[(j + N*16 + x + y*8)*ldc + i]), "v"(beta_256)); \
  _mm256_storeu_ps(&C[(j + N*16 + x + y*8)*ldc + i], v)
#define STORE_4xy(v, N, x, y) \
  asm("vfmadd231ps (%1), %2, %0": "+v"(v): "r"(&C[(j + N*16 + x + y*4)*ldc + i]), "v"(beta_128)); \
  _mm_mask_storeu_ps(&C[(j + N*16 + x + y*4)*ldc + i], mask8, v)
#define SCATTER_STORE_512(M, N) result##M##N = _mm512_mul_ps(result##M##N, alpha_512); \
  __m512 tmp##M##N = _mm512_i32gather_ps(vindex_n, &C[(j + N*16)*ldc + i + M], 4); \
  result##M##N = _mm512_fmadd_ps(tmp##M##N, beta_512, result##M##N); \
  _mm512_i32scatter_ps(&C[(j + N*16)*ldc + i + M], vindex_n, result##M##N, 4);
#define MASK_SCATTER_STORE_512(M, N) result##M##N = _mm512_mul_ps(result##M##N, alpha_512); \
  __m512 tmp##M##N = _mm512_mask_i32gather_ps(_mm512_setzero_ps(), mask, vindex_n, &C[(j + N*16)*ldc + i + M], 4); \
  result##M##N = _mm512_fmadd_ps(tmp##M##N, beta_512, result##M##N); \
  _mm512_mask_i32scatter_ps(&C[(j + N*16)*ldc + i + M], mask, vindex_n, result##M##N, 4);
#endif
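/*
 * The accumulators hold 16 consecutive j (column) values per register,
 * but C is column major, so contiguous memory runs along i (rows).
 * REORDER_8x16 transposes an 8-row x 16-column register tile in place
 * (unpack + shuffle + cross-lane permute) and scales by alpha, so that
 * each 256-bit half of a t-register holds 8 consecutive rows of one
 * column of C and can be written with a single contiguous store.
 */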
#define REORDER_8x16(r0, r1, r2, r3, r4, r5, r6, r7) \
  __m512 t0, t1, t2, t3, t4, t5, t6, t7, v; \
  t0 = _mm512_unpacklo_ps(r0, r1); \
  t1 = _mm512_unpackhi_ps(r0, r1); \
  t2 = _mm512_unpacklo_ps(r2, r3); \
  t3 = _mm512_unpackhi_ps(r2, r3); \
  t4 = _mm512_unpacklo_ps(r4, r5); \
  t5 = _mm512_unpackhi_ps(r4, r5); \
  t6 = _mm512_unpacklo_ps(r6, r7); \
  t7 = _mm512_unpackhi_ps(r6, r7); \
  v = _mm512_shuffle_ps(t0, t2, 0x4E); \
  r0 = _mm512_mask_blend_ps(kc, t0, v); \
  r1 = _mm512_mask_blend_ps(k3, t2, v); \
  v = _mm512_shuffle_ps(t1, t3, 0x4E); \
  r2 = _mm512_mask_blend_ps(kc, t1, v); \
  r3 = _mm512_mask_blend_ps(k3, t3, v); \
  v = _mm512_shuffle_ps(t4, t6, 0x4E); \
  r4 = _mm512_mask_blend_ps(kc, t4, v); \
  r5 = _mm512_mask_blend_ps(k3, t6, v); \
  v = _mm512_shuffle_ps(t5, t7, 0x4E); \
  r6 = _mm512_mask_blend_ps(kc, t5, v); \
  r7 = _mm512_mask_blend_ps(k3, t7, v); \
  t0 = _mm512_permutex2var_ps(r0, idx_lo, r4); \
  t1 = _mm512_permutex2var_ps(r1, idx_lo, r5); \
  t2 = _mm512_permutex2var_ps(r2, idx_lo, r6); \
  t3 = _mm512_permutex2var_ps(r3, idx_lo, r7); \
  t4 = _mm512_permutex2var_ps(r0, idx_hi, r4); \
  t5 = _mm512_permutex2var_ps(r1, idx_hi, r5); \
  t6 = _mm512_permutex2var_ps(r2, idx_hi, r6); \
  t7 = _mm512_permutex2var_ps(r3, idx_hi, r7); \
  t0 = _mm512_mul_ps(t0, alpha_512); \
  t1 = _mm512_mul_ps(t1, alpha_512); \
  t2 = _mm512_mul_ps(t2, alpha_512); \
  t3 = _mm512_mul_ps(t3, alpha_512); \
  t4 = _mm512_mul_ps(t4, alpha_512); \
  t5 = _mm512_mul_ps(t5, alpha_512); \
  t6 = _mm512_mul_ps(t6, alpha_512); \
  t7 = _mm512_mul_ps(t7, alpha_512);
#define SAVE_8(N, x, y) { \
  __m256 v8 = _mm512_extractf32x8_ps(t##x, y); \
  STORE_8xy(v8, N, x, y); \
}
#define REORDER_STORE_8x16(N) { \
  REORDER_8x16(result0##N, result1##N, result2##N, result3##N, result4##N, result5##N, result6##N, result7##N); \
  SAVE_8(N, 0, 0); SAVE_8(N, 1, 0); SAVE_8(N, 2, 0); SAVE_8(N, 3, 0); SAVE_8(N, 4, 0); SAVE_8(N, 5, 0); SAVE_8(N, 6, 0); SAVE_8(N, 7, 0); \
  SAVE_8(N, 0, 1); SAVE_8(N, 1, 1); SAVE_8(N, 2, 1); SAVE_8(N, 3, 1); SAVE_8(N, 4, 1); SAVE_8(N, 5, 1); SAVE_8(N, 6, 1); SAVE_8(N, 7, 1); \
}
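/*
 * The switch below falls through on purpose: for an N tail of nn
 * columns it stores columns nn-1 down to 0 and nothing beyond.
 */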
#define MASK_SAVE_8() \
  switch (nn) { \
  case 16: SAVE_8(0, 7, 1); \
  case 15: SAVE_8(0, 6, 1); \
  case 14: SAVE_8(0, 5, 1); \
  case 13: SAVE_8(0, 4, 1); \
  case 12: SAVE_8(0, 3, 1); \
  case 11: SAVE_8(0, 2, 1); \
  case 10: SAVE_8(0, 1, 1); \
  case 9:  SAVE_8(0, 0, 1); \
  case 8:  SAVE_8(0, 7, 0); \
  case 7:  SAVE_8(0, 6, 0); \
  case 6:  SAVE_8(0, 5, 0); \
  case 5:  SAVE_8(0, 4, 0); \
  case 4:  SAVE_8(0, 3, 0); \
  case 3:  SAVE_8(0, 2, 0); \
  case 2:  SAVE_8(0, 1, 0); \
  case 1:  SAVE_8(0, 0, 0); \
  }
#define MASK_REORDER_STORE_8x16(N) { \
  REORDER_8x16(result0##N, result1##N, result2##N, result3##N, result4##N, result5##N, result6##N, result7##N); \
  MASK_SAVE_8(); \
}
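/*
 * 4-row variants of the same scheme: transpose a 4x16 tile and store
 * it as 4-float pieces (masked 128-bit stores) per column of C.
 */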
#define REORDER_4x16(r0, r1, r2, r3) \
  __m512 t0, t1, t2, t3, v; \
  t0 = _mm512_unpacklo_ps(r0, r1); \
  t1 = _mm512_unpackhi_ps(r0, r1); \
  t2 = _mm512_unpacklo_ps(r2, r3); \
  t3 = _mm512_unpackhi_ps(r2, r3); \
  v = _mm512_shuffle_ps(t0, t2, 0x4E); \
  r0 = _mm512_mask_blend_ps(kc, t0, v); \
  r1 = _mm512_mask_blend_ps(k3, t2, v); \
  v = _mm512_shuffle_ps(t1, t3, 0x4E); \
  r2 = _mm512_mask_blend_ps(kc, t1, v); \
  r3 = _mm512_mask_blend_ps(k3, t3, v); \
  t0 = _mm512_mul_ps(r0, alpha_512); \
  t1 = _mm512_mul_ps(r1, alpha_512); \
  t2 = _mm512_mul_ps(r2, alpha_512); \
  t3 = _mm512_mul_ps(r3, alpha_512);
#define SAVE_4(N, x, y) { \
  __m128 v4 = _mm512_extractf32x4_ps(t##x, y); \
  STORE_4xy(v4, N, x, y); \
}
#define REORDER_STORE_4x16(N) { \
  REORDER_4x16(result0##N, result1##N, result2##N, result3##N); \
  SAVE_4(N, 0, 0); SAVE_4(N, 1, 0); SAVE_4(N, 2, 0); SAVE_4(N, 3, 0); \
  SAVE_4(N, 0, 1); SAVE_4(N, 1, 1); SAVE_4(N, 2, 1); SAVE_4(N, 3, 1); \
  SAVE_4(N, 0, 2); SAVE_4(N, 1, 2); SAVE_4(N, 2, 2); SAVE_4(N, 3, 2); \
  SAVE_4(N, 0, 3); SAVE_4(N, 1, 3); SAVE_4(N, 2, 3); SAVE_4(N, 3, 3); \
}
#define MASK_SAVE_4() \
  switch (nn) { \
  case 16: SAVE_4(0, 3, 3); \
  case 15: SAVE_4(0, 2, 3); \
  case 14: SAVE_4(0, 1, 3); \
  case 13: SAVE_4(0, 0, 3); \
  case 12: SAVE_4(0, 3, 2); \
  case 11: SAVE_4(0, 2, 2); \
  case 10: SAVE_4(0, 1, 2); \
  case 9:  SAVE_4(0, 0, 2); \
  case 8:  SAVE_4(0, 3, 1); \
  case 7:  SAVE_4(0, 2, 1); \
  case 6:  SAVE_4(0, 1, 1); \
  case 5:  SAVE_4(0, 0, 1); \
  case 4:  SAVE_4(0, 3, 0); \
  case 3:  SAVE_4(0, 2, 0); \
  case 2:  SAVE_4(0, 1, 0); \
  case 1:  SAVE_4(0, 0, 0); \
  }
#define MASK_REORDER_STORE_4x16(N) { \
  REORDER_4x16(result0##N, result1##N, result2##N, result3##N); \
  MASK_SAVE_4(); \
}
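/*
 * Small-matrix SGEMM kernel for Skylake-X, TT variant (per the file
 * name): both A and B are accessed transposed relative to the
 * column-major C, i.e. C = alpha * A^T * B^T (+ beta * C in the
 * non-B0 build). A(k, i) is read at A[k + lda*i] and B(k, j) at
 * B[ldb*k + j].
 */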
#if defined(B0)
int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, FLOAT * A, BLASLONG lda, FLOAT alpha, FLOAT * B, BLASLONG ldb, FLOAT * C, BLASLONG ldc)
#else
int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, FLOAT * A, BLASLONG lda, FLOAT alpha, FLOAT * B, BLASLONG ldb, FLOAT beta, FLOAT * C, BLASLONG ldc)
#endif
{
  // column major
  BLASLONG i, j, k;
  BLASLONG m8 = M & ~7;
  BLASLONG m4 = M & ~3;
  BLASLONG m2 = M & ~1;
  BLASLONG n64 = N & ~63;
  BLASLONG n32 = N & ~31;
  __m512 alpha_512 = _mm512_broadcastss_ps(_mm_load_ss(&alpha));
#if !defined(B0)
  __m256 beta_256 = _mm256_broadcastss_ps(_mm_load_ss(&beta));
  __m128 beta_128 = _mm_broadcastss_ps(_mm_load_ss(&beta));
#endif
  int permute_table[] = {
    0x0, 0x1, 0x2, 0x3, 0x10, 0x11, 0x12, 0x13, 0x8, 0x9, 0xa, 0xb, 0x18, 0x19, 0x1a, 0x1b,
    0x4, 0x5, 0x6, 0x7, 0x14, 0x15, 0x16, 0x17, 0xc, 0xd, 0xe, 0xf, 0x1c, 0x1d, 0x1e, 0x1f,
  };
  __m512i idx_lo = _mm512_loadu_si512(permute_table);
  __m512i idx_hi = _mm512_loadu_si512(permute_table + 16);
  __mmask16 kc = 0xcccc;
  __mmask16 k3 = 0x3333;
  __mmask8 mask8 = 0xff;  // force use of AVX-128 stores instead of SSE
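  /*
   * Row blocking: process 8 rows of C at a time, then 4, then 2, then 1.
   * m8/m4/m2 are M rounded down to a multiple of 8/4/2. Within each row
   * block, columns are processed 32 (or 64) at a time with a masked
   * 16-wide tail.
   */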
  for (i = 0; i < m8; i += 8) {
    for (j = 0; j < n32; j += 32) {
      DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0);
      DECLARE_RESULT_512(4, 0); DECLARE_RESULT_512(5, 0); DECLARE_RESULT_512(6, 0); DECLARE_RESULT_512(7, 0);
      DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1);
      DECLARE_RESULT_512(4, 1); DECLARE_RESULT_512(5, 1); DECLARE_RESULT_512(6, 1); DECLARE_RESULT_512(7, 1);
      for (k = 0; k < K; k++) {
        BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x); BROADCAST_LOAD_A_512(2, x); BROADCAST_LOAD_A_512(3, x);
        BROADCAST_LOAD_A_512(4, x); BROADCAST_LOAD_A_512(5, x); BROADCAST_LOAD_A_512(6, x); BROADCAST_LOAD_A_512(7, x);
        LOAD_B_512(x, 0); LOAD_B_512(x, 1);
        MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0);
        MATMUL_512(4, 0); MATMUL_512(5, 0); MATMUL_512(6, 0); MATMUL_512(7, 0);
        MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1);
        MATMUL_512(4, 1); MATMUL_512(5, 1); MATMUL_512(6, 1); MATMUL_512(7, 1);
      }
      REORDER_STORE_8x16(0);
      REORDER_STORE_8x16(1);
    }
    __mmask16 mask = 0xffff;
    int nn = 16;
    for (; j < N; j += 16) {
      if (N - j < 16) {
        nn = N - j;
        mask = (1UL << nn) - 1;
      }
      DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0);
      DECLARE_RESULT_512(4, 0); DECLARE_RESULT_512(5, 0); DECLARE_RESULT_512(6, 0); DECLARE_RESULT_512(7, 0);
      for (k = 0; k < K; k++) {
        BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x); BROADCAST_LOAD_A_512(2, x); BROADCAST_LOAD_A_512(3, x);
        BROADCAST_LOAD_A_512(4, x); BROADCAST_LOAD_A_512(5, x); BROADCAST_LOAD_A_512(6, x); BROADCAST_LOAD_A_512(7, x);
        MASK_LOAD_B_512(x, 0);
        MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0);
        MATMUL_512(4, 0); MATMUL_512(5, 0); MATMUL_512(6, 0); MATMUL_512(7, 0);
      }
      MASK_REORDER_STORE_8x16(0);
    }
  }
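  /* Remaining rows in groups of 4, with a wider (64-column) j block. */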
  for (; i < m4; i += 4) {
    for (j = 0; j < n64; j += 64) {
      DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0);
      DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1);
      DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); DECLARE_RESULT_512(2, 2); DECLARE_RESULT_512(3, 2);
      DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); DECLARE_RESULT_512(2, 3); DECLARE_RESULT_512(3, 3);
      for (k = 0; k < K; k++) {
        BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x); BROADCAST_LOAD_A_512(2, x); BROADCAST_LOAD_A_512(3, x);
        LOAD_B_512(x, 0); LOAD_B_512(x, 1); LOAD_B_512(x, 2); LOAD_B_512(x, 3);
        MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0);
        MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1);
        MATMUL_512(0, 2); MATMUL_512(1, 2); MATMUL_512(2, 2); MATMUL_512(3, 2);
        MATMUL_512(0, 3); MATMUL_512(1, 3); MATMUL_512(2, 3); MATMUL_512(3, 3);
      }
      REORDER_STORE_4x16(0);
      REORDER_STORE_4x16(1);
      REORDER_STORE_4x16(2);
      REORDER_STORE_4x16(3);
    }
    for (; j < n32; j += 32) {
      DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0);
      DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1);
      for (k = 0; k < K; k++) {
        BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x); BROADCAST_LOAD_A_512(2, x); BROADCAST_LOAD_A_512(3, x);
        LOAD_B_512(x, 0); LOAD_B_512(x, 1);
        MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0);
        MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1);
      }
      REORDER_STORE_4x16(0);
      REORDER_STORE_4x16(1);
    }
    __mmask16 mask = 0xffff;
    int nn = 16;
    for (; j < N; j += 16) {
      if (N - j < 16) {
        nn = N - j;
        mask = (1UL << nn) - 1;
      }
      DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0);
      for (k = 0; k < K; k++) {
        BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x); BROADCAST_LOAD_A_512(2, x); BROADCAST_LOAD_A_512(3, x);
        MASK_LOAD_B_512(x, 0);
        MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0);
      }
      MASK_REORDER_STORE_4x16(0);
    }
  }
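  /*
   * For the last 1-3 rows the register transpose no longer pays off,
   * so the kernel switches to gather/scatter: vindex_n holds
   * {0, ldc, 2*ldc, ...}, so one zmm of 16 consecutive j values for a
   * fixed row i can be written straight into column-major C.
   */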
  if (i < M) {
    int index_n[16];
    for (int ii = 0; ii < 16; ii++) {
      index_n[ii] = ii * ldc;
    }
    __m512i vindex_n = _mm512_loadu_si512(index_n);
#if !defined(B0)
    __m512 beta_512 = _mm512_broadcastss_ps(_mm_load_ss(&beta));
#endif
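    /* Pairs of rows first. */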
    for (; i < m2; i += 2) {
      for (j = 0; j < n64; j += 64) {
        DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0);
        DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1);
        DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2);
        DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x);
          LOAD_B_512(x, 0); LOAD_B_512(x, 1); LOAD_B_512(x, 2); LOAD_B_512(x, 3);
          MATMUL_512(0, 0); MATMUL_512(1, 0);
          MATMUL_512(0, 1); MATMUL_512(1, 1);
          MATMUL_512(0, 2); MATMUL_512(1, 2);
          MATMUL_512(0, 3); MATMUL_512(1, 3);
        }
        SCATTER_STORE_512(0, 0); SCATTER_STORE_512(1, 0);
        SCATTER_STORE_512(0, 1); SCATTER_STORE_512(1, 1);
        SCATTER_STORE_512(0, 2); SCATTER_STORE_512(1, 2);
        SCATTER_STORE_512(0, 3); SCATTER_STORE_512(1, 3);
      }
      for (; j < n32; j += 32) {
        DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0);
        DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x);
          LOAD_B_512(x, 0); LOAD_B_512(x, 1);
          MATMUL_512(0, 0); MATMUL_512(1, 0);
          MATMUL_512(0, 1); MATMUL_512(1, 1);
        }
        SCATTER_STORE_512(0, 0); SCATTER_STORE_512(1, 0);
        SCATTER_STORE_512(0, 1); SCATTER_STORE_512(1, 1);
      }
      __mmask16 mask = 0xffff;
      int nn = 16;
      for (; j < N; j += 16) {
        if (N - j < 16) {
          nn = N - j;
          mask = (1UL << nn) - 1;
        }
        DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x); BROADCAST_LOAD_A_512(1, x);
          MASK_LOAD_B_512(x, 0);
          MATMUL_512(0, 0); MATMUL_512(1, 0);
        }
        MASK_SCATTER_STORE_512(0, 0); MASK_SCATTER_STORE_512(1, 0);
      }
    }
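    /* Final single row, if one remains. */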
    for (; i < M; i += 1) {
      for (j = 0; j < n64; j += 64) {
        DECLARE_RESULT_512(0, 0);
        DECLARE_RESULT_512(0, 1);
        DECLARE_RESULT_512(0, 2);
        DECLARE_RESULT_512(0, 3);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x);
          LOAD_B_512(x, 0); LOAD_B_512(x, 1); LOAD_B_512(x, 2); LOAD_B_512(x, 3);
          MATMUL_512(0, 0);
          MATMUL_512(0, 1);
          MATMUL_512(0, 2);
          MATMUL_512(0, 3);
        }
        SCATTER_STORE_512(0, 0);
        SCATTER_STORE_512(0, 1);
        SCATTER_STORE_512(0, 2);
        SCATTER_STORE_512(0, 3);
      }
      for (; j < n32; j += 32) {
        DECLARE_RESULT_512(0, 0);
        DECLARE_RESULT_512(0, 1);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x);
          LOAD_B_512(x, 0); LOAD_B_512(x, 1);
          MATMUL_512(0, 0);
          MATMUL_512(0, 1);
        }
        SCATTER_STORE_512(0, 0);
        SCATTER_STORE_512(0, 1);
      }
      __mmask16 mask = 0xffff;
      int nn = 16;
      for (; j < N; j += 16) {
        if (N - j < 16) {
          nn = N - j;
          mask = (1UL << nn) - 1;
        }
        DECLARE_RESULT_512(0, 0);
        for (k = 0; k < K; k++) {
          BROADCAST_LOAD_A_512(0, x);
          MASK_LOAD_B_512(x, 0);
          MATMUL_512(0, 0);
        }
        MASK_SCATTER_STORE_512(0, 0);
      }
    }
  }
  return 0;
}