You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

zgemm3m_kernel_4x4_haswell.c 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224
  1. /* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */
  2. /* r12 = k << 5(const), r13 = k(const), r14 = b_head_pos(const), r15 = tmp */
  3. #include "common.h"
  4. #include <stdint.h>
  5. //recommended settings: GEMM_Q=256, GEMM_P=256
/* m = 4 *//* ymm0 for alpha, ymm1-ymm3 for temporary use, ymm4-ymm15 for accumulators */
/* One k step, m=4 n=1: four A doubles times one broadcast B double into ymm4.
 * %0 = packed-A pointer (advanced 32 bytes/step), %1 = packed-B pointer (advanced 8). */
#define KERNEL_k1m4n1 \
"vmovupd (%0),%%ymm1; addq $32,%0;"\
"vbroadcastsd (%1),%%ymm2; vfmadd231pd %%ymm1,%%ymm2,%%ymm4;"\
"addq $8,%1;"
/* For n>=2 the A column is loaded as two vmovddup vectors (ymm1 = even-indexed
 * elements duplicated, ymm2 = odd-indexed) and B as a 128-bit pair broadcast to
 * both lanes, so the accumulators hold an interleaved row/column layout that the
 * unit_save_* macros below untangle.  *_h_* variants leave %1 un-advanced so the
 * wider-n kernels can reuse them before the final "addq" to %1. */
#define KERNEL_h_k1m4n2 \
"vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2; addq $32,%0;"\
"vbroadcastf128 (%1),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,%%ymm4; vfmadd231pd %%ymm2,%%ymm3,%%ymm5;"
#define KERNEL_k1m4n2 KERNEL_h_k1m4n2 "addq $16,%1;"
#define KERNEL_h_k1m4n4 \
KERNEL_h_k1m4n2 "vbroadcastf128 16(%1),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,%%ymm6; vfmadd231pd %%ymm2,%%ymm3,%%ymm7;"
#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $32,%1;"
/* Update one 4-column group: accumulate ymm1/ymm2 (A) times two broadcast B
 * pairs read at off1/off2 from the B address formed by __VA_ARGS__
 * (e.g. "%1,%%r12,1" addresses the second packed 4-column B panel; r12 = K*32
 * bytes is the size of one such panel, set up in COMPUTE()). */
#define unit_kernel_k1m4n4(c1,c2,c3,c4,off1,off2,...) \
"vbroadcastf128 "#off1"("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c1"; vfmadd231pd %%ymm2,%%ymm3,"#c2";"\
"vbroadcastf128 "#off2"("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c3"; vfmadd231pd %%ymm2,%%ymm3,"#c4";"
/* n=8/n=12 extend n=4 with the 2nd/3rd B panels at %1+r12 and %1+2*r12. */
#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,16,%1,%%r12,1)
#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $32,%1;"
#define KERNEL_h_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,16,%1,%%r12,2)
#define KERNEL_k1m4n12 KERNEL_h_k1m4n12 "addq $32,%1;"
/* Two k steps: plain repetition for small n ... */
#define KERNEL_k2m4n1 KERNEL_k1m4n1 KERNEL_k1m4n1
#define KERNEL_k2m4n2 KERNEL_k1m4n2 KERNEL_k1m4n2
#define KERNEL_k2m4n4 KERNEL_k1m4n4 KERNEL_k1m4n4
#define KERNEL_k2m4n8 KERNEL_k1m4n8 KERNEL_k1m4n8
/* ... but n=12 is written out so both k steps share their A loads, %0/%1 are
 * advanced once, and an A-stream prefetch is slotted between the two steps. */
#define KERNEL_k2m4n12 \
"vmovddup (%0),%%ymm1; vmovddup 8(%0),%%ymm2;"\
unit_kernel_k1m4n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,0,16,%1)\
unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,0,16,%1,%%r12,1)\
unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,0,16,%1,%%r12,2)\
"vmovddup 32(%0),%%ymm1; vmovddup 40(%0),%%ymm2; prefetcht0 512(%0); addq $64,%0;"\
unit_kernel_k1m4n4(%%ymm4,%%ymm5,%%ymm6,%%ymm7,32,48,%1)\
unit_kernel_k1m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11,32,48,%1,%%r12,1)\
unit_kernel_k1m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15,32,48,%1,%%r12,2) "addq $64,%1;"
/* Zero the accumulator registers used by the matching KERNEL_*m4n* macros. */
#define INIT_m4n1 "vpxor %%ymm4,%%ymm4,%%ymm4;"
#define INIT_m4n2 INIT_m4n1 "vpxor %%ymm5,%%ymm5,%%ymm5;"
#define INIT_m4n4 INIT_m4n2 "vpxor %%ymm6,%%ymm6,%%ymm6;vpxor %%ymm7,%%ymm7,%%ymm7;"
#define unit_init_m4n4(c1,c2,c3,c4) \
"vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";vpxor "#c3","#c3","#c3";vpxor "#c4","#c4","#c4";"
#define INIT_m4n8 INIT_m4n4 unit_init_m4n4(%%ymm8,%%ymm9,%%ymm10,%%ymm11)
#define INIT_m4n12 INIT_m4n8 unit_init_m4n4(%%ymm12,%%ymm13,%%ymm14,%%ymm15)
/* SAVE macros, m=4.  ymm0 = {alphar,alphai,alphar,alphai}; each real dot
 * product p is duplicated into a (p,p) pair so a single vfmadd213pd computes
 * C += p*(alphar,alphai) on interleaved complex C.  %2 = top of the current
 * 4-row block of C, %3 = row stride (ldc in bytes), %5 = store cursor. */
#define SAVE_h_m4n1 \
"vpermpd $216,%%ymm4,%%ymm3; vunpcklpd %%ymm3,%%ymm3,%%ymm1; vunpckhpd %%ymm3,%%ymm3,%%ymm2;"\
"vfmadd213pd (%2),%%ymm0,%%ymm1; vfmadd213pd 32(%2),%%ymm0,%%ymm2; vmovupd %%ymm1,(%2); vmovupd %%ymm2,32(%2);"
/* Untangle one even/odd accumulator pair (c1 = even-row products, c2 = odd-row
 * products, two columns interleaved) into two C columns of 4 complex entries
 * each; leaves %5 advanced by two columns (2*ldc). */
#define unit_save_m4n2(c1,c2) \
"vperm2f128 $2,"#c1","#c2",%%ymm2; vperm2f128 $19,"#c1","#c2","#c2"; vmovapd %%ymm2,"#c1";"\
"vunpcklpd "#c1","#c1",%%ymm2; vunpcklpd "#c2","#c2",%%ymm3;"\
"vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd 32(%5),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,32(%5);"\
"vunpckhpd "#c1","#c1",%%ymm2; vunpckhpd "#c2","#c2",%%ymm3;"\
"vfmadd213pd (%5,%3,1),%%ymm0,%%ymm2; vfmadd213pd 32(%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5,%3,1); vmovupd %%ymm3,32(%5,%3,1);"\
"leaq (%5,%3,2),%5;"
#define SAVE_h_m4n2 "movq %2,%5;" unit_save_m4n2(%%ymm4,%%ymm5)
#define SAVE_h_m4n4 SAVE_h_m4n2 unit_save_m4n2(%%ymm6,%%ymm7)
#define SAVE_h_m4n8 SAVE_h_m4n4 unit_save_m4n2(%%ymm8,%%ymm9) unit_save_m4n2(%%ymm10,%%ymm11)
#define SAVE_h_m4n12 SAVE_h_m4n8 unit_save_m4n2(%%ymm12,%%ymm13) unit_save_m4n2(%%ymm14,%%ymm15)
/* After the stores, step %2 down to the next 4-row block: 4 complex = 64 bytes. */
#define SAVE_m4(ndim) SAVE_h_m4n##ndim "addq $64,%2;"
/* Full k loop for one 4-row strip with ndim columns.  %4 counts k down from
 * its saved copy in r13; %1 restarts at the B panel head saved in r14.  The
 * main loop body does 8 k steps per pass (entered only when k >= 24, exited
 * when fewer than 16 remain) and prefetches both the C tile — the %5 cursor
 * advances by r15-63 bytes per pass, with r15 toggled between 126 and ldc by
 * the cmpq/cmoveq pair — and the next packed B panel via %8 (next_b).  The
 * tail loop finishes the remaining k one step at a time while prefetching the
 * C tile ahead of the SAVE; the final prefetcht0 touches the B panel head
 * (r14) for the next m strip.  Asm local labels are built by pasting ndim so
 * every expansion is unique within the one big asm statement. */
#define COMPUTE_m4(ndim) \
INIT_m4n##ndim\
"movq %%r13,%4; movq %%r14,%1; movq %2,%5; xorq %%r15,%%r15;"\
"cmpq $24,%4; jb "#ndim"004042f;"\
#ndim"004041:\n\t"\
"cmpq $126,%%r15; movq $126,%%r15; cmoveq %3,%%r15;"\
KERNEL_k2m4n##ndim KERNEL_k2m4n##ndim\
"prefetcht1 (%5); subq $63,%5;"\
KERNEL_k2m4n##ndim KERNEL_k2m4n##ndim\
"addq %%r15,%5; prefetcht1 (%8); addq $32,%8;"\
"subq $8,%4; cmpq $16,%4; jnb "#ndim"004041b;"\
"movq %2,%5;"\
#ndim"004042:\n\t"\
"testq %4,%4; jz "#ndim"004043f;"\
"prefetcht0 (%5); prefetcht0 63(%5);"\
KERNEL_k1m4n##ndim\
"prefetcht0 (%5,%3,4); prefetcht0 63(%5,%3,4); addq %3,%5;"\
"decq %4; jmp "#ndim"004042b;"\
#ndim"004043:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14);"\
SAVE_m4(ndim)
/* m = 2 *//* vmm0 for alpha, vmm1-vmm3 for temporary use, vmm4-vmm9 for accumulators */
/* One k step, m=2 n=1: two A doubles times one duplicated B double. */
#define KERNEL_k1m2n1 \
"vmovupd (%0),%%xmm1; addq $16,%0;"\
"vmovddup (%1),%%xmm2; vfmadd231pd %%xmm1,%%xmm2,%%xmm4;"\
"addq $8,%1;"
/* n=2: A elements duplicated per register (xmm1 = a0, xmm2 = a1), B as a
 * {b0,b1} pair; xmm4/xmm5 hold row 0 / row 1 across both columns. */
#define KERNEL_h_k1m2n2 \
"vmovddup (%0),%%xmm1; vmovddup 8(%0),%%xmm2; addq $16,%0;"\
"vmovupd (%1),%%xmm3; vfmadd231pd %%xmm1,%%xmm3,%%xmm4; vfmadd231pd %%xmm2,%%xmm3,%%xmm5;"
#define KERNEL_k1m2n2 KERNEL_h_k1m2n2 "addq $16,%1;"
/* Update one 4-column group: c1 += a0 * b[0..3], c2 += a1 * b[0..3], with the
 * four B values read at the address formed by __VA_ARGS__ (again %1 plus a
 * multiple of r12 selects the packed 4-column B panel). */
#define unit_kernel_k1m2n4(c1,c2,...) \
"vmovupd ("#__VA_ARGS__"),%%ymm3; vfmadd231pd %%ymm1,%%ymm3,"#c1"; vfmadd231pd %%ymm2,%%ymm3,"#c2";"
#define KERNEL_h_k1m2n4 \
"vbroadcastsd (%0),%%ymm1; vbroadcastsd 8(%0),%%ymm2; addq $16,%0;"\
unit_kernel_k1m2n4(%%ymm4,%%ymm5,%1)
#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $32,%1;"
#define KERNEL_h_k1m2n8 KERNEL_h_k1m2n4 \
unit_kernel_k1m2n4(%%ymm6,%%ymm7,%1,%%r12,1)
#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $32,%1;"
#define KERNEL_h_k1m2n12 KERNEL_h_k1m2n8 \
unit_kernel_k1m2n4(%%ymm8,%%ymm9,%1,%%r12,2)
#define KERNEL_k1m2n12 KERNEL_h_k1m2n12 "addq $32,%1;"
/* Zero the accumulator registers used by the matching KERNEL_*m2n* macros. */
#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define unit_init_m2n4(c1,c2) "vpxor "#c1","#c1","#c1";vpxor "#c2","#c2","#c2";"
#define INIT_m2n4 unit_init_m2n4(%%ymm4,%%ymm5)
#define INIT_m2n8 INIT_m2n4 unit_init_m2n4(%%ymm6,%%ymm7)
#define INIT_m2n12 INIT_m2n8 unit_init_m2n4(%%ymm8,%%ymm9)
/* SAVE macros, m=2: as for m=4, each real product p is expanded to (p,p) and
 * C += p*alpha via vfmadd213pd with ymm0 = {alphar,alphai,alphar,alphai}.
 * One ymm store covers the two complex rows of one column. */
#define SAVE_h_m2n1 \
"vinsertf128 $1,%%xmm4,%%ymm4,%%ymm4; vpermilpd $12,%%ymm4,%%ymm4; vfmadd213pd (%2),%%ymm0,%%ymm4; vmovupd %%ymm4,(%2);"
#define SAVE_h_m2n2 \
"vinsertf128 $1,%%xmm5,%%ymm4,%%ymm4; vunpcklpd %%ymm4,%%ymm4,%%ymm1; vunpckhpd %%ymm4,%%ymm4,%%ymm2;"\
"vfmadd213pd (%2),%%ymm0,%%ymm1; vmovupd %%ymm1,(%2);"\
"vfmadd213pd (%2,%3,1),%%ymm0,%%ymm2; vmovupd %%ymm2,(%2,%3,1);"
/* Rearrange the two row-accumulators (c1 = row 0, c2 = row 1, four columns
 * each) into per-column {r0,r0,r1,r1} vectors and update four consecutive C
 * columns, two per 128-bit combine; %5 ends four columns (4*ldc) ahead. */
#define unit_save_m2n4(c1,c2) \
"vperm2f128 $2,"#c1","#c2",%%ymm1; vunpcklpd %%ymm1,%%ymm1,%%ymm2; vunpckhpd %%ymm1,%%ymm1,%%ymm3;"\
"vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd (%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,(%5,%3,1); leaq (%5,%3,2),%5;"\
"vperm2f128 $19,"#c1","#c2",%%ymm1; vunpcklpd %%ymm1,%%ymm1,%%ymm2; vunpckhpd %%ymm1,%%ymm1,%%ymm3;"\
"vfmadd213pd (%5),%%ymm0,%%ymm2; vfmadd213pd (%5,%3,1),%%ymm0,%%ymm3; vmovupd %%ymm2,(%5); vmovupd %%ymm3,(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_h_m2n4 "movq %2,%5;" unit_save_m2n4(%%ymm4,%%ymm5)
#define SAVE_h_m2n8 SAVE_h_m2n4 unit_save_m2n4(%%ymm6,%%ymm7)
#define SAVE_h_m2n12 SAVE_h_m2n8 unit_save_m2n4(%%ymm8,%%ymm9)
/* Step %2 to the next 2-row block: 2 complex = 32 bytes. */
#define SAVE_m2(ndim) SAVE_h_m2n##ndim "addq $32,%2;"
/* Plain (non-unrolled, no prefetch) k loop for one 2-row strip of ndim
 * columns: restore k (%4 from r13) and B (%1 from r14), run KERNEL once per
 * k step, then store the strip. */
#define COMPUTE_m2(ndim) \
INIT_m2n##ndim\
"movq %%r13,%4; movq %%r14,%1;"\
#ndim"002022:\n\t"\
"testq %4,%4; jz "#ndim"002023f;"\
KERNEL_k1m2n##ndim\
"decq %4; jmp "#ndim"002022b;"\
#ndim"002023:\n\t"\
SAVE_m2(ndim)
/* m = 1 *//* vmm0 for alpha, vmm1-vmm3 and vmm10-vmm15 for temporary use, vmm4-vmm6 for accumulators */
/* One k step, m=1 n=1: scalar multiply-accumulate into xmm4. */
#define KERNEL_k1m1n1 \
"vmovsd (%0),%%xmm1; addq $8,%0;"\
"vfmadd231sd (%1),%%xmm1,%%xmm4; addq $8,%1;"
/* n=2: one A double duplicated, times the {b0,b1} pair. */
#define KERNEL_k1m1n2 \
"vmovddup (%0),%%xmm1; addq $8,%0;"\
"vfmadd231pd (%1),%%xmm1,%%xmm4; addq $16,%1;"
/* Update one 4-column group: c1 += a * b[0..3], B read at the address formed
 * by __VA_ARGS__ (%1 plus a multiple of r12 selects the B panel). */
#define unit_kernel_k1m1n4(c1,...) \
"vmovupd ("#__VA_ARGS__"),%%ymm2; vfmadd231pd %%ymm1,%%ymm2,"#c1";"
#define KERNEL_h_k1m1n4 \
"vbroadcastsd (%0),%%ymm1; addq $8,%0;"\
unit_kernel_k1m1n4(%%ymm4,%1)
#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $32,%1;"
#define KERNEL_h_k1m1n8 KERNEL_h_k1m1n4 unit_kernel_k1m1n4(%%ymm5,%1,%%r12,1)
#define KERNEL_k1m1n8 KERNEL_h_k1m1n8 "addq $32,%1;"
#define KERNEL_h_k1m1n12 KERNEL_h_k1m1n8 unit_kernel_k1m1n4(%%ymm6,%1,%%r12,2)
#define KERNEL_k1m1n12 KERNEL_h_k1m1n12 "addq $32,%1;"
/* Zero the m=1 accumulators (n=1 and n=2 both fit in xmm4, so they reuse
 * INIT_m2n1; n>=4 uses one ymm per 4-column group). */
#define INIT_m1n1 INIT_m2n1
#define INIT_m1n2 INIT_m2n1
#define INIT_m1n4 "vpxor %%ymm4,%%ymm4,%%ymm4;"
#define INIT_m1n8 INIT_m1n4 "vpxor %%ymm5,%%ymm5,%%ymm5;"
#define INIT_m1n12 INIT_m1n8 "vpxor %%ymm6,%%ymm6,%%ymm6;"
/* SAVE macros, m=1: each accumulator element is one column's dot product for
 * the single row; duplicate it to (p,p) and do C += p*alpha, one complex
 * element (16 bytes) per column. */
#define SAVE_h_m1n1 \
"vmovddup %%xmm4,%%xmm4; vfmadd213pd (%2),%%xmm0,%%xmm4; vmovupd %%xmm4,(%2);"
#define SAVE_h_m1n2 \
"vunpcklpd %%xmm4,%%xmm4,%%xmm1; vunpckhpd %%xmm4,%%xmm4,%%xmm2;"\
"vfmadd213pd (%2),%%xmm0,%%xmm1; vmovupd %%xmm1,(%2);"\
"vfmadd213pd (%2,%3,1),%%xmm0,%%xmm2; vmovupd %%xmm2,(%2,%3,1);"
/* Four columns per call: columns {0,2} are gathered into one ymm (low half
 * from (%5), high half from (%5,%3,2)), updated and scattered back, then
 * columns {1,3}; %5 ends four columns ahead. */
#define unit_save_m1n4(c1) \
"vunpcklpd "#c1","#c1",%%ymm1; vunpckhpd "#c1","#c1",%%ymm2;"\
"vmovupd (%5),%%xmm3; vinsertf128 $1,(%5,%3,2),%%ymm3,%%ymm3;"\
"vfmadd213pd %%ymm3,%%ymm0,%%ymm1; vmovupd %%xmm1,(%5); vextractf128 $1,%%ymm1,(%5,%3,2); addq %3,%5;"\
"vmovupd (%5),%%xmm3; vinsertf128 $1,(%5,%3,2),%%ymm3,%%ymm3;"\
"vfmadd213pd %%ymm3,%%ymm0,%%ymm2; vmovupd %%xmm2,(%5); vextractf128 $1,%%ymm2,(%5,%3,2); addq %3,%5; leaq (%5,%3,2),%5;"
#define SAVE_h_m1n4 "movq %2,%5;" unit_save_m1n4(%%ymm4)
#define SAVE_h_m1n8 SAVE_h_m1n4 unit_save_m1n4(%%ymm5)
#define SAVE_h_m1n12 SAVE_h_m1n8 unit_save_m1n4(%%ymm6)
/* Step %2 to the next row: 1 complex = 16 bytes. */
#define SAVE_m1(ndim) SAVE_h_m1n##ndim "addq $16,%2;"
/* Plain k loop for the final single-row strip, ndim columns; same shape as
 * COMPUTE_m2 (restore k and B, one KERNEL per k step, then SAVE). */
#define COMPUTE_m1(ndim) \
INIT_m1n##ndim\
"movq %%r13,%4; movq %%r14,%1;"\
#ndim"001011:\n\t"\
"testq %4,%4; jz "#ndim"001012f;"\
KERNEL_k1m1n##ndim\
"decq %4; jmp "#ndim"001011b;"\
#ndim"001012:\n\t"\
SAVE_m1(ndim)
/* Process one panel of ndim columns over all M rows in a single asm
 * statement.  Operand map (also documented at the top of the file):
 *   %0 a_pointer  %1 b_pointer  %2 c_pointer  %3 ldc_in_bytes  %4 K
 *   %5 ctemp (C store/prefetch cursor)  %6 const_val (alpha {re,im})
 *   %7 M  %8 next_b (prefetch pointer into the next packed B panel)
 * Setup: ymm0 = {alphar,alphai} broadcast to both lanes; r13 = K;
 * r12 = K<<5 = bytes per packed 4-column B panel; r14 = head of this B
 * panel; r11 saves M.  Rows are consumed in strips of 4, then 2, then 1;
 * K, B and M are restored from r13/r14/r11 before leaving the asm.  The
 * trailing C code then rewinds A (the asm left %0 advanced by M*K
 * doubles), advances B to the next panel, and moves C to the top of the
 * next panel — 2*(LDC*ndim - M) doubles, the factor 2 because C elements
 * are complex (re,im) pairs. */
#define COMPUTE(ndim) {\
next_b = b_pointer + ndim * K;\
__asm__ __volatile__(\
"vbroadcastf128 (%6),%%ymm0;"\
"movq %4,%%r13; movq %4,%%r12; salq $5,%%r12; movq %1,%%r14; movq %7,%%r11;"\
"cmpq $4,%7;jb 33101"#ndim"f;"\
"33109"#ndim":\n\t"\
COMPUTE_m4(ndim)\
"subq $4,%7;cmpq $4,%7;jnb 33109"#ndim"b;"\
"33101"#ndim":\n\t"\
"cmpq $2,%7;jb 33104"#ndim"f;"\
COMPUTE_m2(ndim)\
"subq $2,%7;"\
"33104"#ndim":\n\t"\
"testq %7,%7;jz 33105"#ndim"f;"\
COMPUTE_m1(ndim)\
"33105"#ndim":\n\t"\
"movq %%r13,%4; movq %%r14,%1; movq %%r11,%7;"\
:"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(const_val),"+r"(M),"+r"(next_b)\
::"r11","r12","r13","r14","r15","xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14",\
"xmm15","cc","memory");\
a_pointer -= M * K; b_pointer += ndim * K; c_pointer += 2*(LDC * ndim - M);\
}
/* Kernel entry point (GotoBLAS/OpenBLAS CNAME convention).  Given the real
 * packed buffers A (m rows x k) and B (k x n columns) — presumably produced
 * by the matching zgemm3m copy routines; confirm against the level-3 driver —
 * accumulates C += (alphar + I*alphai) * A*B, where C is an interleaved
 * complex (re,im) matrix with a leading dimension of LDC complex elements
 * (hence the sizeof(double)*2 column stride below).  Columns are processed
 * in panels of 12/8/4/2/1; rows in strips of 4/2/1 inside COMPUTE().
 * Always returns 0. */
int __attribute__ ((noinline))
CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alphar, double alphai, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG LDC)
{
/* Empty problem: nothing to accumulate. */
if(m==0||n==0||k==0) return 0;
/* Column stride of C in bytes: LDC complex elements = LDC * 16 bytes. */
int64_t ldc_in_bytes = (int64_t)LDC * sizeof(double) * 2;
/* {re,im} of alpha, broadcast into ymm0 by COMPUTE(). */
double constval[2]; constval[0] = alphar; constval[1] = alphai;
double *const_val=constval;
int64_t M = (int64_t)m, K = (int64_t)k;
BLASLONG n_count = n;
/* ctemp is the asm's C store cursor; next_b prefetches the following B panel. */
double *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B;
for(;n_count>11;n_count-=12) COMPUTE(12)
for(;n_count>7;n_count-=8) COMPUTE(8)
for(;n_count>3;n_count-=4) COMPUTE(4)
for(;n_count>1;n_count-=2) COMPUTE(2)
if(n_count>0) COMPUTE(1)
return 0;
}