/* sgemm_kernel_16x4_skylakex_3.c */

/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */
/* r10 to assist prefetch, r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = %1 + 3r12 */
#include "common.h"
#include <stdint.h>
/* m = 16 */ /* zmm8-zmm31 for accumulators, zmm4-zmm7 for temporary use, zmm0 for alpha */
#define KERNEL_k1m16n1 \
"vmovups (%0),%%zmm4; addq $64,%0;"\
"vbroadcastss (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8;"\
"addq $4,%1;"
#define KERNEL_h_k1m16n2 \
"vmovsldup (%0),%%zmm4; vmovshdup (%0),%%zmm5; prefetcht0 512(%0); addq $64,%0;"\
"vbroadcastsd (%1),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,%%zmm8; vfmadd231ps %%zmm5,%%zmm6,%%zmm9;"
#define KERNEL_k1m16n2 KERNEL_h_k1m16n2 "addq $8,%1;"
#define KERNEL_h_k1m16n4 KERNEL_h_k1m16n2 "vbroadcastsd 8(%1),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,%%zmm10; vfmadd231ps %%zmm5,%%zmm7,%%zmm11;"
#define KERNEL_k1m16n4 KERNEL_h_k1m16n4 "addq $16,%1;"
#define unit_kernel_k1m16n4(c1,c2,c3,c4, ...) \
"vbroadcastsd ("#__VA_ARGS__"),%%zmm6; vfmadd231ps %%zmm4,%%zmm6,"#c1"; vfmadd231ps %%zmm5,%%zmm6,"#c2";"\
"vbroadcastsd 8("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm4,%%zmm7,"#c3"; vfmadd231ps %%zmm5,%%zmm7,"#c4";"
#define KERNEL_h_k1m16n8 KERNEL_h_k1m16n4 unit_kernel_k1m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15,%1,%%r12,1)
#define KERNEL_k1m16n8 KERNEL_h_k1m16n8 "addq $16,%1;"
#define KERNEL_h_k1m16n12 KERNEL_h_k1m16n8 unit_kernel_k1m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19,%1,%%r12,2)
#define KERNEL_k1m16n12 KERNEL_h_k1m16n12 "addq $16,%1;"
#define KERNEL_h_k1m16n16 KERNEL_k1m16n12 unit_kernel_k1m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23,%%r15)
#define KERNEL_k1m16n16 KERNEL_h_k1m16n16 "addq $16,%%r15;"
#define KERNEL_h_k1m16n20 KERNEL_h_k1m16n16 unit_kernel_k1m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27,%%r15,%%r12,1)
#define KERNEL_k1m16n20 KERNEL_h_k1m16n20 "addq $16,%%r15;"
#define KERNEL_h_k1m16n24 KERNEL_h_k1m16n20 unit_kernel_k1m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31,%%r15,%%r12,2)
#define KERNEL_k1m16n24 KERNEL_h_k1m16n24 "addq $16,%%r15;"
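/* Illustrative scalar sketch (not part of the kernel, and mathematically
   equivalent rather than identical: the asm defers the alpha scaling to the
   SAVE stage below). It shows what one m=16, n=4 panel computes, assuming the
   packed layouts implied by the pointer increments above: A as k slices of 16
   floats, B as k slices of 4 floats, ldc in elements (the asm takes it in
   bytes). */
static inline void ref_compute_m16n4(const float *a, const float *b, float *c,
                                     int64_t k, int64_t ldc, float alpha)
{
    float acc[4][16] = {{0.0f}};
    for (int64_t l = 0; l < k; l++)      /* one KERNEL_k1m16n4 step */
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 16; i++)
                acc[j][i] += a[l*16 + i] * b[l*4 + j];
    for (int j = 0; j < 4; j++)          /* the SAVE_h_m16n4 stage */
        for (int i = 0; i < 16; i++)
            c[j*ldc + i] += alpha * acc[j][i];
}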
#define INIT_m16n1 "vpxorq %%zmm8,%%zmm8,%%zmm8;"
#define INIT_m16n2 INIT_m16n1 "vpxorq %%zmm9,%%zmm9,%%zmm9;"
#define INIT_m16n4 INIT_m16n2 "vpxorq %%zmm10,%%zmm10,%%zmm10;vpxorq %%zmm11,%%zmm11,%%zmm11;"
#define unit_init_m16n4(c1,c2,c3,c4) \
"vpxorq "#c1","#c1","#c1";vpxorq "#c2","#c2","#c2";vpxorq "#c3","#c3","#c3";vpxorq "#c4","#c4","#c4";"
#define INIT_m16n8 INIT_m16n4 unit_init_m16n4(%%zmm12,%%zmm13,%%zmm14,%%zmm15)
#define INIT_m16n12 INIT_m16n8 unit_init_m16n4(%%zmm16,%%zmm17,%%zmm18,%%zmm19)
#define INIT_m16n16 INIT_m16n12 unit_init_m16n4(%%zmm20,%%zmm21,%%zmm22,%%zmm23)
#define INIT_m16n20 INIT_m16n16 unit_init_m16n4(%%zmm24,%%zmm25,%%zmm26,%%zmm27)
#define INIT_m16n24 INIT_m16n20 unit_init_m16n4(%%zmm28,%%zmm29,%%zmm30,%%zmm31)
#define SAVE_h_m16n1 "vfmadd213ps (%2),%%zmm0,%%zmm8; vmovups %%zmm8,(%2);"
#define unit_save_m16n2(c1,c2) \
"vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\
"vfmadd213ps (%5),%%zmm0,%%zmm4; vfmadd213ps (%5,%3,1),%%zmm0,%%zmm5;"\
"vmovups %%zmm4,(%5); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_h_m16n2 "movq %2,%5;" unit_save_m16n2(%%zmm8,%%zmm9)
#define SAVE_h_m16n4 SAVE_h_m16n2 unit_save_m16n2(%%zmm10,%%zmm11)
#define SAVE_h_m16n8 SAVE_h_m16n4 unit_save_m16n2(%%zmm12,%%zmm13) unit_save_m16n2(%%zmm14,%%zmm15)
#define SAVE_h_m16n12 SAVE_h_m16n8 unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19)
#define SAVE_h_m16n16 SAVE_h_m16n12 unit_save_m16n2(%%zmm20,%%zmm21) unit_save_m16n2(%%zmm22,%%zmm23)
#define SAVE_h_m16n20 SAVE_h_m16n16 unit_save_m16n2(%%zmm24,%%zmm25) unit_save_m16n2(%%zmm26,%%zmm27)
#define SAVE_h_m16n24 SAVE_h_m16n20 unit_save_m16n2(%%zmm28,%%zmm29) unit_save_m16n2(%%zmm30,%%zmm31)
#define SAVE_m16(ndim) SAVE_h_m16n##ndim "addq $64,%2;"
#define COMPUTE_m16(ndim) \
INIT_m16n##ndim\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5; xorq %%r10,%%r10;"\
"cmpq $16,%4; jb "#ndim"016162f;"\
#ndim"016161:\n\t"\
"cmpq $126,%%r10; movq $126,%%r10; cmoveq %3,%%r10;"\
KERNEL_k1m16n##ndim\
KERNEL_k1m16n##ndim\
"prefetcht1 (%5); subq $63,%5; addq %%r10,%5;"\
KERNEL_k1m16n##ndim\
KERNEL_k1m16n##ndim\
"prefetcht1 (%6); addq $32,%6;"\
"subq $4,%4; cmpq $16,%4; jnb "#ndim"016161b;"\
"movq %2,%5;"\
#ndim"016162:\n\t"\
"testq %4,%4; jz "#ndim"016164f;"\
#ndim"016163:\n\t"\
"prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1);"\
KERNEL_k1m16n##ndim\
"leaq (%5,%3,2),%5; decq %4; jnz "#ndim"016163b;"\
#ndim"016164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14);"\
SAVE_m16(ndim)
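/* Loop structure: while at least 16 k-iterations remain, the k loop runs
   unrolled by 4, interleaving prefetcht1 of the C tile (%5, stepped by the
   r10 stride trick above) and of the next packed B panel (%6); the remaining
   k-iterations run one at a time with prefetcht0 on C so the SAVE stage hits
   warm cache lines. */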
#define unit_save_m16n2_rscr(c1,c2,scr_off) \
"vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\
"vmovups "#scr_off"(%7),%%zmm6; vfmadd213ps -64(%5),%%zmm0,%%zmm6; vfmadd213ps (%5),%%zmm0,%%zmm4;"\
"vmovups %%zmm6,-64(%5); vmovups %%zmm4,(%5);"\
"vmovups "#scr_off"+64(%7),%%zmm6; vfmadd213ps -64(%5,%3,1),%%zmm0,%%zmm6; vfmadd213ps (%5,%3,1),%%zmm0,%%zmm5;"\
"vmovups %%zmm6,-64(%5,%3,1); vmovups %%zmm5,(%5,%3,1); leaq (%5,%3,2),%5;"
#define unit_save_m16n2_wscr(c1,c2,scr_off) \
"vunpcklps "#c2","#c1",%%zmm6; vunpckhps "#c2","#c1",%%zmm7; vunpcklpd %%zmm7,%%zmm6,%%zmm4; vunpckhpd %%zmm7,%%zmm6,%%zmm5;"\
"vmovups %%zmm4,"#scr_off"(%7); vmovups %%zmm5,"#scr_off"+64(%7);"
#define COMPUTE_m16n24_LSAVE \
INIT_m16n24\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\
"cmpq $16,%4; jb 24716162f; movq $16,%4;"\
"24716161:\n\t"\
KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $172,%%r10; cmovz %3,%%r10;"\
KERNEL_k1m16n24 "prefetcht1 -64(%5); leaq -129(%5,%%r10,1),%5;"\
KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\
KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24716161b;"\
"movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\
"24716162:\n\t"\
"testq %4,%4; jz 24716164f; movq %7,%%r10;"\
"24716163:\n\t"\
"prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\
KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24716163b;"\
"24716164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\
unit_save_m16n2_rscr(%%zmm8,%%zmm9,0) unit_save_m16n2_rscr(%%zmm10,%%zmm11,128) unit_save_m16n2_rscr(%%zmm12,%%zmm13,256)\
unit_save_m16n2_rscr(%%zmm14,%%zmm15,384) unit_save_m16n2_rscr(%%zmm16,%%zmm17,512) unit_save_m16n2_rscr(%%zmm18,%%zmm19,640)\
unit_save_m16n2_wscr(%%zmm20,%%zmm21,0) unit_save_m16n2_wscr(%%zmm22,%%zmm23,128) unit_save_m16n2_wscr(%%zmm24,%%zmm25,256)\
unit_save_m16n2_wscr(%%zmm26,%%zmm27,384) unit_save_m16n2_wscr(%%zmm28,%%zmm29,512) unit_save_m16n2_wscr(%%zmm30,%%zmm31,640)
#define COMPUTE_m16n24_RSAVE \
INIT_m16n24 "leaq (%2,%3,8),%2; leaq (%2,%3,4),%2;"\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\
"cmpq $16,%4; jb 24616162f; movq $16,%4;"\
"24616161:\n\t"\
KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $172,%%r10; cmovz %3,%%r10;"\
KERNEL_k1m16n24 "prefetcht1 -64(%5); leaq -129(%5,%%r10,1),%5;"\
KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\
KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24616161b;"\
"movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\
"24616162:\n\t"\
"testq %4,%4; jz 24616164f; movq %7,%%r10;"\
"24616163:\n\t"\
"prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\
KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24616163b;"\
"24616164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\
unit_save_m16n2_rscr(%%zmm20,%%zmm21,0) unit_save_m16n2_rscr(%%zmm22,%%zmm23,128) unit_save_m16n2_rscr(%%zmm24,%%zmm25,256)\
unit_save_m16n2_rscr(%%zmm26,%%zmm27,384) unit_save_m16n2_rscr(%%zmm28,%%zmm29,512) unit_save_m16n2_rscr(%%zmm30,%%zmm31,640)\
unit_save_m16n2_wscr(%%zmm8,%%zmm9,0) unit_save_m16n2_wscr(%%zmm10,%%zmm11,128) unit_save_m16n2_wscr(%%zmm12,%%zmm13,256)\
unit_save_m16n2_wscr(%%zmm14,%%zmm15,384) unit_save_m16n2_wscr(%%zmm16,%%zmm17,512) unit_save_m16n2_wscr(%%zmm18,%%zmm19,640)\
"negq %3; leaq (%2,%3,8),%2; leaq (%2,%3,4),%2; negq %3;"
#define COMPUTE_m16n24_LINIT \
INIT_m16n24\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\
"cmpq $16,%4; jb 24516162f; movq $16,%4;"\
"24516161:\n\t"\
KERNEL_k1m16n24 "addq $4,%4; testq $12,%4; movq $84,%%r10; cmovz %3,%%r10;"\
KERNEL_k1m16n24 "prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\
KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\
KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24516161b;"\
"movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\
"24516162:\n\t"\
"testq %4,%4; jz 24516164f; movq %7,%%r10;"\
"24516163:\n\t"\
"prefetcht0 (%5); prefetcht0 63(%5); addq %3,%5;"\
KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24516163b;"\
"24516164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\
unit_save_m16n2(%%zmm8,%%zmm9) unit_save_m16n2(%%zmm10,%%zmm11) unit_save_m16n2(%%zmm12,%%zmm13)\
unit_save_m16n2(%%zmm14,%%zmm15) unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19)\
unit_save_m16n2_wscr(%%zmm20,%%zmm21,0) unit_save_m16n2_wscr(%%zmm22,%%zmm23,128) unit_save_m16n2_wscr(%%zmm24,%%zmm25,256)\
unit_save_m16n2_wscr(%%zmm26,%%zmm27,384) unit_save_m16n2_wscr(%%zmm28,%%zmm29,512) unit_save_m16n2_wscr(%%zmm30,%%zmm31,640)
#define COMPUTE_m16n24_LTAIL \
INIT_m16n24\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\
"cmpq $16,%4; jb 24416162f; movq $16,%4;"\
"24416161:\n\t"\
KERNEL_k1m16n24 "addq $4,%4; testq $4,%4; movq $126,%%r10; cmovz %3,%%r10;"\
KERNEL_k1m16n24 "prefetcht1 -64(%5); prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\
KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\
KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24416161b;"\
"movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\
"24416162:\n\t"\
"testq %4,%4; jz 24416164f; movq %7,%%r10;"\
"24416163:\n\t"\
"prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 -64(%5,%3,1); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1); leaq (%5,%3,2),%5;"\
KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24416163b;"\
"24416164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\
unit_save_m16n2_rscr(%%zmm8,%%zmm9,0) unit_save_m16n2_rscr(%%zmm10,%%zmm11,128) unit_save_m16n2_rscr(%%zmm12,%%zmm13,256)\
unit_save_m16n2_rscr(%%zmm14,%%zmm15,384) unit_save_m16n2_rscr(%%zmm16,%%zmm17,512) unit_save_m16n2_rscr(%%zmm18,%%zmm19,640)\
unit_save_m16n2(%%zmm20,%%zmm21) unit_save_m16n2(%%zmm22,%%zmm23) unit_save_m16n2(%%zmm24,%%zmm25)\
unit_save_m16n2(%%zmm26,%%zmm27) unit_save_m16n2(%%zmm28,%%zmm29) unit_save_m16n2(%%zmm30,%%zmm31)
#define COMPUTE_m16n24_RTAIL \
INIT_m16n24\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15; movq %2,%5;"\
"cmpq $16,%4; jb 24416162f; movq $16,%4;"\
"24416161:\n\t"\
KERNEL_k1m16n24 "addq $4,%4; testq $4,%4; movq $126,%%r10; cmovz %3,%%r10;"\
KERNEL_k1m16n24 "prefetcht1 -64(%5); prefetcht1 (%5); leaq -63(%5,%%r10,1),%5;"\
KERNEL_k1m16n24 "prefetcht1 (%6); addq $32,%6; cmpq $208,%4; cmoveq %2,%5;"\
KERNEL_k1m16n24 "cmpq %4,%%r13; jnb 24416161b;"\
"movq %2,%5; negq %4; leaq 16(%%r13,%4,1),%4;"\
"24416162:\n\t"\
"testq %4,%4; jz 24416164f; movq %7,%%r10;"\
"24416163:\n\t"\
"prefetcht0 -64(%5); prefetcht0 (%5); prefetcht0 63(%5); prefetcht0 -64(%5,%3,1); prefetcht0 (%5,%3,1); prefetcht0 63(%5,%3,1); leaq (%5,%3,2),%5;"\
KERNEL_k1m16n24 "prefetcht0 (%%r10); addq $64,%%r10; decq %4; jnz 24416163b;"\
"24416164:\n\t"\
"prefetcht0 (%%r14); prefetcht0 64(%%r14); movq %2,%5; addq $64,%2;"\
unit_save_m16n2(%%zmm8,%%zmm9) unit_save_m16n2(%%zmm10,%%zmm11) unit_save_m16n2(%%zmm12,%%zmm13)\
unit_save_m16n2(%%zmm14,%%zmm15) unit_save_m16n2(%%zmm16,%%zmm17) unit_save_m16n2(%%zmm18,%%zmm19)\
unit_save_m16n2_rscr(%%zmm20,%%zmm21,0) unit_save_m16n2_rscr(%%zmm22,%%zmm23,128) unit_save_m16n2_rscr(%%zmm24,%%zmm25,256)\
unit_save_m16n2_rscr(%%zmm26,%%zmm27,384) unit_save_m16n2_rscr(%%zmm28,%%zmm29,512) unit_save_m16n2_rscr(%%zmm30,%%zmm31,640)
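/* For n = 24 the driver pipelines C stores across adjacent m16 blocks: LINIT
   starts the pipeline (half of the 16x24 tile goes to C, the other half to
   scr[]), RSAVE/LSAVE alternate which half is parked while merging the
   previous block's parked half, and RTAIL/LTAIL drain the pipeline on the
   last block. The R variants shift the C pointer by 12 columns (the two leaq
   instructions) so that direct and merged stores line up. */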
/* m = 8 *//* zmm0 for alpha, zmm1-2 for perm words, zmm4-7 for temporary use, zmm8-19 for accumulators */
#define KERNEL_k1m8n1 \
"vbroadcastss (%1),%%ymm4; addq $4,%1; vfmadd231ps (%0),%%ymm4,%%ymm8; addq $32,%0;"
#define KERNEL_k1m8n2 \
"vmovups (%0),%%ymm4; addq $32,%0;"\
"vbroadcastss (%1),%%ymm5; vfmadd231ps %%ymm5,%%ymm4,%%ymm8;"\
"vbroadcastss 4(%1),%%ymm6; vfmadd231ps %%ymm6,%%ymm4,%%ymm9; addq $8,%1;"
#define unit_kernel_k1m8n4(c1,c2,...)\
"vbroadcastf32x4 ("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm7,%%zmm4,"#c1"; vfmadd231ps %%zmm7,%%zmm5,"#c2";"
#define KERNEL_h_k1m8n4 \
"vbroadcastf32x4 (%0),%%zmm4; vpermilps %%zmm2,%%zmm4,%%zmm4; vbroadcastf32x4 16(%0),%%zmm5; vpermilps %%zmm2,%%zmm5,%%zmm5; addq $32,%0;"\
unit_kernel_k1m8n4(%%zmm8,%%zmm9,%1)
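/* Tile layout for m = 8: each zmm accumulator holds a full 4x4 block of C.
   vbroadcastf32x4 copies four floats to every 128-bit lane, and vpermilps with
   the permil table {0,0,0,0,1,1,1,1,...} expands A so that lane 4*i+j of zmm8
   accumulates sum_k A[k][i]*B[k][j] (rows 0-3); zmm9 covers rows 4-7. */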
#define KERNEL_k1m8n4 KERNEL_h_k1m8n4 "addq $16,%1;"
#define KERNEL_h_k1m8n8 KERNEL_h_k1m8n4 unit_kernel_k1m8n4(%%zmm10,%%zmm11,%1,%%r12,1)
#define KERNEL_k1m8n8 KERNEL_h_k1m8n8 "addq $16,%1;"
#define KERNEL_k1m8n12 KERNEL_h_k1m8n8 unit_kernel_k1m8n4(%%zmm12,%%zmm13,%1,%%r12,2) "addq $16,%1;"
#define KERNEL_h_k1m8n16 KERNEL_k1m8n12 unit_kernel_k1m8n4(%%zmm14,%%zmm15,%%r15)
#define KERNEL_k1m8n16 KERNEL_h_k1m8n16 "addq $16,%%r15;"
#define KERNEL_h_k1m8n20 KERNEL_h_k1m8n16 unit_kernel_k1m8n4(%%zmm16,%%zmm17,%%r15,%%r12,1)
#define KERNEL_k1m8n20 KERNEL_h_k1m8n20 "addq $16,%%r15;"
#define KERNEL_k1m8n24 KERNEL_h_k1m8n20 unit_kernel_k1m8n4(%%zmm18,%%zmm19,%%r15,%%r12,2) "addq $16,%%r15;"
#define INIT_m8n1 "vpxor %%ymm8,%%ymm8,%%ymm8;"
#define INIT_m8n2 "vpxor %%ymm8,%%ymm8,%%ymm8; vpxor %%ymm9,%%ymm9,%%ymm9;"
#define unit_init_m8n4(c1,c2) "vpxorq "#c1","#c1","#c1";vpxorq "#c2","#c2","#c2";"
#define INIT_m8n4 unit_init_m8n4(%%zmm8,%%zmm9)
#define INIT_m8n8 INIT_m8n4 unit_init_m8n4(%%zmm10,%%zmm11)
#define INIT_m8n12 INIT_m8n8 unit_init_m8n4(%%zmm12,%%zmm13)
#define INIT_m8n16 INIT_m8n12 unit_init_m8n4(%%zmm14,%%zmm15)
#define INIT_m8n20 INIT_m8n16 unit_init_m8n4(%%zmm16,%%zmm17)
#define INIT_m8n24 INIT_m8n20 unit_init_m8n4(%%zmm18,%%zmm19)
#define SAVE_h_m8n1 "vfmadd213ps (%2),%%ymm0,%%ymm8; vmovups %%ymm8,(%2);"
#define SAVE_h_m8n2 \
"vfmadd213ps (%2),%%ymm0,%%ymm8; vmovups %%ymm8,(%2);"\
"vfmadd213ps (%2,%3,1),%%ymm0,%%ymm9; vmovups %%ymm9,(%2,%3,1);"
#define unit_save_m8n4(c1_no,c2_no)\
"vpermps %%zmm"#c1_no",%%zmm1,%%zmm"#c1_no"; vpermps %%zmm"#c2_no",%%zmm1,%%zmm"#c2_no";"\
"vextractf64x4 $1,%%zmm"#c1_no",%%ymm5; vextractf64x4 $1,%%zmm"#c2_no",%%ymm6;"\
"vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm"#c1_no",%%ymm0,%%ymm4;"\
"vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1);"\
"vmovups 16(%5),%%xmm4; vinsertf128 $1,16(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm"#c2_no",%%ymm0,%%ymm4;"\
"vmovups %%xmm4,16(%5); vextractf128 $1,%%ymm4,16(%5,%3,1); leaq (%5,%3,2),%5;"\
"vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm5,%%ymm0,%%ymm4;"\
"vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1);"\
"vmovups 16(%5),%%xmm4; vinsertf128 $1,16(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm6,%%ymm0,%%ymm4;"\
"vmovups %%xmm4,16(%5); vextractf128 $1,%%ymm4,16(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_h_m8n4 "movq %2,%5;" unit_save_m8n4(8,9)
#define SAVE_h_m8n8 SAVE_h_m8n4 unit_save_m8n4(10,11)
#define SAVE_h_m8n12 SAVE_h_m8n8 unit_save_m8n4(12,13)
#define SAVE_h_m8n16 SAVE_h_m8n12 unit_save_m8n4(14,15)
#define SAVE_h_m8n20 SAVE_h_m8n16 unit_save_m8n4(16,17)
#define SAVE_h_m8n24 SAVE_h_m8n20 unit_save_m8n4(18,19)
#define SAVE_m8(ndim) SAVE_h_m8n##ndim "addq $32,%2;"
#define COMPUTE_m8(ndim) \
INIT_m8n##ndim\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\
"testq %4,%4; jz "#ndim"008082f;"\
#ndim"008081:\n\t"\
KERNEL_k1m8n##ndim "decq %4; jnz "#ndim"008081b;"\
#ndim"008082:\n\t"\
SAVE_m8(ndim)
/* m = 4 *//* zmm0 for alpha, zmm1-2 for perm words, zmm4-7 for temporary use, zmm8-15 for accumulators */
#define KERNEL_k1m4n1 "vbroadcastss (%1),%%xmm4; addq $4,%1; vfmadd231ps (%0),%%xmm4,%%xmm8; addq $16,%0;"
#define KERNEL_k1m4n2 "vmovups (%0),%%xmm4; addq $16,%0;"\
"vbroadcastss (%1),%%xmm5; vfmadd231ps %%xmm5,%%xmm4,%%xmm8;"\
"vbroadcastss 4(%1),%%xmm5; vfmadd231ps %%xmm5,%%xmm4,%%xmm9; addq $8,%1;"
#define unit_kernel_k1m4n4(c1,...) "vbroadcastf32x4 ("#__VA_ARGS__"),%%zmm7; vfmadd231ps %%zmm7,%%zmm4,"#c1";"
#define KERNEL_h_k1m4n4 "vbroadcastf32x4 (%0),%%zmm4; vpermilps %%zmm2,%%zmm4,%%zmm4; addq $16,%0;" unit_kernel_k1m4n4(%%zmm8,%1)
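/* The m = 4 path reuses the m = 8 tiling with one zmm accumulator per
   4-column group: lane 4*i+j holds sum_k A[k][i]*B[k][j]. */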
#define KERNEL_k1m4n4 KERNEL_h_k1m4n4 "addq $16,%1;"
#define KERNEL_h_k1m4n8 KERNEL_h_k1m4n4 unit_kernel_k1m4n4(%%zmm9,%1,%%r12,1)
#define KERNEL_k1m4n8 KERNEL_h_k1m4n8 "addq $16,%1;"
#define KERNEL_k1m4n12 KERNEL_h_k1m4n8 unit_kernel_k1m4n4(%%zmm10,%1,%%r12,2) "addq $16,%1;"
#define KERNEL_h_k1m4n16 KERNEL_k1m4n12 unit_kernel_k1m4n4(%%zmm11,%%r15)
#define KERNEL_k1m4n16 KERNEL_h_k1m4n16 "addq $16,%%r15;"
#define KERNEL_h_k1m4n20 KERNEL_h_k1m4n16 unit_kernel_k1m4n4(%%zmm12,%%r15,%%r12,1)
#define KERNEL_k1m4n20 KERNEL_h_k1m4n20 "addq $16,%%r15;"
#define KERNEL_h_k1m4n24 KERNEL_h_k1m4n20 unit_kernel_k1m4n4(%%zmm13,%%r15,%%r12,2)
#define KERNEL_k1m4n24 KERNEL_h_k1m4n24 "addq $16,%%r15;"
#define INIT_m4n1 "vpxor %%xmm8,%%xmm8,%%xmm8;"
#define INIT_m4n2 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;"
#define INIT_m4n4 "vpxorq %%zmm8,%%zmm8,%%zmm8;"
#define INIT_m4n8 INIT_m4n4 "vpxorq %%zmm9,%%zmm9,%%zmm9;"
#define INIT_m4n12 INIT_m4n8 "vpxorq %%zmm10,%%zmm10,%%zmm10;"
#define INIT_m4n16 INIT_m4n12 "vpxorq %%zmm11,%%zmm11,%%zmm11;"
#define INIT_m4n20 INIT_m4n16 "vpxorq %%zmm12,%%zmm12,%%zmm12;"
#define INIT_m4n24 INIT_m4n20 "vpxorq %%zmm13,%%zmm13,%%zmm13;"
#define SAVE_h_m4n1 "vfmadd213ps (%2),%%xmm0,%%xmm8; vmovups %%xmm8,(%2);"
#define SAVE_h_m4n2 "vfmadd213ps (%2),%%xmm0,%%xmm8; vmovups %%xmm8,(%2); vfmadd213ps (%2,%3,1),%%xmm0,%%xmm9; vmovups %%xmm9,(%2,%3,1);"
#define unit_save_m4n4(c1_no)\
"vpermps %%zmm"#c1_no",%%zmm1,%%zmm"#c1_no"; vextractf64x4 $1,%%zmm"#c1_no",%%ymm5;"\
"vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm0,%%ymm"#c1_no",%%ymm4;"\
"vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1); leaq (%5,%3,2),%5;"\
"vmovups (%5),%%xmm4; vinsertf128 $1,(%5,%3,1),%%ymm4,%%ymm4; vfmadd231ps %%ymm0,%%ymm5,%%ymm4;"\
"vmovups %%xmm4,(%5); vextractf128 $1,%%ymm4,(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_h_m4n4 "movq %2,%5;" unit_save_m4n4(8)
#define SAVE_h_m4n8 SAVE_h_m4n4 unit_save_m4n4(9)
#define SAVE_h_m4n12 SAVE_h_m4n8 unit_save_m4n4(10)
#define SAVE_h_m4n16 SAVE_h_m4n12 unit_save_m4n4(11)
#define SAVE_h_m4n20 SAVE_h_m4n16 unit_save_m4n4(12)
#define SAVE_h_m4n24 SAVE_h_m4n20 unit_save_m4n4(13)
#define SAVE_m4(ndim) SAVE_h_m4n##ndim "addq $16,%2;"
#define COMPUTE_m4(ndim) \
INIT_m4n##ndim\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\
"testq %4,%4; jz "#ndim"004042f;"\
#ndim"004041:\n\t"\
KERNEL_k1m4n##ndim "decq %4; jnz "#ndim"004041b;"\
#ndim"004042:\n\t"\
SAVE_m4(ndim)
/* m = 2 *//* xmm0 for alpha, xmm1-xmm3 for temporary use, xmm4-xmm15 for accumulators */
#define INIT_m2n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define KERNEL_k1m2n1 \
"vmovsd (%0),%%xmm1; addq $8,%0;"\
"vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\
"addq $4,%1;"
#define SAVE_h_m2n1 "vmovsd (%2),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm4; vmovsd %%xmm4,(%2);"
#define INIT_m2n2 INIT_m2n1 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define KERNEL_k1m2n2 \
"vmovsd (%0),%%xmm1; addq $8,%0;"\
"vbroadcastss (%1),%%xmm2; vfmadd231ps %%xmm1,%%xmm2,%%xmm4;"\
"vbroadcastss 4(%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm5;"\
"addq $8,%1;"
#define SAVE_h_m2n2 SAVE_h_m2n1 "vmovsd (%2,%3,1),%%xmm1; vfmadd213ps %%xmm1,%%xmm0,%%xmm5; vmovsd %%xmm5,(%2,%3,1);"
#define INIT_m2n4 INIT_m2n2
#define INIT_m2n8 INIT_m2n4 "vpxor %%xmm6,%%xmm6,%%xmm6; vpxor %%xmm7,%%xmm7,%%xmm7;"
#define INIT_m2n12 INIT_m2n8 "vpxor %%xmm8,%%xmm8,%%xmm8; vpxor %%xmm9,%%xmm9,%%xmm9;"
#define INIT_m2n16 INIT_m2n12 "vpxor %%xmm10,%%xmm10,%%xmm10; vpxor %%xmm11,%%xmm11,%%xmm11;"
#define INIT_m2n20 INIT_m2n16 "vpxor %%xmm12,%%xmm12,%%xmm12; vpxor %%xmm13,%%xmm13,%%xmm13;"
#define INIT_m2n24 INIT_m2n20 "vpxor %%xmm14,%%xmm14,%%xmm14; vpxor %%xmm15,%%xmm15,%%xmm15;"
#define KERNEL_h_k1m2n4 \
"vbroadcastss (%0),%%xmm1; vbroadcastss 4(%0),%%xmm2; addq $8,%0;"\
"vmovups (%1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm4; vfmadd231ps %%xmm2,%%xmm3,%%xmm5;"
#define KERNEL_k1m2n4 KERNEL_h_k1m2n4 "addq $16,%1;"
#define KERNEL_h_k1m2n8 KERNEL_h_k1m2n4 "vmovups (%1,%%r12,1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm6; vfmadd231ps %%xmm2,%%xmm3,%%xmm7;"
#define KERNEL_k1m2n8 KERNEL_h_k1m2n8 "addq $16,%1;"
#define KERNEL_k1m2n12 KERNEL_h_k1m2n8 \
"vmovups (%1,%%r12,2),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm8; vfmadd231ps %%xmm2,%%xmm3,%%xmm9; addq $16,%1;"
#define KERNEL_h_k1m2n16 KERNEL_k1m2n12 "vmovups (%%r15),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm10; vfmadd231ps %%xmm2,%%xmm3,%%xmm11;"
#define KERNEL_k1m2n16 KERNEL_h_k1m2n16 "addq $16,%%r15;"
#define KERNEL_h_k1m2n20 KERNEL_h_k1m2n16 "vmovups (%%r15,%%r12,1),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm12; vfmadd231ps %%xmm2,%%xmm3,%%xmm13;"
#define KERNEL_k1m2n20 KERNEL_h_k1m2n20 "addq $16,%%r15;"
#define KERNEL_h_k1m2n24 KERNEL_h_k1m2n20 "vmovups (%%r15,%%r12,2),%%xmm3; vfmadd231ps %%xmm1,%%xmm3,%%xmm14; vfmadd231ps %%xmm2,%%xmm3,%%xmm15;"
#define KERNEL_k1m2n24 KERNEL_h_k1m2n24 "addq $16,%%r15;"
#define unit_save_m2n4(c1,c2) \
"vunpcklps "#c2","#c1",%%xmm1; vunpckhps "#c2","#c1",%%xmm2;"\
"vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1; vmovsd %%xmm1,(%5); vmovhpd %%xmm1,(%5,%3,1);"\
"leaq (%5,%3,2),%5;"\
"vmovsd (%5),%%xmm3; vmovhpd (%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2; vmovsd %%xmm2,(%5); vmovhpd %%xmm2,(%5,%3,1);"\
"leaq (%5,%3,2),%5;"
#define SAVE_h_m2n4 "movq %2,%5;" unit_save_m2n4(%%xmm4,%%xmm5)
#define SAVE_h_m2n8 SAVE_h_m2n4 unit_save_m2n4(%%xmm6,%%xmm7)
#define SAVE_h_m2n12 SAVE_h_m2n8 unit_save_m2n4(%%xmm8,%%xmm9)
#define SAVE_h_m2n16 SAVE_h_m2n12 unit_save_m2n4(%%xmm10,%%xmm11)
#define SAVE_h_m2n20 SAVE_h_m2n16 unit_save_m2n4(%%xmm12,%%xmm13)
#define SAVE_h_m2n24 SAVE_h_m2n20 unit_save_m2n4(%%xmm14,%%xmm15)
#define SAVE_m2(ndim) SAVE_h_m2n##ndim "addq $8,%2;"
#define COMPUTE_m2(ndim) \
INIT_m2n##ndim\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\
"testq %4,%4; jz "#ndim"002022f;"\
#ndim"002021:\n\t"\
KERNEL_k1m2n##ndim "decq %4; jnz "#ndim"002021b;"\
#ndim"002022:\n\t"\
SAVE_m2(ndim)
/* m = 1 *//* xmm0 for alpha, xmm1-xmm3 and xmm10 for temporary use, xmm4-xmm9 for accumulators */
#define INIT_m1n1 "vpxor %%xmm4,%%xmm4,%%xmm4;"
#define KERNEL_k1m1n1 \
"vmovss (%1),%%xmm3; addq $4,%1;"\
"vmovss (%0),%%xmm1; vfmadd231ss %%xmm3,%%xmm1,%%xmm4;"\
"addq $4,%0;"
#define SAVE_h_m1n1 "vfmadd213ss (%2),%%xmm0,%%xmm4; vmovss %%xmm4,(%2);"
#define INIT_m1n2 INIT_m1n1
#define KERNEL_k1m1n2 \
"vmovsd (%1),%%xmm3; addq $8,%1;"\
"vbroadcastss (%0),%%xmm1; vfmadd231ps %%xmm3,%%xmm1,%%xmm4;"\
"addq $4,%0;"
#define SAVE_h_m1n2 \
"vmovss (%2),%%xmm3; vinsertps $16,(%2,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm4;"\
"vmovss %%xmm4,(%2); vextractps $1,%%xmm4,(%2,%3,1);"
#define INIT_m1n4 INIT_m1n2
#define INIT_m1n8 INIT_m1n4 "vpxor %%xmm5,%%xmm5,%%xmm5;"
#define INIT_m1n12 INIT_m1n8 "vpxor %%xmm6,%%xmm6,%%xmm6;"
#define INIT_m1n16 INIT_m1n12 "vpxor %%xmm7,%%xmm7,%%xmm7;"
#define INIT_m1n20 INIT_m1n16 "vpxor %%xmm8,%%xmm8,%%xmm8;"
#define INIT_m1n24 INIT_m1n20 "vpxor %%xmm9,%%xmm9,%%xmm9;"
#define KERNEL_h_k1m1n4 \
"vbroadcastss (%0),%%xmm1; addq $4,%0; vfmadd231ps (%1),%%xmm1,%%xmm4;"
#define KERNEL_k1m1n4 KERNEL_h_k1m1n4 "addq $16,%1;"
#define KERNEL_h_k1m1n8 KERNEL_h_k1m1n4 "vfmadd231ps (%1,%%r12,1),%%xmm1,%%xmm5;"
#define KERNEL_k1m1n8 KERNEL_h_k1m1n8 "addq $16,%1;"
#define KERNEL_k1m1n12 KERNEL_h_k1m1n8 "vfmadd231ps (%1,%%r12,2),%%xmm1,%%xmm6; addq $16,%1;"
#define KERNEL_h_k1m1n16 KERNEL_k1m1n12 "vfmadd231ps (%%r15),%%xmm1,%%xmm7;"
#define KERNEL_k1m1n16 KERNEL_h_k1m1n16 "addq $16,%%r15;"
#define KERNEL_h_k1m1n20 KERNEL_h_k1m1n16 "vfmadd231ps (%%r15,%%r12,1),%%xmm1,%%xmm8;"
#define KERNEL_k1m1n20 KERNEL_h_k1m1n20 "addq $16,%%r15;"
#define KERNEL_h_k1m1n24 KERNEL_h_k1m1n20 "vfmadd231ps (%%r15,%%r12,2),%%xmm1,%%xmm9;"
#define KERNEL_k1m1n24 KERNEL_h_k1m1n24 "addq $16,%%r15;"
#define unit_save_m1n4(c1) \
"vpxor %%xmm10,%%xmm10,%%xmm10; vmovsd "#c1",%%xmm10,%%xmm2; vmovhlps "#c1",%%xmm10,%%xmm1;"\
"vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm2;"\
"vmovss %%xmm2,(%5); vextractps $1,%%xmm2,(%5,%3,1); leaq (%5,%3,2),%5;"\
"vmovss (%5),%%xmm3; vinsertps $16,(%5,%3,1),%%xmm3,%%xmm3; vfmadd213ps %%xmm3,%%xmm0,%%xmm1;"\
"vmovss %%xmm1,(%5); vextractps $1,%%xmm1,(%5,%3,1); leaq (%5,%3,2),%5;"
#define SAVE_h_m1n4 "movq %2,%5;" unit_save_m1n4(%%xmm4)
#define SAVE_h_m1n8 SAVE_h_m1n4 unit_save_m1n4(%%xmm5)
#define SAVE_h_m1n12 SAVE_h_m1n8 unit_save_m1n4(%%xmm6)
#define SAVE_h_m1n16 SAVE_h_m1n12 unit_save_m1n4(%%xmm7)
#define SAVE_h_m1n20 SAVE_h_m1n16 unit_save_m1n4(%%xmm8)
#define SAVE_h_m1n24 SAVE_h_m1n20 unit_save_m1n4(%%xmm9)
#define SAVE_m1(ndim) SAVE_h_m1n##ndim "addq $4,%2;"
#define COMPUTE_m1(ndim) \
INIT_m1n##ndim\
"movq %%r13,%4; movq %%r14,%1; leaq (%1,%%r12,2),%%r15; addq %%r12,%%r15;"\
"testq %4,%4; jz "#ndim"001012f;"\
#ndim"001011:\n\t"\
KERNEL_k1m1n##ndim "decq %4; jnz "#ndim"001011b;"\
#ndim"001012:\n\t"\
SAVE_m1(ndim)
/* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 = "+r"(K), %5 = "+r"(ctemp) */
/* %6 = "+r"(next_b), %7 = "m"(ALPHA), %8 = "m"(M), %9 = "m"(perm[0]), %10 = "m"(permil[0]) */
/* (in COMPUTE_n24 the operands from %7 up shift by one: %7 = "+r"(wscr), %8 = "m"(ALPHA), %9 = "m"(M), %10/%11 = the permute tables) */
/* r11 = m_counter, r12 = k << 4(const), r13 = k(const), r14 = b_head_pos(const), r15 = %1 + 3r12 */
#define COMPUTE(ndim) {\
next_b = b_pointer + ndim * K;\
__asm__ __volatile__(\
"vbroadcastss %7,%%zmm0; vmovups %9,%%zmm1; vmovups %10,%%zmm2;"\
"movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %8,%%r11;"\
"cmpq $16,%%r11;jb 33101"#ndim"f;"\
"33109"#ndim":\n\t"\
COMPUTE_m16(ndim)\
"subq $16,%%r11;cmpq $16,%%r11;jnb 33109"#ndim"b;"\
"33101"#ndim":\n\t"\
"cmpq $8,%%r11;jb 33102"#ndim"f;"\
COMPUTE_m8(ndim)\
"subq $8,%%r11;"\
"33102"#ndim":\n\t"\
"cmpq $4,%%r11;jb 33103"#ndim"f;"\
COMPUTE_m4(ndim)\
"subq $4,%%r11;"\
"33103"#ndim":\n\t"\
"cmpq $2,%%r11;jb 33104"#ndim"f;"\
COMPUTE_m2(ndim)\
"subq $2,%%r11;"\
"33104"#ndim":\n\t"\
"testq %%r11,%%r11;jz 33105"#ndim"f;"\
COMPUTE_m1(ndim)\
"33105"#ndim":\n\t"\
"movq %%r13,%4; movq %%r14,%1; vzeroupper;"\
:"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(next_b):"m"(ALPHA),"m"(M),"m"(perm[0]),"m"(permil[0])\
:"r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\
"zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\
"cc","memory");\
a_pointer -= M * K; b_pointer += ndim * K; c_pointer += LDC * ndim - M;\
}
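/* Each COMPUTE invocation leaves a_pointer advanced by M*K floats and
   c_pointer advanced by M floats, so the statements after the asm rewind A
   and step B and C to the start of the next ndim-column strip. */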
#define COMPUTE_n24 {\
next_b = b_pointer + 24 * K;\
__asm__ __volatile__(\
"vbroadcastss %8,%%zmm0; vmovups %10,%%zmm1; vmovups %11,%%zmm2;"\
"movq %4,%%r13; movq %4,%%r12; salq $4,%%r12; movq %1,%%r14; movq %9,%%r11;"\
"cmpq $32,%%r11;jb 3310024f;"\
COMPUTE_m16n24_LINIT "subq $16,%%r11; cmpq $32,%%r11;jb 3310724f;"\
"3310924:\n\t"\
COMPUTE_m16n24_RSAVE "subq $16,%%r11; cmpq $32,%%r11;jb 3310824f;"\
COMPUTE_m16n24_LSAVE "subq $16,%%r11; cmpq $32,%%r11;jnb 3310924b;"\
"3310724:\n\t"\
COMPUTE_m16n24_RTAIL "subq $16,%%r11; jmp 3310124f;"\
"3310824:\n\t"\
COMPUTE_m16n24_LTAIL "subq $16,%%r11; jmp 3310124f;"\
"3310024:\n\t"\
"cmpq $16,%%r11;jb 3310124f;"\
COMPUTE_m16(24)\
"subq $16,%%r11;"\
"3310124:\n\t"\
"cmpq $8,%%r11;jb 3310224f;"\
COMPUTE_m8(24)\
"subq $8,%%r11;"\
"3310224:\n\t"\
"cmpq $4,%%r11;jb 3310324f;"\
COMPUTE_m4(24)\
"subq $4,%%r11;"\
"3310324:\n\t"\
"cmpq $2,%%r11;jb 3310424f;"\
COMPUTE_m2(24)\
"subq $2,%%r11;"\
"3310424:\n\t"\
"testq %%r11,%%r11;jz 3310524f;"\
COMPUTE_m1(24)\
"3310524:\n\t"\
"movq %%r13,%4; movq %%r14,%1; vzeroupper;"\
:"+r"(a_pointer),"+r"(b_pointer),"+r"(c_pointer),"+r"(ldc_in_bytes),"+r"(K),"+r"(ctemp),"+r"(next_b),"+r"(wscr):"m"(ALPHA),"m"(M),"m"(perm[0]),"m"(permil[0])\
:"r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14",\
"zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31",\
"cc","memory");\
a_pointer -= M * K; b_pointer += 24 * K; c_pointer += LDC * 24 - M;\
}
int __attribute__ ((noinline))
CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, BLASLONG LDC)
{
if(m==0||n==0||k==0||alpha==(float)0.0) return 0;
float scr[192]; float *wscr = scr;
int64_t ldc_in_bytes = (int64_t)LDC * sizeof(float); float ALPHA = alpha;
int64_t M = (int64_t)m, K = (int64_t)k;
int32_t perm[16] = {0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
int32_t permil[16] = {0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3};
BLASLONG n_count = n;
float *a_pointer = A,*b_pointer = B,*c_pointer = C,*ctemp = C,*next_b = B;
for(;n_count>23;n_count-=24) COMPUTE_n24
for(;n_count>19;n_count-=20) COMPUTE(20)
for(;n_count>15;n_count-=16) COMPUTE(16)
for(;n_count>11;n_count-=12) COMPUTE(12)
for(;n_count>7;n_count-=8) COMPUTE(8)
for(;n_count>3;n_count-=4) COMPUTE(4)
for(;n_count>1;n_count-=2) COMPUTE(2)
if(n_count>0) COMPUTE(1)
return 0;
}
#include <immintrin.h>
//#include "sgemm_direct_skylakex.c"
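/* Illustrative smoke test (a sketch, not part of OpenBLAS; the guard macro
   SGEMM_KERNEL_SELFTEST is ad hoc). For m = 16 the packed-A layout this kernel
   expects coincides with plain column-major storage, and for n = 1 packed B is
   just the k B values, so the call below needs no packing code. CNAME expands
   to the build-specific kernel name via common.h. */
#ifdef SGEMM_KERNEL_SELFTEST
#include <stdio.h>
int main(void)
{
    float A[32], B[2] = {1.0f, 0.5f}, C[16] = {0.0f};
    for (int i = 0; i < 32; i++) A[i] = (float)i;
    /* computes C[0:16] += 2 * (A[:,0]*B[0] + A[:,1]*B[1]) */
    CNAME(16, 1, 2, 2.0f, A, B, C, 16);
    for (int i = 0; i < 16; i++)
        printf("C[%d] = %g (expected %g)\n", i, (double)C[i],
               (double)(2.0f * (A[i] * B[0] + A[i + 16] * B[1])));
    return 0;
}
#endif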