
sbgemv_common_power10.c 19 kB

/***************************************************************************
Copyright (c) 2024, The OpenBLAS Project
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

#ifndef SBGEMV_COMMON_MMA_C
#define SBGEMV_COMMON_MMA_C

#include "sbgemv_common.c"

#if defined(_AIX) || defined(__clang__)
#define USE_MERGE_MMA
#endif
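
/* Load four consecutive bf16 vectors from in into in0 using two paired loads. */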
FORCEINLINE void vec_load_pair2(vec_bf16 *in0, vec_bf16 *in)
{
  vec_load_pair((vec_f32 *)(in0 + 0), (vec_f32 *)(in + 0));
  vec_load_pair((vec_f32 *)(in0 + 2), (vec_f32 *)(in + 2));
}
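
/* Load-and-multiply helpers: load bf16 column data and accumulate bf16
   outer-product updates (xvbf16ger2pp) into one or more MMA accumulators. */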
FORCEINLINE void vec_load_mult_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp)
{
  vec_bf16 in0 = (vec_bf16)vec_load_vec(in);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0, (vec_uc8)inp);
}

FORCEINLINE void vec_load_mult12a_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 inp)
{
  vec_bf16 in11 = (vec_bf16)vec_load_vec(in1);
  vec_load_mult_mma(out, in0, inp);
  __builtin_mma_xvbf16ger2pp(out + 1, (vec_uc8)in11, (vec_uc8)inp);
}

FORCEINLINE void vec_load_mult14_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *in2, vec_bf16 *in3, vec_bf16 inp)
{
  vec_bf16 in21 = (vec_bf16)vec_load_vec(in2);
  vec_bf16 in31 = (vec_bf16)vec_load_vec(in3);
  vec_load_mult12a_mma(out, in0, in1, inp);
  __builtin_mma_xvbf16ger2pp(out + 2, (vec_uc8)in21, (vec_uc8)inp);
  __builtin_mma_xvbf16ger2pp(out + 3, (vec_uc8)in31, (vec_uc8)inp);
}

FORCEINLINE void vec_load_mult2_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 *inp)
{
  vec_bf16 in0[2];
  vec_load_pair((vec_f32 *)in0, (vec_f32 *)in);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0[0], (vec_uc8)inp[0]);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0[1], (vec_uc8)inp[1]);
}

FORCEINLINE void vec_mult2d_mma(__vector_quad *out, vec_bf16 *in01, vec_bf16 *in11, vec_bf16 *inp)
{
  __builtin_mma_xvbf16ger2pp(out + 0, (vec_uc8)in01[0], (vec_uc8)inp[0]);
  __builtin_mma_xvbf16ger2pp(out + 1, (vec_uc8)in11[0], (vec_uc8)inp[0]);
}

FORCEINLINE void vec_load_mult22_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *inp)
{
  vec_bf16 in01[2], in11[2];
  vec_load_pair((vec_f32 *)in01, (vec_f32 *)in0);
  vec_load_pair((vec_f32 *)in11, (vec_f32 *)in1);
  vec_mult2d_mma(out, in01 + 0, in11 + 0, inp + 0);
  vec_mult2d_mma(out, in01 + 1, in11 + 1, inp + 1);
}

FORCEINLINE void vec_load_mult24_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *in2, vec_bf16 *in3, vec_bf16 *inp)
{
  vec_bf16 in01[2], in11[2], in21[2], in31[2];
  vec_load_pair((vec_f32 *)in01, (vec_f32 *)in0);
  vec_load_pair((vec_f32 *)in11, (vec_f32 *)in1);
  vec_load_pair((vec_f32 *)in21, (vec_f32 *)in2);
  vec_load_pair((vec_f32 *)in31, (vec_f32 *)in3);
  vec_mult2d_mma(out + 0, in01 + 0, in11 + 0, inp + 0);
  vec_mult2d_mma(out + 2, in21 + 0, in31 + 0, inp + 0);
  vec_mult2d_mma(out + 0, in01 + 1, in11 + 1, inp + 1);
  vec_mult2d_mma(out + 2, in21 + 1, in31 + 1, inp + 1);
}

FORCEINLINE void vec_load_mult4_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 *inp)
{
  vec_bf16 in0[2];
  vec_load_pair((vec_f32 *)(in0 + 0), (vec_f32 *)(in + 2));
  vec_load_mult2_mma(out, in + 0, inp + 0);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0[0], (vec_uc8)inp[2]);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0[1], (vec_uc8)inp[3]);
}

FORCEINLINE void vec_load_mult42_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *inp)
{
  vec_bf16 in01[4], in11[4];
  vec_load_pair2(in01, in0);
  vec_load_pair2(in11, in1);
  vec_mult2d_mma(out, in01 + 0, in11 + 0, inp + 0);
  vec_mult2d_mma(out, in01 + 1, in11 + 1, inp + 1);
  vec_mult2d_mma(out, in01 + 2, in11 + 2, inp + 2);
  vec_mult2d_mma(out, in01 + 3, in11 + 3, inp + 3);
}

FORCEINLINE void vec_mult4d_mma(__vector_quad *out, vec_bf16 *in01, vec_bf16 *in11, vec_bf16 *in21, vec_bf16 *in31, vec_bf16 *inp)
{
  vec_mult2d_mma(out + 0, in01, in11, inp);
  vec_mult2d_mma(out + 2, in21, in31, inp);
}

FORCEINLINE void vec_load_mult44_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *in2, vec_bf16 *in3, vec_bf16 *inp)
{
  vec_bf16 in01[4], in11[4], in21[4], in31[4];
  vec_load_pair2(in01, in0);
  vec_load_pair2(in11, in1);
  vec_load_pair2(in21, in2);
  vec_load_pair2(in31, in3);
  vec_mult4d_mma(out, in01 + 0, in11 + 0, in21 + 0, in31 + 0, inp + 0);
  vec_mult4d_mma(out, in01 + 1, in11 + 1, in21 + 1, in31 + 1, inp + 1);
  vec_mult4d_mma(out, in01 + 2, in11 + 2, in21 + 2, in31 + 2, inp + 2);
  vec_mult4d_mma(out, in01 + 3, in11 + 3, in21 + 3, in31 + 3, inp + 3);
}
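
/* Partial-width variants: vec_loadN loads only n bf16 elements, so these
   handle the columns left over once full vectors have been processed. */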
FORCEINLINE void vec_loadN_mult_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(in, n);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)in0, (vec_uc8)inp);
}

FORCEINLINE void vec_loadN_mult12a_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in11 = (vec_bf16)vec_loadN(in1, n);
  vec_loadN_mult_mma(out, in0, inp, n);
  __builtin_mma_xvbf16ger2pp(out + 1, (vec_uc8)in11, (vec_uc8)inp);
}

FORCEINLINE void vec_loadN_mult14_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *in2, vec_bf16 *in3, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in21 = (vec_bf16)vec_loadN(in2, n);
  vec_bf16 in31 = (vec_bf16)vec_loadN(in3, n);
  vec_loadN_mult12a_mma(out, in0, in1, inp, n);
  __builtin_mma_xvbf16ger2pp(out + 2, (vec_uc8)in21, (vec_uc8)inp);
  __builtin_mma_xvbf16ger2pp(out + 3, (vec_uc8)in31, (vec_uc8)inp);
}
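
/* Merge-based multiply helpers: vec_mergeh/vec_mergel pair each bf16 element
   of the input with itself before the GER update; xvbf16ger2 (without "pp")
   overwrites the accumulator instead of accumulating into it. */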
FORCEINLINE void vec_mult1_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 inp)
{
  vec_bf16 in00 = vec_mergeh(in0, in0);
  __builtin_mma_xvbf16ger2(out, (vec_uc8)inp, (vec_uc8)in00);
}

FORCEINLINE void vec_mult2_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 inp)
{
  vec_bf16 in01 = vec_mergel(in0, in0);
  vec_mult1_mma(&out[0], in0, inp);
  __builtin_mma_xvbf16ger2(&out[1], (vec_uc8)inp, (vec_uc8)in01);
}

#ifndef USE_MERGE_MMA
FORCEINLINE void vec_mult4_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 inp)
{
  vec_mult2_mma(out + 0, in0[0], inp);
  vec_mult2_mma(out + 2, in0[1], inp);
}
#endif

FORCEINLINE void vec_loadN_mult11_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(in, n);
  vec_mult1_mma(out, in0, inp);
}

FORCEINLINE void vec_loadN_mult12_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(in, n);
  vec_mult2_mma(out, in0, inp);
}

FORCEINLINE void vec_load_mult12_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp)
{
  vec_bf16 in0 = (vec_bf16)vec_load_vec(in);
  vec_mult2_mma(out, in0, inp);
}

#ifndef USE_MERGE_MMA
FORCEINLINE void vec_load_mult18_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 inp)
{
  vec_bf16 in0[4];
  vec_load_pair((vec_f32 *)(in0 + 0), (vec_f32 *)(in + 0));
  vec_load_pair((vec_f32 *)(in0 + 2), (vec_f32 *)(in + 2));
  vec_mult4_mma(&out[0], in0 + 0, inp);
  vec_mult4_mma(&out[4], in0 + 2, inp);
}
#endif
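
/* Reduction helpers: disassemble each MMA accumulator into four float
   vectors, scale by alpha and add the result into the y vectors. */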
FORCEINLINE void vec_reduce1_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  __builtin_mma_disassemble_acc((void*)temp, &out[0]);
  vy0[0] += (temp[0] * v_alpha);
}

FORCEINLINE void vec_reduce2_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  vec_reduce1_mma(&out[0], &temp[0], v_alpha, &vy0[0]);
  vec_reduce1_mma(&out[1], &temp[4], v_alpha, &vy0[1]);
}

#ifndef USE_MERGE_MMA
FORCEINLINE void vec_reduce8_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  vec_reduce2_mma(&out[0], &temp[0], v_alpha, vy0 + 0);
  vec_reduce2_mma(&out[2], &temp[8], v_alpha, vy0 + 2);
  vec_reduce2_mma(&out[4], &temp[16], v_alpha, vy0 + 4);
  vec_reduce2_mma(&out[6], &temp[24], v_alpha, vy0 + 6);
}
#else
FORCEINLINE void vec_reduce44_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  __builtin_mma_disassemble_acc((void*)temp, &out[0]);
  vy0[0] += (temp[0] * v_alpha);
  vy0[2] += (temp[1] * v_alpha);
  vy0[4] += (temp[2] * v_alpha);
  vy0[6] += (temp[3] * v_alpha);
}

FORCEINLINE void vec_reduce84_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  vec_reduce44_mma(&out[0], &temp[0], v_alpha, vy0 + 0);
  vec_reduce44_mma(&out[1], &temp[4], v_alpha, vy0 + 1);
}

FORCEINLINE void vec_reduce88_mma(__vector_quad *out, vec_f32 *temp, vec_f32 v_alpha, vec_f32 *vy0)
{
  vec_reduce44_mma(&out[0], &temp[ 0], v_alpha, vy0 + 0);
  vec_reduce44_mma(&out[1], &temp[ 4], v_alpha, vy0 + 1);
  vec_reduce44_mma(&out[2], &temp[ 8], v_alpha, vy0 + 8);
  vec_reduce44_mma(&out[3], &temp[12], v_alpha, vy0 + 9);
}
#endif
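
/* "a" variants: merge elements from two different columns into one operand
   and start a fresh accumulator with xvbf16ger2 (no accumulate). */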
FORCEINLINE void vec_mult11a_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 in1, vec_bf16 inp)
{
  vec_bf16 in00 = vec_mergeh(in0, in1);
  __builtin_mma_xvbf16ger2(out, (vec_uc8)inp, (vec_uc8)in00);
}

FORCEINLINE void vec_mult2a_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 in1, vec_bf16 inp)
{
  vec_bf16 in01 = vec_mergel(in0, in1);
  vec_mult11a_mma(&out[0], in0, in1, inp);
  __builtin_mma_xvbf16ger2(&out[1], (vec_uc8)inp, (vec_uc8)in01);
}

FORCEINLINE void vec_mult4a_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 inp)
{
  vec_mult2a_mma(out + 0, in0[0], in1[0], inp);
  vec_mult2a_mma(out + 2, in0[1], in1[1], inp);
}

FORCEINLINE void vec_loadN_mult11a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(ina, n);
  vec_bf16 in1 = vec_loadN(inb, n);
  vec_mult11a_mma(out, in0, in1, inp);
}

FORCEINLINE void vec_load_mult22a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp)
{
  vec_bf16 in0 = (vec_bf16)vec_load_vec(ina);
  vec_bf16 in1 = (vec_bf16)vec_load_vec(inb);
  vec_mult2a_mma(out, in0, in1, inp);
}

FORCEINLINE void vec_load4_mma(vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *ina, vec_bf16 *inb)
{
  vec_load_pair((vec_f32 *)(in0 + 0), (vec_f32 *)(ina + 0));
  vec_load_pair((vec_f32 *)(in1 + 0), (vec_f32 *)(inb + 0));
  vec_load_pair((vec_f32 *)(in0 + 2), (vec_f32 *)(ina + 2));
  vec_load_pair((vec_f32 *)(in1 + 2), (vec_f32 *)(inb + 2));
}

#ifndef USE_MERGE_MMA
FORCEINLINE void vec_load_mult28a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp)
{
  vec_bf16 in0[4], in1[4];
  vec_load4_mma(in0, in1, ina, inb);
  vec_mult4a_mma(&out[0], in0 + 0, in1 + 0, inp);
  vec_mult4a_mma(&out[4], in0 + 2, in1 + 2, inp);
}
#endif

FORCEINLINE void vec_loadN_mult22a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(ina, n);
  vec_bf16 in1 = vec_loadN(inb, n);
  vec_mult2a_mma(out, in0, in1, inp);
}
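
/* "b" (and "c") variants: same merging as above, but accumulate into an
   already initialized accumulator with xvbf16ger2pp. */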
FORCEINLINE void vec_mult11b_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 in1, vec_bf16 inp)
{
  vec_bf16 in00 = vec_mergeh(in0, in1);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)inp, (vec_uc8)in00);
}

FORCEINLINE void vec_mult2b_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 in1, vec_bf16 inp)
{
  vec_bf16 in01 = vec_mergel(in0, in1);
  vec_mult11b_mma(&out[0], in0, in1, inp);
  __builtin_mma_xvbf16ger2pp(&out[1], (vec_uc8)inp, (vec_uc8)in01);
}

FORCEINLINE void vec_mult4b_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 inp)
{
  vec_mult2b_mma(out + 0, in0[0], in1[0], inp);
  vec_mult2b_mma(out + 2, in0[1], in1[1], inp);
}

#ifdef USE_MERGE_MMA
FORCEINLINE void vec_mult1c_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 inp)
{
  vec_bf16 in00 = vec_mergeh(in0, in0);
  __builtin_mma_xvbf16ger2pp(out, (vec_uc8)inp, (vec_uc8)in00);
}

FORCEINLINE void vec_mult2c_mma(__vector_quad *out, vec_bf16 in0, vec_bf16 inp)
{
  vec_bf16 in01 = vec_mergel(in0, in0);
  vec_mult1c_mma(&out[0], in0, inp);
  __builtin_mma_xvbf16ger2pp(&out[1], (vec_uc8)inp, (vec_uc8)in01);
}

FORCEINLINE void vec_mult44_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 *inp)
{
  vec_mult2_mma(out, in[0], inp[0]);
  vec_mult2c_mma(out, in[1], inp[1]);
}

FORCEINLINE void vec_mult44c_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 *inp)
{
  vec_mult2c_mma(out, in[0], inp[0]);
  vec_mult2c_mma(out, in[1], inp[1]);
}

FORCEINLINE void vec_mult44a_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *inp)
{
  vec_mult2a_mma(out, in0[0], in1[0], inp[0]);
  vec_mult2b_mma(out, in0[1], in1[1], inp[1]);
}

FORCEINLINE void vec_mult44b_mma(__vector_quad *out, vec_bf16 *in0, vec_bf16 *in1, vec_bf16 *inp)
{
  vec_mult2b_mma(out, in0[0], in1[0], inp[0]);
  vec_mult2b_mma(out, in0[1], in1[1], inp[1]);
}
#endif

FORCEINLINE void vec_loadN_mult11b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(ina, n);
  vec_bf16 in1 = vec_loadN(inb, n);
  vec_mult11b_mma(out, in0, in1, inp);
}

FORCEINLINE void vec_load_mult22b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp)
{
  vec_bf16 in0 = (vec_bf16)vec_load_vec(ina);
  vec_bf16 in1 = (vec_bf16)vec_load_vec(inb);
  vec_mult2b_mma(out, in0, in1, inp);
}

#ifndef USE_MERGE_MMA
FORCEINLINE void vec_load_mult28b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp)
{
  vec_bf16 in0[4], in1[4];
  vec_load4_mma(in0, in1, ina, inb);
  vec_mult4b_mma(&out[0], in0 + 0, in1 + 0, inp);
  vec_mult4b_mma(&out[4], in0 + 2, in1 + 2, inp);
}
#else
FORCEINLINE void vec_load_mult184_mma(__vector_quad *out, vec_bf16 *in, vec_bf16 *inp)
{
  vec_bf16 in0[4];
  vec_load_pair((vec_f32 *)(in0 + 0), (vec_f32 *)(in + 0));
  vec_load_pair((vec_f32 *)(in0 + 2), (vec_f32 *)(in + 2));
  vec_mult44_mma(out, in0 + 0, inp + 0);
  vec_mult44c_mma(out, in0 + 2, inp + 2);
}

FORCEINLINE void vec_load_mult284a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 *inp)
{
  vec_bf16 in0[4], in1[4];
  vec_load4_mma(in0, in1, ina, inb);
  vec_mult44a_mma(out, in0 + 0, in1 + 0, inp + 0);
  vec_mult44b_mma(out, in0 + 2, in1 + 2, inp + 2);
}

FORCEINLINE void vec_load_mult284b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 *inp)
{
  vec_bf16 in0[4], in1[4];
  vec_load4_mma(in0, in1, ina, inb);
  vec_mult44b_mma(out, in0 + 0, in1 + 0, inp + 0);
  vec_mult44b_mma(out, in0 + 2, in1 + 2, inp + 2);
}

FORCEINLINE void vec_load_mult288a_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 *inp)
{
  vec_bf16 in0[8], in1[8];
  vec_load4_mma(in0 + 0, in1 + 0, ina + 0, inb + 0);
  vec_load4_mma(in0 + 4, in1 + 4, ina + 4, inb + 4);
  vec_mult44a_mma(out + 0, in0 + 0, in1 + 0, inp + 0);
  vec_mult44a_mma(out + 2, in0 + 4, in1 + 4, inp + 0);
  vec_mult44b_mma(out + 0, in0 + 2, in1 + 2, inp + 2);
  vec_mult44b_mma(out + 2, in0 + 6, in1 + 6, inp + 2);
}

FORCEINLINE void vec_load_mult288b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 *inp)
{
  vec_bf16 in0[8], in1[8];
  vec_load4_mma(in0 + 0, in1 + 0, ina + 0, inb + 0);
  vec_load4_mma(in0 + 4, in1 + 4, ina + 4, inb + 4);
  vec_mult44b_mma(out + 0, in0 + 0, in1 + 0, inp + 0);
  vec_mult44b_mma(out + 2, in0 + 4, in1 + 4, inp + 0);
  vec_mult44b_mma(out + 0, in0 + 2, in1 + 2, inp + 2);
  vec_mult44b_mma(out + 2, in0 + 6, in1 + 6, inp + 2);
}
#endif

FORCEINLINE void vec_loadN_mult22b_mma(__vector_quad *out, vec_bf16 *ina, vec_bf16 *inb, vec_bf16 inp, BLASLONG n)
{
  vec_bf16 in0 = vec_loadN(ina, n);
  vec_bf16 in1 = vec_loadN(inb, n);
  vec_mult2b_mma(out, in0, in1, inp);
}
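
/* Load and store eight consecutive float vectors of y as four vector pairs. */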
FORCEINLINE void vec_load4_pair(vec_f32 *vy0, vec_f32 *v_y)
{
  vec_load_pair(vy0 + 0, v_y + 0);
  vec_load_pair(vy0 + 2, v_y + 2);
  vec_load_pair(vy0 + 4, v_y + 4);
  vec_load_pair(vy0 + 6, v_y + 6);
}

FORCEINLINE void vec_store4_pair(vec_f32 *v_y, vec_f32 *vy0)
{
  vec_store_pair(v_y + 0, vy0 + 0);
  vec_store_pair(v_y + 2, vy0 + 2);
  vec_store_pair(v_y + 4, vy0 + 4);
  vec_store_pair(v_y + 6, vy0 + 6);
}
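
/* Zero groups of 2/4/8 MMA accumulators, and read groups of accumulators
   back into float vectors with __builtin_mma_disassemble_acc. */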
FORCEINLINE void vec_setzero_2(__vector_quad *temp0)
{
  __builtin_mma_xxsetaccz(&temp0[0]);
  __builtin_mma_xxsetaccz(&temp0[1]);
}

FORCEINLINE void vec_setzero_4(__vector_quad *temp0)
{
  vec_setzero_2(temp0 + 0);
  vec_setzero_2(temp0 + 2);
}

FORCEINLINE void vec_setzero_8(__vector_quad *temp0)
{
  vec_setzero_4(temp0 + 0);
  vec_setzero_4(temp0 + 4);
}

FORCEINLINE void vec_reduce_2(vec_f32 *temp00, __vector_quad *temp0)
{
  __builtin_mma_disassemble_acc((void*)(temp00 + 0), &temp0[0]);
  __builtin_mma_disassemble_acc((void*)(temp00 + 4), &temp0[1]);
}

FORCEINLINE void vec_reduce_4(vec_f32 *temp00, __vector_quad *temp0)
{
  vec_reduce_2(temp00 + 0, temp0 + 0);
  vec_reduce_2(temp00 + 8, temp0 + 2);
}

FORCEINLINE void vec_reduce_8(vec_f32 *temp00, __vector_quad *temp0)
{
  vec_reduce_4(temp00 + 0, temp0 + 0);
  vec_reduce_4(temp00 + 16, temp0 + 4);
}

#ifdef USE_MERGE_MMA
FORCEINLINE void vec_load8_pair(vec_f32 *vy0, vec_f32 *v_y)
{
  vec_load4_pair(vy0 + 0, v_y + 0);
  vec_load4_pair(vy0 + 8, v_y + 8);
}

FORCEINLINE void vec_store8_pair(vec_f32 *v_y, vec_f32 *vy0)
{
  vec_store4_pair(v_y + 0, vy0 + 0);
  vec_store4_pair(v_y + 8, vy0 + 8);
}
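
/* Endian-aware helpers for the merge kernels: VEC_SHIFT rotates a vector by
   whole 32-bit words (vec_sldw), and each MASK_n with vec_genbm keeps one
   32-bit word (a single bf16 pair) of the x vector. */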
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define VEC_SHIFT(data, shift) vec_sldw(data, data, 4 - shift)
#define MASK_0 0xf000
#define MASK_1 0x0f00
#define MASK_2 0x00f0
#define MASK_3 0x000f
#else
#define VEC_SHIFT(data, shift) vec_sldw(data, data, shift)
#define MASK_0 0x000f
#define MASK_1 0x00f0
#define MASK_2 0x0f00
#define MASK_3 0xf000
#endif
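
/* The vec_make_mult helpers take the leading one/two/four bf16 pairs of
   v_x0[0] and build word-rotated copies of each pair (one copy per word
   position), for use by the USE_MERGE_MMA code paths. */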
FORCEINLINE void vec_make_mult1(vec_bf16 *v_x0, const bool mask)
{
  if (mask) {
    v_x0[ 0] = vec_and(v_x0[0], (vec_bf16)vec_genbm(MASK_0));
  }
  v_x0[ 1] = VEC_SHIFT(v_x0[ 0], 1);
  v_x0[ 2] = VEC_SHIFT(v_x0[ 0], 2);
  v_x0[ 3] = VEC_SHIFT(v_x0[ 0], 3);
}

FORCEINLINE void vec_make_mult2(vec_bf16 *v_x0)
{
  v_x0[ 5] = vec_and(v_x0[0], (vec_bf16)vec_genbm(MASK_1));
  vec_make_mult1(v_x0, true);
  v_x0[ 4] = VEC_SHIFT(v_x0[ 5], 3);
  v_x0[ 6] = VEC_SHIFT(v_x0[ 5], 1);
  v_x0[ 7] = VEC_SHIFT(v_x0[ 5], 2);
}

FORCEINLINE void vec_make_mult4(vec_bf16 *v_x0)
{
  v_x0[10] = vec_and(v_x0[0], (vec_bf16)vec_genbm(MASK_2));
  v_x0[15] = vec_and(v_x0[0], (vec_bf16)vec_genbm(MASK_3));
  vec_make_mult2(v_x0);
  v_x0[ 8] = VEC_SHIFT(v_x0[10], 2);
  v_x0[ 9] = VEC_SHIFT(v_x0[10], 3);
  v_x0[11] = VEC_SHIFT(v_x0[10], 1);
  v_x0[12] = VEC_SHIFT(v_x0[15], 1);
  v_x0[13] = VEC_SHIFT(v_x0[15], 2);
  v_x0[14] = VEC_SHIFT(v_x0[15], 3);
}
#endif
#endif