
sgemm_small_kernel_nn_power10.c

  1. /***************************************************************************
  2. Copyright (c) 2021, The OpenBLAS Project
  3. All rights reserved.
  4. Redistribution and use in source and binary forms, with or without
  5. modification, are permitted provided that the following conditions are
  6. met:
  7. 1. Redistributions of source code must retain the above copyright
  8. notice, this list of conditions and the following disclaimer.
  9. 2. Redistributions in binary form must reproduce the above copyright
  10. notice, this list of conditions and the following disclaimer in
  11. the documentation and/or other materials provided with the
  12. distribution.
  13. 3. Neither the name of the OpenBLAS project nor the names of
  14. its contributors may be used to endorse or promote products
  15. derived from this software without specific prior written permission.
  16. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  17. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  18. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  19. ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
  20. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  21. DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  22. SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  23. CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  24. OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  25. USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26. *****************************************************************************/
  27. #include "common.h"
  28. #include <altivec.h>
  29. typedef __vector unsigned char vec_t;
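/* Store-back macros: each SAVE_* variant writes one MMA accumulator (or one
 * VSR result vector) to a tile of C, scaling by alpha.  When B0 is not
 * defined, the existing C values are scaled by beta and added in; the B0
 * variants further below skip the read of C entirely. */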
  30. #if !defined(B0)
  31. #define SAVE_4x4_ACC(ACC, N, M) \
  32. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  33. rc0 = vec_xl(0, C+(N+0)*ldc+M); \
  34. rc0 = vec_mul(rc0, vbeta); \
  35. result[0] = vec_madd(result[0], valpha, rc0); \
  36. vec_xst(result[0], 0, C+(N+0)*ldc+M); \
  37. rc0 = vec_xl(0, C+(N+1)*ldc+M); \
  38. rc0 = vec_mul(rc0, vbeta); \
  39. result[1] = vec_madd(result[1], valpha, rc0); \
  40. vec_xst(result[1], 0, C+(N+1)*ldc+M); \
  41. rc0 = vec_xl(0, C+(N+2)*ldc+M); \
  42. rc0 = vec_mul(rc0, vbeta); \
  43. result[2] = vec_madd(result[2], valpha, rc0); \
  44. vec_xst(result[2], 0, C+(N+2)*ldc+M); \
  45. rc0 = vec_xl(0, C+(N+3)*ldc+M); \
  46. rc0 = vec_mul(rc0, vbeta); \
  47. result[3] = vec_madd(result[3], valpha, rc0); \
  48. vec_xst(result[3], 0, C+(N+3)*ldc+M);
  49. #define SAVE_4x2_ACC(ACC, N, M) \
  50. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  51. rc0 = vec_xl_len(C+(N+0)*ldc+M, 8); \
  52. rc0 = vec_mul(rc0, vbeta); \
  53. result[0] = vec_madd(result[0], valpha, rc0); \
  54. vec_xst_len(result[0], C+(N+0)*ldc+M, 8); \
  55. rc0 = vec_xl_len(C+(N+1)*ldc+M, 8); \
  56. rc0 = vec_mul(rc0, vbeta); \
  57. result[1] = vec_madd(result[1], valpha, rc0); \
  58. vec_xst_len(result[1], C+(N+1)*ldc+M, 8); \
  59. rc0 = vec_xl_len(C+(N+2)*ldc+M, 8); \
  60. rc0 = vec_mul(rc0, vbeta); \
  61. result[2] = vec_madd(result[2], valpha, rc0); \
  62. vec_xst_len(result[2], C+(N+2)*ldc+M, 8); \
  63. rc0 = vec_xl_len(C+(N+3)*ldc+M, 8); \
  64. rc0 = vec_mul(rc0, vbeta); \
  65. result[3] = vec_madd(result[3], valpha, rc0); \
  66. vec_xst_len(result[3], C+(N+3)*ldc+M, 8);
  67. #define SAVE_2x4_ACC(ACC, N, M) \
  68. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  69. rc0 = vec_xl(0, C+(N+0)*ldc+M); \
  70. rc0 = vec_mul(rc0, vbeta); \
  71. result[0] = vec_madd(result[0], valpha, rc0); \
  72. vec_xst(result[0], 0, C+(N+0)*ldc+M); \
  73. rc0 = vec_xl(0, C+(N+1)*ldc+M); \
  74. rc0 = vec_mul(rc0, vbeta); \
  75. result[1] = vec_madd(result[1], valpha, rc0); \
  76. vec_xst(result[1], 0, C+(N+1)*ldc+M);
  77. #define SAVE_1x4_VSR(result, N, M) \
  78. rc0 = vec_xl(0, C+((N)*ldc)+M); \
  79. rc0 = vec_mul(rc0, vbeta); \
  80. result = vec_madd(result, valpha, rc0); \
  81. vec_xst(result, 0, C+((N)*ldc)+M);
  82. #define SAVE_2x2_VSR(result, N, M) \
  83. rc0 = vec_xl_len(C+(N*ldc)+M, 8); \
  84. rc0 = vec_insert(C[(N+1)*ldc+M+0], rc0, 2); \
  85. rc0 = vec_insert(C[(N+1)*ldc+M+1], rc0, 3); \
  86. rc0 = vec_mul(rc0, vbeta); \
  87. result = vec_madd(result, valpha, rc0); \
  88. vec_xst_len(result, C+(N*ldc)+M, 8); \
  89. C[(N+1)*ldc+M+0] = result[2]; \
  90. C[(N+1)*ldc+M+1] = result[3];
  91. #define SAVE_1x2_VSR(result, N, M) \
  92. rc0 = vec_xl_len(C+(N*ldc)+M, 8); \
  93. rc0 = vec_mul(rc0, vbeta); \
  94. result = vec_madd(result, valpha, rc0); \
  95. vec_xst_len(result, C+(N*ldc)+M, 8);
  96. #define SAVE_4x1_VSR(result, N, M) \
  97. result = vec_mul(result, valpha); \
  98. C[(N+0)*ldc+M] = (C[(N+0)*ldc+M] * beta) + result[0]; \
  99. C[(N+1)*ldc+M] = (C[(N+1)*ldc+M] * beta) + result[1]; \
  100. C[(N+2)*ldc+M] = (C[(N+2)*ldc+M] * beta) + result[2]; \
  101. C[(N+3)*ldc+M] = (C[(N+3)*ldc+M] * beta) + result[3];
  102. #define SAVE_2x1_VSR(result, N, M) \
  103. result = vec_mul(result, valpha); \
  104. C[(N+0)*ldc+M] = (C[(N+0)*ldc+M] * beta) + result[0]; \
  105. C[(N+1)*ldc+M] = (C[(N+1)*ldc+M] * beta) + result[1];
  106. #else
  107. #define SAVE_4x4_ACC(ACC, N, M) \
  108. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  109. result[0] = vec_mul(result[0], valpha); \
  110. vec_xst(result[0], 0, C+(N+0)*ldc+M); \
  111. result[1] = vec_mul(result[1], valpha); \
  112. vec_xst(result[1], 0, C+(N+1)*ldc+M); \
  113. result[2] = vec_mul(result[2], valpha); \
  114. vec_xst(result[2], 0, C+(N+2)*ldc+M); \
  115. result[3] = vec_mul(result[3], valpha); \
  116. vec_xst(result[3], 0, C+(N+3)*ldc+M);
  117. #define SAVE_4x2_ACC(ACC, N, M) \
  118. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  119. result[0] = vec_mul(result[0], valpha); \
  120. vec_xst_len(result[0], C+(N+0)*ldc+M, 8); \
  121. result[1] = vec_mul(result[1], valpha); \
  122. vec_xst_len(result[1], C+(N+1)*ldc+M, 8); \
  123. result[2] = vec_mul(result[2], valpha); \
  124. vec_xst_len(result[2], C+(N+2)*ldc+M, 8); \
  125. result[3] = vec_mul(result[3], valpha); \
  126. vec_xst_len(result[3], C+(N+3)*ldc+M, 8);
  127. #define SAVE_2x4_ACC(ACC, N, M) \
  128. __builtin_mma_disassemble_acc ((void *)result, ACC); \
  129. result[0] = vec_mul(result[0], valpha); \
  130. vec_xst(result[0], 0, C+(N+0)*ldc+M); \
  131. result[1] = vec_mul(result[1], valpha); \
  132. vec_xst(result[1], 0, C+(N+1)*ldc+M);
  133. #define SAVE_1x4_VSR(result, N, M) \
  134. result = vec_mul(result, valpha); \
  135. vec_xst(result, 0, C+((N)*ldc)+M);
  136. #define SAVE_2x2_VSR(result, N, M) \
  137. result = vec_mul(result, valpha); \
  138. vec_xst_len(result, C+(N*ldc)+M, 8); \
  139. C[(N+1)*ldc+M+0] = result[2]; \
  140. C[(N+1)*ldc+M+1] = result[3];
  141. #define SAVE_1x2_VSR(result, N, M) \
  142. result = vec_mul(result, valpha); \
  143. vec_xst_len(result, C+(N*ldc)+M, 8);
  144. #define SAVE_4x1_VSR(result, N, M) \
  145. result = vec_mul(result, valpha); \
  146. C[(N+0)*ldc+M] = result[0]; \
  147. C[(N+1)*ldc+M] = result[1]; \
  148. C[(N+2)*ldc+M] = result[2]; \
  149. C[(N+3)*ldc+M] = result[3];
  150. #define SAVE_2x1_VSR(result, N, M) \
  151. result = vec_mul(result, valpha); \
  152. C[(N+0)*ldc+M] = result[0]; \
  153. C[(N+1)*ldc+M] = result[1];
  154. #endif
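/* Zero the MMA accumulators before starting a fresh K loop. */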
  155. #define INIT_8ACCS() \
  156. __builtin_mma_xxsetaccz(&acc0); \
  157. __builtin_mma_xxsetaccz(&acc1); \
  158. __builtin_mma_xxsetaccz(&acc2); \
  159. __builtin_mma_xxsetaccz(&acc3); \
  160. __builtin_mma_xxsetaccz(&acc4); \
  161. __builtin_mma_xxsetaccz(&acc5); \
  162. __builtin_mma_xxsetaccz(&acc6); \
  163. __builtin_mma_xxsetaccz(&acc7);
  164. #define INIT_4ACCS() \
  165. __builtin_mma_xxsetaccz(&acc0); \
  166. __builtin_mma_xxsetaccz(&acc1); \
  167. __builtin_mma_xxsetaccz(&acc2); \
  168. __builtin_mma_xxsetaccz(&acc3);
  169. #define INIT_2ACCS() \
  170. __builtin_mma_xxsetaccz(&acc0); \
  171. __builtin_mma_xxsetaccz(&acc1);
  172. #define INIT_1ACC() \
  173. __builtin_mma_xxsetaccz(&acc0);
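/* A is stored column-major (element (m,k) at A[k*lda+m]).  LOAD_A_1xW reads
 * W consecutive rows of column k into vector registers; LOAD_A_2x2 builds the
 * {a(m,k), a(m+1,k), a(m,k), a(m+1,k)} pattern used by the 2x2 VSR tile. */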
  174. #define LOAD_A_1x16(K, M) \
  175. ra0 = vec_xl(0, A+((K)*lda)+M+0); \
  176. ra1 = vec_xl(0, A+((K)*lda)+M+4); \
  177. ra2 = vec_xl(0, A+((K)*lda)+M+8); \
  178. ra3 = vec_xl(0, A+((K)*lda)+M+12);
  179. #define LOAD_A_1x8(K, M) \
  180. ra0 = vec_xl(0, A+((K)*lda)+M+0); \
  181. ra1 = vec_xl(0, A+((K)*lda)+M+4);
  182. #define LOAD_A_1x4(K, M) ra0 = vec_xl(0, A+((K)*lda)+M);
  183. #define LOAD_A_2x2(K, M) \
  184. ra0 = vec_splats(A[K*lda+M]); \
  185. ra0 = vec_insert(A[K*lda+M+1], ra0, 1); \
  186. ra0 = vec_insert(A[K*lda+M+1], ra0, 3);
  187. #define LOAD_A_1x2(K, M) ra0 = vec_xl_len(A+((K)*lda)+M, 8);
  188. #define LOAD_A_1x1(K, M) ra0 = vec_splats(A[(K)*lda+M]);
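/* B is also column-major (element (k,n) at B[n*ldb+k]), but the MMA kernels
 * need the n-values belonging to one k packed into a single vector.  The
 * LOAD_BT_RxD macros load D consecutive k-elements from each of R columns of
 * B and transpose them in registers (vec_mergeh/vec_mergel/vec_xxpermdi), so
 * that for every group of up to four columns there are D vectors, one per k,
 * each holding those columns' values for that k. */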
  189. #define LOAD_BT_16x4(N, K) \
  190. rb0 = vec_xl(0, B+(N+0)*ldb+K); \
  191. rb1 = vec_xl(0, B+(N+1)*ldb+K); \
  192. t0 = vec_mergeh(rb0, rb1); \
  193. t1 = vec_mergel(rb0, rb1); \
  194. rb2 = vec_xl(0, B+(N+2)*ldb+K); \
  195. rb3 = vec_xl(0, B+(N+3)*ldb+K); \
  196. t2 = vec_mergeh(rb2, rb3); \
  197. t3 = vec_mergel(rb2, rb3); \
  198. rb0 = vec_xxpermdi(t0, t2, 0b00); \
  199. rb1 = vec_xxpermdi(t0, t2, 0b11); \
  200. rb2 = vec_xxpermdi(t1, t3, 0b00); \
  201. rb3 = vec_xxpermdi(t1, t3, 0b11); \
  202. rb4 = vec_xl(0, B+(N+4)*ldb+K); \
  203. rb5 = vec_xl(0, B+(N+5)*ldb+K); \
  204. t0 = vec_mergeh(rb4, rb5); \
  205. t1 = vec_mergel(rb4, rb5); \
  206. rb6 = vec_xl(0, B+(N+6)*ldb+K); \
  207. rb7 = vec_xl(0, B+(N+7)*ldb+K); \
  208. t2 = vec_mergeh(rb6, rb7); \
  209. t3 = vec_mergel(rb6, rb7); \
  210. rb4 = vec_xxpermdi(t0, t2, 0b00); \
  211. rb5 = vec_xxpermdi(t0, t2, 0b11); \
  212. rb6 = vec_xxpermdi(t1, t3, 0b00); \
  213. rb7 = vec_xxpermdi(t1, t3, 0b11); \
  214. rb8 = vec_xl(0, B+(N+8)*ldb+K); \
  215. rb9 = vec_xl(0, B+(N+9)*ldb+K); \
  216. t0 = vec_mergeh(rb8, rb9); \
  217. t1 = vec_mergel(rb8, rb9); \
  218. rb10 = vec_xl(0, B+(N+10)*ldb+K); \
  219. rb11 = vec_xl(0, B+(N+11)*ldb+K); \
  220. t2 = vec_mergeh(rb10, rb11); \
  221. t3 = vec_mergel(rb10, rb11); \
  222. rb8 = vec_xxpermdi(t0, t2, 0b00); \
  223. rb9 = vec_xxpermdi(t0, t2, 0b11); \
  224. rb10 = vec_xxpermdi(t1, t3, 0b00); \
  225. rb11 = vec_xxpermdi(t1, t3, 0b11); \
  226. rb12 = vec_xl(0, B+(N+12)*ldb+K); \
  227. rb13 = vec_xl(0, B+(N+13)*ldb+K); \
  228. t0 = vec_mergeh(rb12, rb13); \
  229. t1 = vec_mergel(rb12, rb13); \
  230. rb14 = vec_xl(0, B+(N+14)*ldb+K); \
  231. rb15 = vec_xl(0, B+(N+15)*ldb+K); \
  232. t2 = vec_mergeh(rb14, rb15); \
  233. t3 = vec_mergel(rb14, rb15); \
  234. rb12 = vec_xxpermdi(t0, t2, 0b00); \
  235. rb13 = vec_xxpermdi(t0, t2, 0b11); \
  236. rb14 = vec_xxpermdi(t1, t3, 0b00); \
  237. rb15 = vec_xxpermdi(t1, t3, 0b11);
  238. #define LOAD_BT_16x2(N, K) \
  239. rb0 = vec_xl_len(B+(N+0)*ldb+K, 8); \
  240. rb1 = vec_xl_len(B+(N+1)*ldb+K, 8); \
  241. t0 = vec_mergeh(rb0, rb1); \
  242. rb2 = vec_xl_len(B+(N+2)*ldb+K, 8); \
  243. rb3 = vec_xl_len(B+(N+3)*ldb+K, 8); \
  244. t1 = vec_mergeh(rb2, rb3); \
  245. rb0 = vec_xxpermdi(t0, t1, 0b00); \
  246. rb1 = vec_xxpermdi(t0, t1, 0b11); \
  247. rb4 = vec_xl_len(B+(N+4)*ldb+K, 8); \
  248. rb5 = vec_xl_len(B+(N+5)*ldb+K, 8); \
  249. t0 = vec_mergeh(rb4, rb5); \
  250. rb6 = vec_xl_len(B+(N+6)*ldb+K, 8); \
  251. rb7 = vec_xl_len(B+(N+7)*ldb+K, 8); \
  252. t1 = vec_mergeh(rb6, rb7); \
  253. rb2 = vec_xxpermdi(t0, t1, 0b00); \
  254. rb3 = vec_xxpermdi(t0, t1, 0b11); \
  255. rb8 = vec_xl_len(B+(N+8)*ldb+K, 8); \
  256. rb9 = vec_xl_len(B+(N+9)*ldb+K, 8); \
  257. t0 = vec_mergeh(rb8, rb9); \
  258. rb10 = vec_xl_len(B+(N+10)*ldb+K, 8); \
  259. rb11 = vec_xl_len(B+(N+11)*ldb+K, 8); \
  260. t1 = vec_mergeh(rb10, rb11); \
  261. rb4 = vec_xxpermdi(t0, t1, 0b00); \
  262. rb5 = vec_xxpermdi(t0, t1, 0b11); \
  263. rb12 = vec_xl_len(B+(N+12)*ldb+K, 8); \
  264. rb13 = vec_xl_len(B+(N+13)*ldb+K, 8); \
  265. t0 = vec_mergeh(rb12, rb13); \
  266. rb14 = vec_xl_len(B+(N+14)*ldb+K, 8); \
  267. rb15 = vec_xl_len(B+(N+15)*ldb+K, 8); \
  268. t1 = vec_mergeh(rb14, rb15); \
  269. rb6 = vec_xxpermdi(t0, t1, 0b00); \
  270. rb7 = vec_xxpermdi(t0, t1, 0b11);
  271. #define LOAD_BT_16x1(N, K) \
  272. rb0 = vec_xor(rb0, rb0); \
  273. rb0 = vec_insert(B[(N+0)*ldb+K], rb0, 0); \
  274. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 1); \
  275. rb0 = vec_insert(B[(N+2)*ldb+K], rb0, 2); \
  276. rb0 = vec_insert(B[(N+3)*ldb+K], rb0, 3); \
  277. rb1 = vec_xor(rb1, rb1); \
  278. rb1 = vec_insert(B[(N+4)*ldb+K], rb1, 0); \
  279. rb1 = vec_insert(B[(N+5)*ldb+K], rb1, 1); \
  280. rb1 = vec_insert(B[(N+6)*ldb+K], rb1, 2); \
  281. rb1 = vec_insert(B[(N+7)*ldb+K], rb1, 3); \
  282. rb2 = vec_xor(rb2, rb2); \
  283. rb2 = vec_insert(B[(N+8)*ldb+K], rb2, 0); \
  284. rb2 = vec_insert(B[(N+9)*ldb+K], rb2, 1); \
  285. rb2 = vec_insert(B[(N+10)*ldb+K], rb2, 2); \
  286. rb2 = vec_insert(B[(N+11)*ldb+K], rb2, 3); \
  287. rb3 = vec_xor(rb3, rb3); \
  288. rb3 = vec_insert(B[(N+12)*ldb+K], rb3, 0); \
  289. rb3 = vec_insert(B[(N+13)*ldb+K], rb3, 1); \
  290. rb3 = vec_insert(B[(N+14)*ldb+K], rb3, 2); \
  291. rb3 = vec_insert(B[(N+15)*ldb+K], rb3, 3);
  292. #define LOAD_BT_8x4(N, K) \
  293. rb0 = vec_xl(0, B+(N+0)*ldb+K); \
  294. rb1 = vec_xl(0, B+(N+1)*ldb+K); \
  295. t0 = vec_mergeh(rb0, rb1); \
  296. t1 = vec_mergel(rb0, rb1); \
  297. rb2 = vec_xl(0, B+(N+2)*ldb+K); \
  298. rb3 = vec_xl(0, B+(N+3)*ldb+K); \
  299. t2 = vec_mergeh(rb2, rb3); \
  300. t3 = vec_mergel(rb2, rb3); \
  301. rb0 = vec_xxpermdi(t0, t2, 0b00); \
  302. rb1 = vec_xxpermdi(t0, t2, 0b11); \
  303. rb2 = vec_xxpermdi(t1, t3, 0b00); \
  304. rb3 = vec_xxpermdi(t1, t3, 0b11); \
  305. rb4 = vec_xl(0, B+(N+4)*ldb+K); \
  306. rb5 = vec_xl(0, B+(N+5)*ldb+K); \
  307. t0 = vec_mergeh(rb4, rb5); \
  308. t1 = vec_mergel(rb4, rb5); \
  309. rb6 = vec_xl(0, B+(N+6)*ldb+K); \
  310. rb7 = vec_xl(0, B+(N+7)*ldb+K); \
  311. t2 = vec_mergeh(rb6, rb7); \
  312. t3 = vec_mergel(rb6, rb7); \
  313. rb4 = vec_xxpermdi(t0, t2, 0b00); \
  314. rb5 = vec_xxpermdi(t0, t2, 0b11); \
  315. rb6 = vec_xxpermdi(t1, t3, 0b00); \
  316. rb7 = vec_xxpermdi(t1, t3, 0b11);
  317. #define LOAD_BT_8x2(N, K) \
  318. rb0 = vec_xl_len(B+(N+0)*ldb+K, 8); \
  319. rb1 = vec_xl_len(B+(N+1)*ldb+K, 8); \
  320. t0 = vec_mergeh(rb0, rb1); \
  321. rb2 = vec_xl_len(B+(N+2)*ldb+K, 8); \
  322. rb3 = vec_xl_len(B+(N+3)*ldb+K, 8); \
  323. t1 = vec_mergeh(rb2, rb3); \
  324. rb0 = vec_xxpermdi(t0, t1, 0b00); \
  325. rb1 = vec_xxpermdi(t0, t1, 0b11); \
  326. rb4 = vec_xl_len(B+(N+4)*ldb+K, 8); \
  327. rb5 = vec_xl_len(B+(N+5)*ldb+K, 8); \
  328. t0 = vec_mergeh(rb4, rb5); \
  329. rb6 = vec_xl_len(B+(N+6)*ldb+K, 8); \
  330. rb7 = vec_xl_len(B+(N+7)*ldb+K, 8); \
  331. t1 = vec_mergeh(rb6, rb7); \
  332. rb2 = vec_xxpermdi(t0, t1, 0b00); \
  333. rb3 = vec_xxpermdi(t0, t1, 0b11);
  334. #define LOAD_BT_8x1(N, K) \
  335. rb0 = vec_xor(rb0, rb0); \
  336. rb0 = vec_insert(B[(N+0)*ldb+K], rb0, 0); \
  337. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 1); \
  338. rb0 = vec_insert(B[(N+2)*ldb+K], rb0, 2); \
  339. rb0 = vec_insert(B[(N+3)*ldb+K], rb0, 3); \
  340. rb1 = vec_xor(rb1, rb1); \
  341. rb1 = vec_insert(B[(N+4)*ldb+K], rb1, 0); \
  342. rb1 = vec_insert(B[(N+5)*ldb+K], rb1, 1); \
  343. rb1 = vec_insert(B[(N+6)*ldb+K], rb1, 2); \
  344. rb1 = vec_insert(B[(N+7)*ldb+K], rb1, 3);
  345. #define LOAD_BT_4x4(N, K) \
  346. rb0 = vec_xl(0, B+(N+0)*ldb+K); \
  347. rb1 = vec_xl(0, B+(N+1)*ldb+K); \
  348. t0 = vec_mergeh(rb0, rb1); \
  349. t1 = vec_mergel(rb0, rb1); \
  350. rb2 = vec_xl(0, B+(N+2)*ldb+K); \
  351. rb3 = vec_xl(0, B+(N+3)*ldb+K); \
  352. t2 = vec_mergeh(rb2, rb3); \
  353. t3 = vec_mergel(rb2, rb3); \
  354. rb0 = vec_xxpermdi(t0, t2, 0b00); \
  355. rb1 = vec_xxpermdi(t0, t2, 0b11); \
  356. rb2 = vec_xxpermdi(t1, t3, 0b00); \
  357. rb3 = vec_xxpermdi(t1, t3, 0b11);
  358. #define LOAD_BT_4x2(N, K) \
  359. rb0 = vec_xl_len(B+(N+0)*ldb+K, 8); \
  360. rb1 = vec_xl_len(B+(N+1)*ldb+K, 8); \
  361. t0 = vec_mergeh(rb0, rb1); \
  362. rb2 = vec_xl_len(B+(N+2)*ldb+K, 8); \
  363. rb3 = vec_xl_len(B+(N+3)*ldb+K, 8); \
  364. t1 = vec_mergeh(rb2, rb3); \
  365. rb0 = vec_xxpermdi(t0, t1, 0b00); \
  366. rb1 = vec_xxpermdi(t0, t1, 0b11);
  367. #define LOAD_BT_4x1(N, K) \
  368. rb0 = vec_xor(rb0, rb0); \
  369. rb0 = vec_insert(B[(N+0)*ldb+K], rb0, 0); \
  370. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 1); \
  371. rb0 = vec_insert(B[(N+2)*ldb+K], rb0, 2); \
  372. rb0 = vec_insert(B[(N+3)*ldb+K], rb0, 3);
  373. #define LOAD_BT_2x4(N, K) \
  374. rb0 = vec_xl(0, B+(N+0)*ldb+K); \
  375. rb1 = vec_xl(0, B+(N+1)*ldb+K); \
  376. t0 = vec_mergeh(rb0, rb1); \
  377. t1 = vec_mergeo(rb0, rb1); \
  378. t2 = vec_mergel(rb0, rb1); \
  379. rb0 = t0; \
  380. rb1 = t1; \
  381. rb2 = t2; \
  382. rb3 = vec_xor(rb3, rb3); \
  383. rb3 = vec_insert(vec_extract(t2,2), rb3, 0); \
  384. rb3 = vec_insert(vec_extract(t2,3), rb3, 1);
  385. #define LOAD_BT_2x2(N, K) \
  386. rb0 = vec_xl_len(B+(N+0)*ldb+K, 8); \
  387. rb1 = vec_xl_len(B+(N+1)*ldb+K, 8); \
  388. t0 = vec_mergee(rb0, rb1); \
  389. t1 = vec_mergeo(rb0, rb1); \
  390. rb0 = t0; \
  391. rb1 = t1;
  392. #define LOAD_BT_2x1(N, K) \
  393. rb0 = vec_xor(rb0, rb0); \
  394. rb0 = vec_insert(B[(N+0)*ldb+K], rb0, 0); \
  395. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 1);
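/* Non-transposed B loads used by the VSR fallback tiles: LOAD_B_2x2 builds
 * {b(k,n), b(k,n), b(k,n+1), b(k,n+1)} to pair with LOAD_A_2x2, LOAD_B_2x1
 * packs two neighbouring columns' values, and LOAD_B_1x1 splats one element. */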
  396. #define LOAD_B_2x2(N, K) \
  397. rb0 = vec_splats(B[(N+0)*ldb+K]); \
  398. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 2); \
  399. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 3);
  400. #define LOAD_B_2x1(N, K) \
  401. rb0 = vec_insert(B[(N+0)*ldb+K], rb0, 0); \
  402. rb0 = vec_insert(B[(N+1)*ldb+K], rb0, 1);
  403. #define LOAD_B_1x1(N, K) rb0 = vec_splats(B[(N)*ldb+K]);
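/* MMA kernels: each __builtin_mma_xvf32gerpp accumulates the outer product of
 * a B vector and an A vector into a 4x4 accumulator.  As used here, the first
 * (B) operand indexes the rows of the accumulator, i.e. the n direction of C,
 * and the second (A) operand indexes the columns (the m direction), matching
 * the SAVE_*_ACC layout above. */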
  404. #define KERNEL_MMA_8ACC(b0, b1, b2, b3, b4, b5, b6, b7, \
  405. a0, a1, a2, a3, a4, a5, a6, a7) \
  406. __builtin_mma_xvf32gerpp(&acc0, (vec_t)b0, (vec_t)a0); \
  407. __builtin_mma_xvf32gerpp(&acc1, (vec_t)b1, (vec_t)a1); \
  408. __builtin_mma_xvf32gerpp(&acc2, (vec_t)b2, (vec_t)a2); \
  409. __builtin_mma_xvf32gerpp(&acc3, (vec_t)b3, (vec_t)a3); \
  410. __builtin_mma_xvf32gerpp(&acc4, (vec_t)b4, (vec_t)a4); \
  411. __builtin_mma_xvf32gerpp(&acc5, (vec_t)b5, (vec_t)a5); \
  412. __builtin_mma_xvf32gerpp(&acc6, (vec_t)b6, (vec_t)a6); \
  413. __builtin_mma_xvf32gerpp(&acc7, (vec_t)b7, (vec_t)a7);
  414. #define KERNEL_MMA_4ACC(b0, b1, b2, b3, a0, a1, a2, a3) \
  415. __builtin_mma_xvf32gerpp(&acc0, (vec_t)b0, (vec_t)a0); \
  416. __builtin_mma_xvf32gerpp(&acc1, (vec_t)b1, (vec_t)a1); \
  417. __builtin_mma_xvf32gerpp(&acc2, (vec_t)b2, (vec_t)a2); \
  418. __builtin_mma_xvf32gerpp(&acc3, (vec_t)b3, (vec_t)a3);
  419. #define KERNEL_MMA_2ACC(b0, b1, a0, a1) \
  420. __builtin_mma_xvf32gerpp(&acc0, (vec_t)b0, (vec_t)a0); \
  421. __builtin_mma_xvf32gerpp(&acc1, (vec_t)b1, (vec_t)a1);
  422. #define KERNEL_MMA_1ACC(b0, a0) \
  423. __builtin_mma_xvf32gerpp(&acc0, (vec_t)b0, (vec_t)a0);
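/* Plain VSX fused multiply-add kernels for tiles too narrow to fill an MMA
 * accumulator (single rows, 2x2 and 2x1 blocks of C). */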
  424. #define KERNEL_VMADD_4VSR(a0, a1, a2, a3, b0, b1, b2, b3) \
  425. result = vec_madd(a0, b0, result); \
  426. result1 = vec_madd(a1, b1, result1); \
  427. result2 = vec_madd(a2, b2, result2); \
  428. result3 = vec_madd(a3, b3, result3);
  429. #define KERNEL_VMADD_2VSR(a0, a1, b0, b1) \
  430. result = vec_madd(a0, b0, result); \
  431. result1 = vec_madd(a1, b1, result1);
  432. #define KERNEL_VMADD_1VSR(a0, b0) \
  433. result = vec_madd(a0, b0, result);
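/* Optional packing of B: during the first m iteration of a 16-column block
 * the freshly transposed B vectors are stored to packB, so the remaining m
 * iterations can reload them with LOAD_PACKED_B instead of repeating the
 * strided loads and the register transpose. */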
  434. #define PACK_B(rb0, rb1, rb2, rb3, offset) \
  435. vec_xst(rb0, 0, packB+(k*16)+0+offset); \
  436. vec_xst(rb1, 0, packB+(k*16)+4+offset); \
  437. vec_xst(rb2, 0, packB+(k*16)+8+offset); \
  438. vec_xst(rb3, 0, packB+(k*16)+12+offset);
  439. #define LOAD_PACKED_B(rb0, rb1, rb2, rb3, offset) \
  440. rb0 = vec_xl(0, packB+(k*16)+0+offset); \
  441. rb1 = vec_xl(0, packB+(k*16)+4+offset); \
  442. rb2 = vec_xl(0, packB+(k*16)+8+offset); \
  443. rb3 = vec_xl(0, packB+(k*16)+12+offset);
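/* Small-matrix GEMM entry point.  With B0 defined the kernel computes
 * C = alpha*A*B and never reads C; otherwise it computes
 * C = alpha*A*B + beta*C.  A is M x K (leading dimension lda), B is K x N
 * (ldb) and C is M x N (ldc), all column-major. */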
  444. #ifdef B0
  445. int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, IFLOAT * A, BLASLONG lda, FLOAT alpha, IFLOAT * B, BLASLONG ldb, FLOAT * C, BLASLONG ldc)
  446. #else
  447. int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, IFLOAT * A, BLASLONG lda, FLOAT alpha, IFLOAT * B, BLASLONG ldb, FLOAT beta, FLOAT * C, BLASLONG ldc)
  448. #endif
  449. {
  450. BLASLONG m, n, k;
  451. BLASLONG m16 = M & ~15;
  452. BLASLONG m8 = M & ~7;
  453. BLASLONG m4 = M & ~3;
  454. BLASLONG m2 = M & ~1;
  455. BLASLONG n16 = N & ~15;
  456. BLASLONG n8 = N & ~7;
  457. BLASLONG n4 = N & ~3;
  458. BLASLONG n2 = N & ~1;
  459. BLASLONG k4 = K & ~3;
  460. BLASLONG k2 = K & ~1;
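/* m16/m8/m4/m2 and n16/n8/n4/n2 are M and N rounded down to the nearest
 * multiple of each tile width; k4/k2 bound the unrolled K loops. */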
  461. #if defined(__GNUC__) && !defined(__clang__)
  462. int has_packing = (M >= 32 && N >= 32 && K >= 32) ? 1 : 0;
  463. #else
  464. int has_packing = 0;
  465. #endif
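/* packB holds one transposed 16-column panel of B (K x 16 floats).  Packing
 * is enabled only on GCC builds and only when M, N and K are all at least 32. */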
  466. float *packB;
  467. if (has_packing) packB = (float *)malloc(K*16*sizeof(float));
  468. vector float valpha = vec_splats(alpha);
  469. #if !defined(B0)
  470. vector float vbeta = vec_splats(beta);
  471. #endif
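/* Main loop nest: columns of C in blocks of 16, then rows in blocks of 8, 4
 * and 2 handled with MMA accumulators, with a vec_madd fallback for the last
 * single row.  In the 8x16 tile, acc0..acc3 cover rows m..m+3 of C for column
 * groups n, n+4, n+8, n+12, and acc4..acc7 cover rows m+4..m+7. */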
  472. for (n = 0; n < n16; n += 16) {
  473. for (m = 0; m < m8; m += 8) {
  474. __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7;
  475. INIT_8ACCS();
  476. register vector float ra0, ra1;
  477. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7, rb8, rb9,
  478. rb10, rb11, rb12, rb13, rb14, rb15;
  479. register vector float t0, t1, t2, t3;
  480. if (has_packing) {
  481. if (m == 0) {
  482. for (k = 0; k < k4; k += 4) {
  483. LOAD_A_1x8(k, m);
  484. LOAD_BT_16x4(n, k);
  485. KERNEL_MMA_8ACC(rb0, rb4, rb8, rb12, rb0, rb4, rb8, rb12,
  486. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  487. PACK_B(rb0, rb4, rb8, rb12, 0);
  488. LOAD_A_1x8(k+1, m);
  489. KERNEL_MMA_8ACC(rb1, rb5, rb9, rb13, rb1, rb5, rb9, rb13,
  490. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  491. PACK_B(rb1, rb5, rb9, rb13, 16);
  492. LOAD_A_1x8(k+2, m);
  493. KERNEL_MMA_8ACC(rb2, rb6, rb10, rb14, rb2, rb6, rb10, rb14,
  494. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  495. PACK_B(rb2, rb6, rb10, rb14, 32);
  496. LOAD_A_1x8(k+3, m);
  497. KERNEL_MMA_8ACC(rb3, rb7, rb11, rb15, rb3, rb7, rb11, rb15,
  498. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  499. PACK_B(rb3, rb7, rb11, rb15, 48);
  500. }
  501. for (; k < k2; k += 2) {
  502. LOAD_A_1x8(k, m);
  503. LOAD_BT_16x2(n, k);
  504. KERNEL_MMA_8ACC(rb0, rb2, rb4, rb6, rb0, rb2, rb4, rb6,
  505. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  506. PACK_B(rb0, rb2, rb4, rb6, 0);
  507. LOAD_A_1x8(k+1, m);
  508. KERNEL_MMA_8ACC(rb1, rb3, rb5, rb7, rb1, rb3, rb5, rb7,
  509. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  510. PACK_B(rb1, rb3, rb5, rb7, 16);
  511. }
  512. for (; k < K; k++) {
  513. LOAD_A_1x8(k, m);
  514. LOAD_BT_16x1(n, k);
  515. KERNEL_MMA_8ACC(rb0, rb1, rb2, rb3, rb0, rb1, rb2, rb3,
  516. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  517. PACK_B(rb0, rb1, rb2, rb3, 0);
  518. }
  519. } else {
  520. for (k = 0; k < k4; k += 4) {
  521. LOAD_A_1x8(k, m);
  522. LOAD_PACKED_B(rb0, rb4, rb8, rb12, 0);
  523. KERNEL_MMA_8ACC(rb0, rb4, rb8, rb12, rb0, rb4, rb8, rb12,
  524. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  525. LOAD_A_1x8(k+1, m);
  526. LOAD_PACKED_B(rb1, rb5, rb9, rb13, 16);
  527. KERNEL_MMA_8ACC(rb1, rb5, rb9, rb13, rb1, rb5, rb9, rb13,
  528. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  529. LOAD_A_1x8(k+2, m);
  530. LOAD_PACKED_B(rb2, rb6, rb10, rb14, 32);
  531. KERNEL_MMA_8ACC(rb2, rb6, rb10, rb14, rb2, rb6, rb10, rb14,
  532. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  533. LOAD_A_1x8(k+3, m);
  534. LOAD_PACKED_B(rb3, rb7, rb11, rb15, 48);
  535. KERNEL_MMA_8ACC(rb3, rb7, rb11, rb15, rb3, rb7, rb11, rb15,
  536. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  537. }
  538. for (; k < k2; k += 2) {
  539. LOAD_A_1x8(k, m);
  540. LOAD_PACKED_B(rb0, rb2, rb4, rb6, 0);
  541. KERNEL_MMA_8ACC(rb0, rb2, rb4, rb6, rb0, rb2, rb4, rb6,
  542. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  543. LOAD_A_1x8(k+1, m);
  544. LOAD_PACKED_B(rb1, rb3, rb5, rb7, 16);
  545. KERNEL_MMA_8ACC(rb1, rb3, rb5, rb7, rb1, rb3, rb5, rb7,
  546. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  547. }
  548. for (; k < K; k++) {
  549. LOAD_A_1x8(k, m);
  550. LOAD_PACKED_B(rb0, rb1, rb2, rb3, 0);
  551. KERNEL_MMA_8ACC(rb0, rb1, rb2, rb3, rb0, rb1, rb2, rb3,
  552. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  553. }
  554. }
  555. } else {
  556. for (k = 0; k < k4; k += 4) {
  557. LOAD_A_1x8(k, m);
  558. LOAD_BT_16x4(n, k);
  559. KERNEL_MMA_8ACC(rb0, rb4, rb8, rb12, rb0, rb4, rb8, rb12,
  560. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  561. LOAD_A_1x8(k+1, m);
  562. KERNEL_MMA_8ACC(rb1, rb5, rb9, rb13, rb1, rb5, rb9, rb13,
  563. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  564. LOAD_A_1x8(k+2, m);
  565. KERNEL_MMA_8ACC(rb2, rb6, rb10, rb14, rb2, rb6, rb10, rb14,
  566. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  567. LOAD_A_1x8(k+3, m);
  568. KERNEL_MMA_8ACC(rb3, rb7, rb11, rb15, rb3, rb7, rb11, rb15,
  569. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  570. }
  571. for (; k < k2; k += 2) {
  572. LOAD_A_1x8(k, m);
  573. LOAD_BT_16x2(n, k);
  574. KERNEL_MMA_8ACC(rb0, rb2, rb4, rb6, rb0, rb2, rb4, rb6,
  575. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  576. LOAD_A_1x8(k+1, m);
  577. KERNEL_MMA_8ACC(rb1, rb3, rb5, rb7, rb1, rb3, rb5, rb7,
  578. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  579. }
  580. for (; k < K; k++) {
  581. LOAD_A_1x8(k, m);
  582. LOAD_BT_16x1(n, k);
  583. KERNEL_MMA_8ACC(rb0, rb1, rb2, rb3, rb0, rb1, rb2, rb3,
  584. ra0, ra0, ra0, ra0, ra1, ra1, ra1, ra1);
  585. }
  586. }
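/* Write the eight accumulators back to the 8x16 tile of C: each SAVE_4x4_ACC
 * covers four consecutive columns (n direction) and four consecutive rows
 * (m direction). */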
  587. #if !defined(B0)
  588. register vector float rc0;
  589. #endif
  590. vector float result[4];
  591. SAVE_4x4_ACC(&acc0, n+0, m+0);
  592. SAVE_4x4_ACC(&acc1, n+4, m+0);
  593. SAVE_4x4_ACC(&acc2, n+8, m+0);
  594. SAVE_4x4_ACC(&acc3, n+12, m+0);
  595. SAVE_4x4_ACC(&acc4, n+0, m+4);
  596. SAVE_4x4_ACC(&acc5, n+4, m+4);
  597. SAVE_4x4_ACC(&acc6, n+8, m+4);
  598. SAVE_4x4_ACC(&acc7, n+12, m+4);
  599. }
  600. for (; m < m4; m += 4) {
  601. __vector_quad acc0, acc1, acc2, acc3;
  602. INIT_4ACCS();
  603. register vector float ra0;
  604. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7, rb8, rb9,
  605. rb10, rb11, rb12, rb13, rb14, rb15;
  606. register vector float t0, t1, t2, t3;
  607. if (!has_packing) {
  608. for (k = 0; k < k4; k += 4) {
  609. LOAD_A_1x4(k, m);
  610. LOAD_BT_16x4(n, k);
  611. KERNEL_MMA_4ACC(rb0, rb4, rb8, rb12, ra0, ra0, ra0, ra0);
  612. LOAD_A_1x4(k+1, m);
  613. KERNEL_MMA_4ACC(rb1, rb5, rb9, rb13, ra0, ra0, ra0, ra0);
  614. LOAD_A_1x4(k+2, m);
  615. KERNEL_MMA_4ACC(rb2, rb6, rb10, rb14, ra0, ra0, ra0, ra0);
  616. LOAD_A_1x4(k+3, m);
  617. KERNEL_MMA_4ACC(rb3, rb7, rb11, rb15, ra0, ra0, ra0, ra0);
  618. }
  619. for (; k < k2; k += 2) {
  620. LOAD_A_1x4(k, m);
  621. LOAD_BT_16x2(n, k);
  622. KERNEL_MMA_4ACC(rb0, rb2, rb4, rb6, ra0, ra0, ra0, ra0);
  623. LOAD_A_1x4(k+1, m);
  624. KERNEL_MMA_4ACC(rb1, rb3, rb5, rb7, ra0, ra0, ra0, ra0);
  625. }
  626. for (; k < K; k++) {
  627. LOAD_A_1x4(k, m);
  628. LOAD_BT_16x1(n, k);
  629. KERNEL_MMA_4ACC(rb0, rb1, rb2, rb3, ra0, ra0, ra0, ra0);
  630. }
  631. } else {
  632. for (k = 0; k < k4; k += 4) {
  633. LOAD_A_1x4(k, m);
  634. LOAD_PACKED_B(rb0, rb4, rb8, rb12, 0);
  635. KERNEL_MMA_4ACC(rb0, rb4, rb8, rb12, ra0, ra0, ra0, ra0);
  636. LOAD_A_1x4(k+1, m);
  637. LOAD_PACKED_B(rb1, rb5, rb9, rb13, 16);
  638. KERNEL_MMA_4ACC(rb1, rb5, rb9, rb13, ra0, ra0, ra0, ra0);
  639. LOAD_A_1x4(k+2, m);
  640. LOAD_PACKED_B(rb2, rb6, rb10, rb14, 32);
  641. KERNEL_MMA_4ACC(rb2, rb6, rb10, rb14, ra0, ra0, ra0, ra0);
  642. LOAD_A_1x4(k+3, m);
  643. LOAD_PACKED_B(rb3, rb7, rb11, rb15, 48);
  644. KERNEL_MMA_4ACC(rb3, rb7, rb11, rb15, ra0, ra0, ra0, ra0);
  645. }
  646. for (; k < k2; k += 2) {
  647. LOAD_A_1x4(k, m);
  648. LOAD_PACKED_B(rb0, rb2, rb4, rb6, 0);
  649. KERNEL_MMA_4ACC(rb0, rb2, rb4, rb6, ra0, ra0, ra0, ra0);
  650. LOAD_A_1x4(k+1, m);
  651. LOAD_PACKED_B(rb1, rb3, rb5, rb7, 16);
  652. KERNEL_MMA_4ACC(rb1, rb3, rb5, rb7, ra0, ra0, ra0, ra0);
  653. }
  654. for (; k < K; k++) {
  655. LOAD_A_1x4(k, m);
  656. LOAD_PACKED_B(rb0, rb1, rb2, rb3, 0);
  657. KERNEL_MMA_4ACC(rb0, rb1, rb2, rb3, ra0, ra0, ra0, ra0);
  658. }
  659. }
  660. #if !defined(B0)
  661. register vector float rc0;
  662. #endif
  663. vector float result[4];
  664. SAVE_4x4_ACC(&acc0, n+0, m+0);
  665. SAVE_4x4_ACC(&acc1, n+4, m+0);
  666. SAVE_4x4_ACC(&acc2, n+8, m+0);
  667. SAVE_4x4_ACC(&acc3, n+12, m+0);
  668. }
  669. for (; m < m2; m += 2) {
  670. __vector_quad acc0, acc1, acc2, acc3;
  671. INIT_4ACCS();
  672. register vector float ra0;
  673. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7, rb8, rb9,
  674. rb10, rb11, rb12, rb13, rb14, rb15;
  675. register vector float t0, t1, t2, t3;
  676. if (!has_packing) {
  677. for (k = 0; k < k4; k += 4) {
  678. LOAD_A_1x2(k, m);
  679. LOAD_BT_16x4(n, k);
  680. KERNEL_MMA_4ACC(rb0, rb4, rb8, rb12, ra0, ra0, ra0, ra0);
  681. LOAD_A_1x2(k+1, m);
  682. KERNEL_MMA_4ACC(rb1, rb5, rb9, rb13, ra0, ra0, ra0, ra0);
  683. LOAD_A_1x2(k+2, m);
  684. KERNEL_MMA_4ACC(rb2, rb6, rb10, rb14, ra0, ra0, ra0, ra0);
  685. LOAD_A_1x2(k+3, m);
  686. KERNEL_MMA_4ACC(rb3, rb7, rb11, rb15, ra0, ra0, ra0, ra0);
  687. }
  688. for (; k < k2; k += 2) {
  689. LOAD_A_1x2(k, m);
  690. LOAD_BT_16x2(n, k);
  691. KERNEL_MMA_4ACC(rb0, rb2, rb4, rb6, ra0, ra0, ra0, ra0);
  692. LOAD_A_1x2(k+1, m);
  693. KERNEL_MMA_4ACC(rb1, rb3, rb5, rb7, ra0, ra0, ra0, ra0);
  694. }
  695. for (; k < K; k++) {
  696. LOAD_A_1x2(k, m);
  697. LOAD_BT_16x1(n, k);
  698. KERNEL_MMA_4ACC(rb0, rb1, rb2, rb3, ra0, ra0, ra0, ra0);
  699. }
  700. } else {
  701. for (k = 0; k < k4; k += 4) {
  702. LOAD_A_1x2(k, m);
  703. LOAD_PACKED_B(rb0, rb4, rb8, rb12, 0);
  704. KERNEL_MMA_4ACC(rb0, rb4, rb8, rb12, ra0, ra0, ra0, ra0);
  705. LOAD_A_1x2(k+1, m);
  706. LOAD_PACKED_B(rb1, rb5, rb9, rb13, 16);
  707. KERNEL_MMA_4ACC(rb1, rb5, rb9, rb13, ra0, ra0, ra0, ra0);
  708. LOAD_A_1x2(k+2, m);
  709. LOAD_PACKED_B(rb2, rb6, rb10, rb14, 32);
  710. KERNEL_MMA_4ACC(rb2, rb6, rb10, rb14, ra0, ra0, ra0, ra0);
  711. LOAD_A_1x2(k+3, m);
  712. LOAD_PACKED_B(rb3, rb7, rb11, rb15, 48);
  713. KERNEL_MMA_4ACC(rb3, rb7, rb11, rb15, ra0, ra0, ra0, ra0);
  714. }
  715. for (; k < k2; k += 2) {
  716. LOAD_A_1x2(k, m);
  717. LOAD_PACKED_B(rb0, rb2, rb4, rb6, 0);
  718. KERNEL_MMA_4ACC(rb0, rb2, rb4, rb6, ra0, ra0, ra0, ra0);
  719. LOAD_A_1x2(k+1, m);
  720. LOAD_PACKED_B(rb1, rb3, rb5, rb7, 16);
  721. KERNEL_MMA_4ACC(rb1, rb3, rb5, rb7, ra0, ra0, ra0, ra0);
  722. }
  723. for (; k < K; k++) {
  724. LOAD_A_1x2(k, m);
  725. LOAD_PACKED_B(rb0, rb1, rb2, rb3, 0);
  726. KERNEL_MMA_4ACC(rb0, rb1, rb2, rb3, ra0, ra0, ra0, ra0);
  727. }
  728. }
  729. #if !defined(B0)
  730. register vector float rc0;
  731. #endif
  732. vector float result[4];
  733. SAVE_4x2_ACC(&acc0, n+0, m+0);
  734. SAVE_4x2_ACC(&acc1, n+4, m+0);
  735. SAVE_4x2_ACC(&acc2, n+8, m+0);
  736. SAVE_4x2_ACC(&acc3, n+12, m+0);
  737. }
  738. for (; m < M; m++) {
  739. register vector float ra0;
  740. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7, rb8, rb9,
  741. rb10, rb11, rb12, rb13, rb14, rb15;
  742. register vector float t0, t1, t2, t3;
  743. vector float result = ((vector float){0.,0.,0.,0.});
  744. vector float result1 = ((vector float){0.,0.,0.,0.});
  745. vector float result2 = ((vector float){0.,0.,0.,0.});
  746. vector float result3 = ((vector float){0.,0.,0.,0.});
  747. if (!has_packing) {
  748. for (k = 0; k < k4; k += 4) {
  749. LOAD_A_1x1(k, m);
  750. LOAD_BT_16x4(n, k);
  751. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb4, rb8, rb12);
  752. LOAD_A_1x1(k+1, m);
  753. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb1, rb5, rb9, rb13);
  754. LOAD_A_1x1(k+2, m);
  755. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb2, rb6, rb10, rb14);
  756. LOAD_A_1x1(k+3, m);
  757. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb3, rb7, rb11, rb15);
  758. }
  759. for (; k < k2; k += 2) {
  760. LOAD_A_1x1(k, m);
  761. LOAD_BT_16x2(n, k);
  762. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb2, rb4, rb6);
  763. LOAD_A_1x1(k+1, m);
  764. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb1, rb3, rb5, rb7);
  765. }
  766. for (; k < K; k++) {
  767. LOAD_A_1x1(k, m);
  768. LOAD_BT_16x1(n, k);
  769. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb1, rb2, rb3);
  770. }
  771. } else {
  772. for (k = 0; k < k4; k += 4) {
  773. LOAD_A_1x1(k, m);
  774. LOAD_PACKED_B(rb0, rb4, rb8, rb12, 0);
  775. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb4, rb8, rb12);
  776. LOAD_A_1x1(k+1, m);
  777. LOAD_PACKED_B(rb1, rb5, rb9, rb13, 16);
  778. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb1, rb5, rb9, rb13);
  779. LOAD_A_1x1(k+2, m);
  780. LOAD_PACKED_B(rb2, rb6, rb10, rb14, 32);
  781. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb2, rb6, rb10, rb14);
  782. LOAD_A_1x1(k+3, m);
  783. LOAD_PACKED_B(rb3, rb7, rb11, rb15, 48);
  784. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb3, rb7, rb11, rb15);
  785. }
  786. for (; k < k2; k += 2) {
  787. LOAD_A_1x1(k, m);
  788. LOAD_PACKED_B(rb0, rb2, rb4, rb6, 0);
  789. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb2, rb4, rb6);
  790. LOAD_A_1x1(k+1, m);
  791. LOAD_PACKED_B(rb1, rb3, rb5, rb7, 16);
  792. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb1, rb3, rb5, rb7);
  793. }
  794. for (; k < K; k++) {
  795. LOAD_A_1x1(k, m);
  796. LOAD_PACKED_B(rb0, rb1, rb2, rb3, 0);
  797. KERNEL_VMADD_4VSR(ra0, ra0, ra0, ra0, rb0, rb1, rb2, rb3);
  798. }
  799. }
  800. SAVE_4x1_VSR(result, n+0, m);
  801. SAVE_4x1_VSR(result1, n+4, m);
  802. SAVE_4x1_VSR(result2, n+8, m);
  803. SAVE_4x1_VSR(result3, n+12, m);
  804. }
  805. }
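/* Remaining columns in blocks of 8: the wider m blocking (up to 16 rows)
 * keeps all eight accumulators busy with only two B vectors per k. */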
  806. for (; n < n8; n += 8) {
  807. for (m = 0; m < m16; m += 16) {
  808. __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7;
  809. INIT_8ACCS();
  810. register vector float ra0, ra1, ra2, ra3;
  811. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7;
  812. register vector float t0, t1, t2, t3;
  813. for (k = 0; k < k4; k += 4) {
  814. LOAD_A_1x16(k, m);
  815. LOAD_BT_8x4(n, k);
  816. KERNEL_MMA_8ACC(rb0, rb4, rb0, rb4, rb0, rb4, rb0, rb4,
  817. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  818. LOAD_A_1x16(k+1, m);
  819. KERNEL_MMA_8ACC(rb1, rb5, rb1, rb5, rb1, rb5, rb1, rb5,
  820. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  821. LOAD_A_1x16(k+2, m);
  822. KERNEL_MMA_8ACC(rb2, rb6, rb2, rb6, rb2, rb6, rb2, rb6,
  823. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  824. LOAD_A_1x16(k+3, m);
  825. KERNEL_MMA_8ACC(rb3, rb7, rb3, rb7, rb3, rb7, rb3, rb7,
  826. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  827. }
  828. for (; k < k2; k += 2) {
  829. LOAD_A_1x16(k, m);
  830. LOAD_BT_8x2(n, k);
  831. KERNEL_MMA_8ACC(rb0, rb2, rb0, rb2, rb0, rb2, rb0, rb2,
  832. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  833. LOAD_A_1x16(k+1, m);
  834. KERNEL_MMA_8ACC(rb1, rb3, rb1, rb3, rb1, rb3, rb1, rb3,
  835. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  836. }
  837. for (; k < K; k++) {
  838. LOAD_A_1x16(k, m);
  839. LOAD_BT_8x1(n, k);
  840. KERNEL_MMA_8ACC(rb0, rb1, rb0, rb1, rb0, rb1, rb0, rb1,
  841. ra0, ra0, ra1, ra1, ra2, ra2, ra3, ra3);
  842. }
  843. #if !defined(B0)
  844. register vector float rc0;
  845. #endif
  846. vector float result[4];
  847. SAVE_4x4_ACC(&acc0, n+0, m+0);
  848. SAVE_4x4_ACC(&acc2, n+0, m+4);
  849. SAVE_4x4_ACC(&acc4, n+0, m+8);
  850. SAVE_4x4_ACC(&acc6, n+0, m+12);
  851. SAVE_4x4_ACC(&acc1, n+4, m+0);
  852. SAVE_4x4_ACC(&acc3, n+4, m+4);
  853. SAVE_4x4_ACC(&acc5, n+4, m+8);
  854. SAVE_4x4_ACC(&acc7, n+4, m+12);
  855. }
  856. for (; m < m8; m += 8) {
  857. __vector_quad acc0, acc1, acc2, acc3;
  858. INIT_4ACCS();
  859. register vector float ra0, ra1;
  860. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7;
  861. register vector float t0, t1, t2, t3;
  862. for (k = 0; k < k4; k += 4) {
  863. LOAD_A_1x8(k, m);
  864. LOAD_BT_8x4(n, k);
  865. KERNEL_MMA_4ACC(rb0, rb4, rb0, rb4, ra0, ra0, ra1, ra1);
  866. LOAD_A_1x8(k+1, m);
  867. KERNEL_MMA_4ACC(rb1, rb5, rb1, rb5, ra0, ra0, ra1, ra1);
  868. LOAD_A_1x8(k+2, m);
  869. KERNEL_MMA_4ACC(rb2, rb6, rb2, rb6, ra0, ra0, ra1, ra1);
  870. LOAD_A_1x8(k+3, m);
  871. KERNEL_MMA_4ACC(rb3, rb7, rb3, rb7, ra0, ra0, ra1, ra1);
  872. }
  873. for (; k < k2; k += 2) {
  874. LOAD_A_1x8(k, m);
  875. LOAD_BT_8x2(n, k);
  876. KERNEL_MMA_4ACC(rb0, rb2, rb0, rb2, ra0, ra0, ra1, ra1);
  877. LOAD_A_1x8(k+1, m);
  878. KERNEL_MMA_4ACC(rb1, rb3, rb1, rb3, ra0, ra0, ra1, ra1);
  879. }
  880. for (; k < K; k++) {
  881. LOAD_A_1x8(k, m);
  882. LOAD_BT_8x1(n, k);
  883. KERNEL_MMA_4ACC(rb0, rb1, rb0, rb1, ra0, ra0, ra1, ra1);
  884. }
  885. #if !defined(B0)
  886. register vector float rc0;
  887. #endif
  888. vector float result[4];
  889. SAVE_4x4_ACC(&acc0, n+0, m+0);
  890. SAVE_4x4_ACC(&acc2, n+0, m+4);
  891. SAVE_4x4_ACC(&acc1, n+4, m+0);
  892. SAVE_4x4_ACC(&acc3, n+4, m+4);
  893. }
  894. for (; m < m4; m += 4) {
  895. __vector_quad acc0, acc1;
  896. INIT_2ACCS();
  897. register vector float ra0;
  898. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7;
  899. register vector float t0, t1, t2, t3;
  900. for (k = 0; k < k4; k += 4) {
  901. LOAD_A_1x4(k, m);
  902. LOAD_BT_8x4(n, k);
  903. KERNEL_MMA_2ACC(rb0, rb4, ra0, ra0);
  904. LOAD_A_1x4(k+1, m);
  905. KERNEL_MMA_2ACC(rb1, rb5, ra0, ra0);
  906. LOAD_A_1x4(k+2, m);
  907. KERNEL_MMA_2ACC(rb2, rb6, ra0, ra0);
  908. LOAD_A_1x4(k+3, m);
  909. KERNEL_MMA_2ACC(rb3, rb7, ra0, ra0);
  910. }
  911. for (; k < k2; k += 2) {
  912. LOAD_A_1x4(k, m);
  913. LOAD_BT_8x2(n, k);
  914. KERNEL_MMA_2ACC(rb0, rb2, ra0, ra0);
  915. LOAD_A_1x4(k+1, m);
  916. KERNEL_MMA_2ACC(rb1, rb3, ra0, ra0);
  917. }
  918. for (; k < K; k++) {
  919. LOAD_A_1x4(k, m);
  920. LOAD_BT_8x1(n, k);
  921. KERNEL_MMA_2ACC(rb0, rb1, ra0, ra0);
  922. }
  923. #if !defined(B0)
  924. register vector float rc0;
  925. #endif
  926. vector float result[4];
  927. SAVE_4x4_ACC(&acc0, n+0, m+0);
  928. SAVE_4x4_ACC(&acc1, n+4, m+0);
  929. }
  930. for (; m < m2; m += 2) {
  931. __vector_quad acc0, acc1;
  932. INIT_2ACCS();
  933. register vector float ra0;
  934. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7;
  935. register vector float t0, t1, t2, t3;
  936. for (k = 0; k < k4; k += 4) {
  937. LOAD_A_1x2(k, m);
  938. LOAD_BT_8x4(n, k);
  939. KERNEL_MMA_2ACC(rb0, rb4, ra0, ra0);
  940. LOAD_A_1x2(k+1, m);
  941. KERNEL_MMA_2ACC(rb1, rb5, ra0, ra0);
  942. LOAD_A_1x2(k+2, m);
  943. KERNEL_MMA_2ACC(rb2, rb6, ra0, ra0);
  944. LOAD_A_1x2(k+3, m);
  945. KERNEL_MMA_2ACC(rb3, rb7, ra0, ra0);
  946. }
  947. for (; k < k2; k += 2) {
  948. LOAD_A_1x2(k, m);
  949. LOAD_BT_8x2(n, k);
  950. KERNEL_MMA_2ACC(rb0, rb2, ra0, ra0);
  951. LOAD_A_1x2(k+1, m);
  952. KERNEL_MMA_2ACC(rb1, rb3, ra0, ra0);
  953. }
  954. for (; k < K; k++) {
  955. LOAD_A_1x2(k, m);
  956. LOAD_BT_8x1(n, k);
  957. KERNEL_MMA_2ACC(rb0, rb1, ra0, ra0);
  958. }
  959. #if !defined(B0)
  960. register vector float rc0;
  961. #endif
  962. vector float result[4];
  963. SAVE_4x2_ACC(&acc0, n+0, m+0);
  964. SAVE_4x2_ACC(&acc1, n+4, m+0);
  965. }
  966. for (; m < M; m++) {
  967. register vector float ra0;
  968. register vector float rb0, rb1, rb2, rb3, rb4, rb5, rb6, rb7;
  969. register vector float t0, t1, t2, t3;
  970. vector float result = ((vector float){0.,0.,0.,0.});
  971. vector float result1 = ((vector float){0.,0.,0.,0.});
  972. for (k = 0; k < k4; k += 4) {
  973. LOAD_A_1x1(k, m);
  974. LOAD_BT_8x4(n, k);
  975. KERNEL_VMADD_2VSR(ra0, ra0, rb0, rb4);
  976. LOAD_A_1x1(k+1, m);
  977. KERNEL_VMADD_2VSR(ra0, ra0, rb1, rb5);
  978. LOAD_A_1x1(k+2, m);
  979. KERNEL_VMADD_2VSR(ra0, ra0, rb2, rb6);
  980. LOAD_A_1x1(k+3, m);
  981. KERNEL_VMADD_2VSR(ra0, ra0, rb3, rb7);
  982. }
  983. for (; k < k2; k += 2) {
  984. LOAD_A_1x1(k, m);
  985. LOAD_BT_8x2(n, k);
  986. KERNEL_VMADD_2VSR(ra0, ra0, rb0, rb2);
  987. LOAD_A_1x1(k+1, m);
  988. KERNEL_VMADD_2VSR(ra0, ra0, rb1, rb3);
  989. }
  990. for (; k < K; k++) {
  991. LOAD_A_1x1(k, m);
  992. LOAD_BT_8x1(n, k);
  993. KERNEL_VMADD_2VSR(ra0, ra0, rb0, rb1);
  994. }
  995. SAVE_4x1_VSR(result, n+0, m);
  996. SAVE_4x1_VSR(result1, n+4, m);
  997. }
  998. }
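/* Remaining columns in blocks of 4: one B vector per k feeds up to four
 * accumulators spanning 16 rows of C. */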
  999. for (; n < n4; n += 4) {
  1000. for (m = 0; m < m16; m += 16) {
  1001. __vector_quad acc0, acc1, acc2, acc3;
  1002. INIT_4ACCS();
  1003. register vector float ra0, ra1, ra2, ra3;
  1004. register vector float rb0, rb1, rb2, rb3;
  1005. register vector float t0, t1, t2, t3;
  1006. for (k = 0; k < k4; k += 4) {
  1007. LOAD_A_1x16(k, m);
  1008. LOAD_BT_4x4(n, k);
  1009. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1010. LOAD_A_1x16(k+1, m);
  1011. KERNEL_MMA_4ACC(rb1, rb1, rb1, rb1, ra0, ra1, ra2, ra3);
  1012. LOAD_A_1x16(k+2, m);
  1013. KERNEL_MMA_4ACC(rb2, rb2, rb2, rb2, ra0, ra1, ra2, ra3);
  1014. LOAD_A_1x16(k+3, m);
  1015. KERNEL_MMA_4ACC(rb3, rb3, rb3, rb3, ra0, ra1, ra2, ra3);
  1016. }
  1017. for (; k < k2; k += 2) {
  1018. LOAD_A_1x16(k, m);
  1019. LOAD_BT_4x2(n, k);
  1020. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1021. LOAD_A_1x16(k+1, m);
  1022. KERNEL_MMA_4ACC(rb1, rb1, rb1, rb1, ra0, ra1, ra2, ra3);
  1023. }
  1024. for (; k < K; k++) {
  1025. LOAD_A_1x16(k, m);
  1026. LOAD_BT_4x1(n, k);
  1027. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1028. }
  1029. #if !defined(B0)
  1030. register vector float rc0;
  1031. #endif
  1032. vector float result[4];
  1033. SAVE_4x4_ACC(&acc0, n+0, m+0);
  1034. SAVE_4x4_ACC(&acc1, n+0, m+4);
  1035. SAVE_4x4_ACC(&acc2, n+0, m+8);
  1036. SAVE_4x4_ACC(&acc3, n+0, m+12);
  1037. }
  1038. for (; m < m8; m += 8) {
  1039. __vector_quad acc0, acc1;
  1040. INIT_2ACCS();
  1041. register vector float ra0, ra1;
  1042. register vector float rb0, rb1, rb2, rb3;
  1043. register vector float t0, t1, t2, t3;
  1044. for (k = 0; k < k4; k += 4) {
  1045. LOAD_A_1x8(k, m);
  1046. LOAD_BT_4x4(n, k);
  1047. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1048. LOAD_A_1x8(k+1, m);
  1049. KERNEL_MMA_2ACC(rb1, rb1, ra0, ra1);
  1050. LOAD_A_1x8(k+2, m);
  1051. KERNEL_MMA_2ACC(rb2, rb2, ra0, ra1);
  1052. LOAD_A_1x8(k+3, m);
  1053. KERNEL_MMA_2ACC(rb3, rb3, ra0, ra1);
  1054. }
  1055. for (; k < k2; k += 2) {
  1056. LOAD_A_1x8(k, m);
  1057. LOAD_BT_4x2(n, k);
  1058. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1059. LOAD_A_1x8(k+1, m);
  1060. KERNEL_MMA_2ACC(rb1, rb1, ra0, ra1);
  1061. }
  1062. for (; k < K; k++) {
  1063. LOAD_A_1x8(k, m);
  1064. LOAD_BT_4x1(n, k);
  1065. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1066. }
  1067. #if !defined(B0)
  1068. register vector float rc0;
  1069. #endif
  1070. vector float result[4];
  1071. SAVE_4x4_ACC(&acc0, n+0, m+0);
  1072. SAVE_4x4_ACC(&acc1, n+0, m+4);
  1073. }
  1074. for (; m < m4; m += 4) {
  1075. __vector_quad acc0;
  1076. INIT_1ACC();
  1077. register vector float ra0;
  1078. register vector float rb0, rb1, rb2, rb3;
  1079. register vector float t0, t1, t2, t3;
  1080. for (k = 0; k < k4; k += 4) {
  1081. LOAD_A_1x4(k, m);
  1082. LOAD_BT_4x4(n, k);
  1083. KERNEL_MMA_1ACC(rb0, ra0);
  1084. LOAD_A_1x4(k+1, m);
  1085. KERNEL_MMA_1ACC(rb1, ra0);
  1086. LOAD_A_1x4(k+2, m);
  1087. KERNEL_MMA_1ACC(rb2, ra0);
  1088. LOAD_A_1x4(k+3, m);
  1089. KERNEL_MMA_1ACC(rb3, ra0);
  1090. }
  1091. for (; k < k2; k += 2) {
  1092. LOAD_A_1x4(k, m);
  1093. LOAD_BT_4x2(n, k);
  1094. KERNEL_MMA_1ACC(rb0, ra0);
  1095. LOAD_A_1x4(k+1, m);
  1096. KERNEL_MMA_1ACC(rb1, ra0);
  1097. }
  1098. for (; k < K; k++) {
  1099. LOAD_A_1x4(k, m);
  1100. LOAD_BT_4x1(n, k);
  1101. KERNEL_MMA_1ACC(rb0, ra0);
  1102. }
  1103. #if !defined(B0)
  1104. register vector float rc0;
  1105. #endif
  1106. vector float result[4];
  1107. SAVE_4x4_ACC(&acc0, n, m);
  1108. }
  1109. for (; m < m2; m += 2) {
  1110. __vector_quad acc0;
  1111. INIT_1ACC();
  1112. register vector float ra0;
  1113. register vector float rb0, rb1, rb2, rb3;
  1114. register vector float t0, t1, t2, t3;
  1115. for (k = 0; k < k4; k += 4) {
  1116. LOAD_A_1x2(k, m);
  1117. LOAD_BT_4x4(n, k);
  1118. KERNEL_MMA_1ACC(rb0, ra0);
  1119. LOAD_A_1x2(k+1, m);
  1120. KERNEL_MMA_1ACC(rb1, ra0);
  1121. LOAD_A_1x2(k+2, m);
  1122. KERNEL_MMA_1ACC(rb2, ra0);
  1123. LOAD_A_1x2(k+3, m);
  1124. KERNEL_MMA_1ACC(rb3, ra0);
  1125. }
  1126. for (; k < k2; k += 2) {
  1127. LOAD_A_1x2(k, m);
  1128. LOAD_BT_4x2(n, k);
  1129. KERNEL_MMA_1ACC(rb0, ra0);
  1130. LOAD_A_1x2(k+1, m);
  1131. KERNEL_MMA_1ACC(rb1, ra0);
  1132. }
  1133. for (; k < K; k++) {
  1134. LOAD_A_1x2(k, m);
  1135. LOAD_BT_4x1(n, k);
  1136. KERNEL_MMA_1ACC(rb0, ra0);
  1137. }
  1138. #if !defined(B0)
  1139. register vector float rc0;
  1140. #endif
  1141. vector float result[4];
  1142. SAVE_4x2_ACC(&acc0, n, m);
  1143. }
  1144. for (; m < M; m++) {
  1145. register vector float ra0;
  1146. register vector float rb0, rb1, rb2, rb3;
  1147. register vector float t0, t1, t2, t3;
  1148. vector float result = ((vector float){0.,0.,0.,0.});
  1149. for (k = 0; k < k4; k += 4) {
  1150. LOAD_A_1x1(k, m);
  1151. LOAD_BT_4x4(n, k);
  1152. KERNEL_VMADD_1VSR(ra0, rb0);
  1153. LOAD_A_1x1(k+1, m);
  1154. KERNEL_VMADD_1VSR(ra0, rb1);
  1155. LOAD_A_1x1(k+2, m);
  1156. KERNEL_VMADD_1VSR(ra0, rb2);
  1157. LOAD_A_1x1(k+3, m);
  1158. KERNEL_VMADD_1VSR(ra0, rb3);
  1159. }
  1160. for (; k < k2; k += 2) {
  1161. LOAD_A_1x1(k, m);
  1162. LOAD_BT_4x2(n, k);
  1163. KERNEL_VMADD_1VSR(ra0, rb0);
  1164. LOAD_A_1x1(k+1, m);
  1165. KERNEL_VMADD_1VSR(ra0, rb1);
  1166. }
  1167. for (; k < K; k++) {
  1168. LOAD_A_1x1(k, m);
  1169. LOAD_BT_4x1(n, k);
  1170. KERNEL_VMADD_1VSR(ra0, rb0);
  1171. }
  1172. SAVE_4x1_VSR(result, n+0, m);
  1173. }
  1174. }
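/* Two-column tail: the MMA path still uses 4x4 accumulators but only the
 * first two rows of each accumulator are stored (SAVE_2x4_ACC); the 2x2 and
 * single-row cases fall back to vec_madd. */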
  1175. for (; n < n2; n += 2) {
  1176. for (m = 0; m < m16; m += 16) {
  1177. __vector_quad acc0, acc1, acc2, acc3;
  1178. INIT_4ACCS();
  1179. register vector float ra0, ra1, ra2, ra3;
  1180. register vector float rb0, rb1, rb2, rb3;
  1181. register vector float t0, t1, t2;
  1182. for (k = 0; k < k4; k += 4) {
  1183. LOAD_A_1x16(k, m);
  1184. LOAD_BT_2x4(n, k);
  1185. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1186. LOAD_A_1x16(k+1, m);
  1187. KERNEL_MMA_4ACC(rb1, rb1, rb1, rb1, ra0, ra1, ra2, ra3);
  1188. LOAD_A_1x16(k+2, m);
  1189. KERNEL_MMA_4ACC(rb2, rb2, rb2, rb2, ra0, ra1, ra2, ra3);
  1190. LOAD_A_1x16(k+3, m);
  1191. KERNEL_MMA_4ACC(rb3, rb3, rb3, rb3, ra0, ra1, ra2, ra3);
  1192. }
  1193. for (; k < k2; k += 2) {
  1194. LOAD_A_1x16(k, m);
  1195. LOAD_BT_2x2(n, k);
  1196. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1197. LOAD_A_1x16(k+1, m);
  1198. KERNEL_MMA_4ACC(rb1, rb1, rb1, rb1, ra0, ra1, ra2, ra3);
  1199. }
  1200. for (; k < K; k++) {
  1201. LOAD_A_1x16(k, m);
  1202. LOAD_BT_2x1(n, k);
  1203. KERNEL_MMA_4ACC(rb0, rb0, rb0, rb0, ra0, ra1, ra2, ra3);
  1204. }
  1205. #if !defined(B0)
  1206. register vector float rc0;
  1207. #endif
  1208. vector float result[4];
  1209. SAVE_2x4_ACC(&acc0, n, m+0);
  1210. SAVE_2x4_ACC(&acc1, n, m+4);
  1211. SAVE_2x4_ACC(&acc2, n, m+8);
  1212. SAVE_2x4_ACC(&acc3, n, m+12);
  1213. }
  1214. for (; m < m8; m += 8) {
  1215. __vector_quad acc0, acc1;
  1216. INIT_2ACCS();
  1217. register vector float ra0, ra1;
  1218. register vector float rb0, rb1, rb2, rb3;
  1219. register vector float t0, t1, t2;
  1220. for (k = 0; k < k4; k += 4) {
  1221. LOAD_A_1x8(k, m);
  1222. LOAD_BT_2x4(n, k);
  1223. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1224. LOAD_A_1x8(k+1, m);
  1225. KERNEL_MMA_2ACC(rb1, rb1, ra0, ra1);
  1226. LOAD_A_1x8(k+2, m);
  1227. KERNEL_MMA_2ACC(rb2, rb2, ra0, ra1);
  1228. LOAD_A_1x8(k+3, m);
  1229. KERNEL_MMA_2ACC(rb3, rb3, ra0, ra1);
  1230. }
  1231. for (; k < k2; k += 2) {
  1232. LOAD_A_1x8(k, m);
  1233. LOAD_BT_2x2(n, k);
  1234. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1235. LOAD_A_1x8(k+1, m);
  1236. KERNEL_MMA_2ACC(rb1, rb1, ra0, ra1);
  1237. }
  1238. for (; k < K; k++) {
  1239. LOAD_A_1x8(k, m);
  1240. LOAD_BT_2x1(n, k);
  1241. KERNEL_MMA_2ACC(rb0, rb0, ra0, ra1);
  1242. }
  1243. #if !defined(B0)
  1244. register vector float rc0;
  1245. #endif
  1246. vector float result[4];
  1247. SAVE_2x4_ACC(&acc0, n, m+0);
  1248. SAVE_2x4_ACC(&acc1, n, m+4);
  1249. }
  1250. for (; m < m4; m += 4) {
  1251. __vector_quad acc0;
  1252. INIT_1ACC();
  1253. register vector float ra0;
  1254. register vector float rb0, rb1, rb2, rb3;
  1255. register vector float t0, t1, t2;
  1256. for (k = 0; k < k4; k += 4) {
  1257. LOAD_A_1x4(k, m);
  1258. LOAD_BT_2x4(n, k);
  1259. KERNEL_MMA_1ACC(rb0, ra0);
  1260. LOAD_A_1x4(k+1, m);
  1261. KERNEL_MMA_1ACC(rb1, ra0);
  1262. LOAD_A_1x4(k+2, m);
  1263. KERNEL_MMA_1ACC(rb2, ra0);
  1264. LOAD_A_1x4(k+3, m);
  1265. KERNEL_MMA_1ACC(rb3, ra0);
  1266. }
  1267. for (; k < k2; k += 2) {
  1268. LOAD_A_1x4(k, m);
  1269. LOAD_BT_2x2(n, k);
  1270. KERNEL_MMA_1ACC(rb0, ra0);
  1271. LOAD_A_1x4(k+1, m);
  1272. KERNEL_MMA_1ACC(rb1, ra0);
  1273. }
  1274. for (; k < K; k++) {
  1275. LOAD_A_1x4(k, m);
  1276. LOAD_BT_2x1(n, k);
  1277. KERNEL_MMA_1ACC(rb0, ra0);
  1278. }
  1279. #if !defined(B0)
  1280. register vector float rc0;
  1281. #endif
  1282. vector float result[4];
  1283. SAVE_2x4_ACC(&acc0, n, m);
  1284. }
  1285. for (; m < m2; m += 2) {
  1286. vector float result = ((vector float){0.,0.,0.,0.});
  1287. register vector float ra0;
  1288. register vector float rb0;
  1289. for (k = 0; k < K; k++) {
  1290. LOAD_A_2x2(k, m);
  1291. LOAD_B_2x2(n, k);
  1292. KERNEL_VMADD_1VSR(ra0, rb0);
  1293. }
  1294. #if !defined(B0)
  1295. register vector float rc0;
  1296. #endif
  1297. SAVE_2x2_VSR(result, n, m);
  1298. }
  1299. for (; m < M; m++) {
  1300. vector float result = ((vector float){0.,0.,0.,0.});
  1301. register vector float ra0;
  1302. register vector float rb0 = ((vector float){0.,0.,0.,0.});
  1303. for (k = 0; k < K; k++) {
  1304. LOAD_A_1x1(k, m);
  1305. LOAD_B_2x1(n, k);
  1306. KERNEL_VMADD_1VSR(ra0, rb0);
  1307. }
  1308. SAVE_2x1_VSR(result, n, m);
  1309. }
  1310. }
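/* Last single column: vectorize along m with vec_madd where possible and
 * finish with a scalar dot product for any leftover row. */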
  1311. for (; n < N; n++) {
  1312. for (m = 0; m < m16; m += 16) {
  1313. vector float result = ((vector float){0.,0.,0.,0.});
  1314. vector float result1 = ((vector float){0.,0.,0.,0.});
  1315. vector float result2 = ((vector float){0.,0.,0.,0.});
  1316. vector float result3 = ((vector float){0.,0.,0.,0.});
  1317. register vector float ra0, ra1, ra2, ra3;
  1318. register vector float rb0;
  1319. for (k = 0; k < K; k++) {
  1320. LOAD_A_1x16(k, m);
  1321. LOAD_B_1x1(n, k);
  1322. KERNEL_VMADD_4VSR(ra0, ra1, ra2, ra3, rb0, rb0, rb0, rb0);
  1323. }
  1324. #if !defined(B0)
  1325. register vector float rc0;
  1326. #endif
  1327. SAVE_1x4_VSR(result, n, m);
  1328. SAVE_1x4_VSR(result1, n, m+4);
  1329. SAVE_1x4_VSR(result2, n, m+8);
  1330. SAVE_1x4_VSR(result3, n, m+12);
  1331. }
  1332. for (; m < m8; m += 8) {
  1333. vector float result = ((vector float){0.,0.,0.,0.});
  1334. vector float result1 = ((vector float){0.,0.,0.,0.});
  1335. register vector float ra0, ra1;
  1336. register vector float rb0;
  1337. for (k = 0; k < K; k++) {
  1338. LOAD_A_1x8(k, m);
  1339. LOAD_B_1x1(n, k);
  1340. KERNEL_VMADD_2VSR(ra0, ra1, rb0, rb0);
  1341. }
  1342. #if !defined(B0)
  1343. register vector float rc0;
  1344. #endif
  1345. SAVE_1x4_VSR(result, n, m);
  1346. SAVE_1x4_VSR(result1, n, m+4);
  1347. }
  1348. for (; m < m4; m += 4) {
  1349. vector float result = ((vector float){0.,0.,0.,0.});
  1350. register vector float ra0;
  1351. register vector float rb0;
  1352. for (k = 0; k < K; k++) {
  1353. LOAD_A_1x4(k, m);
  1354. LOAD_B_1x1(n, k);
  1355. KERNEL_VMADD_1VSR(ra0, rb0);
  1356. }
  1357. #if !defined(B0)
  1358. register vector float rc0;
  1359. #endif
  1360. SAVE_1x4_VSR(result, n, m);
  1361. }
  1362. for (; m < m2; m += 2) {
  1363. vector float result = ((vector float){0.,0.,0.,0.});
  1364. register vector float ra0;
  1365. register vector float rb0;
  1366. for (k = 0; k < K; k++) {
  1367. LOAD_A_1x2(k, m);
  1368. LOAD_B_1x1(n, k);
  1369. KERNEL_VMADD_1VSR(ra0, rb0);
  1370. }
  1371. #if !defined(B0)
  1372. register vector float rc0;
  1373. #endif
  1374. SAVE_1x2_VSR(result, n, m);
  1375. }
  1376. for (; m < M; m++) {
  1377. FLOAT result = 0.0f;
  1378. for (k = 0; k < K; k++) {
  1379. result += A[m+k*lda] * B[n*ldb+k];
  1380. }
  1381. result = result * alpha;
  1382. #if !defined(B0)
  1383. C[n*ldc+m] = (C[n*ldc+m] * beta) + result;
  1384. #else
  1385. C[n*ldc+m] = result;
  1386. #endif
  1387. }
  1388. }
  1389. if (has_packing) free (packB);
  1390. return 0;
  1391. }